connectonion 0.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- connectonion/__init__.py +78 -0
- connectonion/address.py +320 -0
- connectonion/agent.py +450 -0
- connectonion/announce.py +84 -0
- connectonion/asgi.py +287 -0
- connectonion/auto_debug_exception.py +181 -0
- connectonion/cli/__init__.py +3 -0
- connectonion/cli/browser_agent/__init__.py +5 -0
- connectonion/cli/browser_agent/browser.py +243 -0
- connectonion/cli/browser_agent/prompt.md +107 -0
- connectonion/cli/commands/__init__.py +1 -0
- connectonion/cli/commands/auth_commands.py +527 -0
- connectonion/cli/commands/browser_commands.py +27 -0
- connectonion/cli/commands/create.py +511 -0
- connectonion/cli/commands/deploy_commands.py +220 -0
- connectonion/cli/commands/doctor_commands.py +173 -0
- connectonion/cli/commands/init.py +469 -0
- connectonion/cli/commands/project_cmd_lib.py +828 -0
- connectonion/cli/commands/reset_commands.py +149 -0
- connectonion/cli/commands/status_commands.py +168 -0
- connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
- connectonion/cli/docs/connectonion.md +1256 -0
- connectonion/cli/docs.md +123 -0
- connectonion/cli/main.py +148 -0
- connectonion/cli/templates/meta-agent/README.md +287 -0
- connectonion/cli/templates/meta-agent/agent.py +196 -0
- connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
- connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
- connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
- connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
- connectonion/cli/templates/minimal/README.md +56 -0
- connectonion/cli/templates/minimal/agent.py +40 -0
- connectonion/cli/templates/playwright/README.md +118 -0
- connectonion/cli/templates/playwright/agent.py +336 -0
- connectonion/cli/templates/playwright/prompt.md +102 -0
- connectonion/cli/templates/playwright/requirements.txt +3 -0
- connectonion/cli/templates/web-research/agent.py +122 -0
- connectonion/connect.py +128 -0
- connectonion/console.py +539 -0
- connectonion/debug_agent/__init__.py +13 -0
- connectonion/debug_agent/agent.py +45 -0
- connectonion/debug_agent/prompts/debug_assistant.md +72 -0
- connectonion/debug_agent/runtime_inspector.py +406 -0
- connectonion/debug_explainer/__init__.py +10 -0
- connectonion/debug_explainer/explain_agent.py +114 -0
- connectonion/debug_explainer/explain_context.py +263 -0
- connectonion/debug_explainer/explainer_prompt.md +29 -0
- connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
- connectonion/debugger_ui.py +1039 -0
- connectonion/decorators.py +208 -0
- connectonion/events.py +248 -0
- connectonion/execution_analyzer/__init__.py +9 -0
- connectonion/execution_analyzer/execution_analysis.py +93 -0
- connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
- connectonion/host.py +579 -0
- connectonion/interactive_debugger.py +342 -0
- connectonion/llm.py +801 -0
- connectonion/llm_do.py +307 -0
- connectonion/logger.py +300 -0
- connectonion/prompt_files/__init__.py +1 -0
- connectonion/prompt_files/analyze_contact.md +62 -0
- connectonion/prompt_files/eval_expected.md +12 -0
- connectonion/prompt_files/react_evaluate.md +11 -0
- connectonion/prompt_files/react_plan.md +16 -0
- connectonion/prompt_files/reflect.md +22 -0
- connectonion/prompts.py +144 -0
- connectonion/relay.py +200 -0
- connectonion/static/docs.html +688 -0
- connectonion/tool_executor.py +279 -0
- connectonion/tool_factory.py +186 -0
- connectonion/tool_registry.py +105 -0
- connectonion/trust.py +166 -0
- connectonion/trust_agents.py +71 -0
- connectonion/trust_functions.py +88 -0
- connectonion/tui/__init__.py +57 -0
- connectonion/tui/divider.py +39 -0
- connectonion/tui/dropdown.py +251 -0
- connectonion/tui/footer.py +31 -0
- connectonion/tui/fuzzy.py +56 -0
- connectonion/tui/input.py +278 -0
- connectonion/tui/keys.py +35 -0
- connectonion/tui/pick.py +130 -0
- connectonion/tui/providers.py +155 -0
- connectonion/tui/status_bar.py +163 -0
- connectonion/usage.py +161 -0
- connectonion/useful_events_handlers/__init__.py +16 -0
- connectonion/useful_events_handlers/reflect.py +116 -0
- connectonion/useful_plugins/__init__.py +20 -0
- connectonion/useful_plugins/calendar_plugin.py +163 -0
- connectonion/useful_plugins/eval.py +139 -0
- connectonion/useful_plugins/gmail_plugin.py +162 -0
- connectonion/useful_plugins/image_result_formatter.py +127 -0
- connectonion/useful_plugins/re_act.py +78 -0
- connectonion/useful_plugins/shell_approval.py +159 -0
- connectonion/useful_tools/__init__.py +44 -0
- connectonion/useful_tools/diff_writer.py +192 -0
- connectonion/useful_tools/get_emails.py +183 -0
- connectonion/useful_tools/gmail.py +1596 -0
- connectonion/useful_tools/google_calendar.py +613 -0
- connectonion/useful_tools/memory.py +380 -0
- connectonion/useful_tools/microsoft_calendar.py +604 -0
- connectonion/useful_tools/outlook.py +488 -0
- connectonion/useful_tools/send_email.py +205 -0
- connectonion/useful_tools/shell.py +97 -0
- connectonion/useful_tools/slash_command.py +201 -0
- connectonion/useful_tools/terminal.py +285 -0
- connectonion/useful_tools/todo_list.py +241 -0
- connectonion/useful_tools/web_fetch.py +216 -0
- connectonion/xray.py +467 -0
- connectonion-0.5.8.dist-info/METADATA +741 -0
- connectonion-0.5.8.dist-info/RECORD +113 -0
- connectonion-0.5.8.dist-info/WHEEL +4 -0
- connectonion-0.5.8.dist-info/entry_points.txt +3 -0
connectonion/console.py
ADDED
|
@@ -0,0 +1,539 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Purpose: Handle agent terminal output with Rich formatting and optional file logging
|
|
3
|
+
LLM-Note:
|
|
4
|
+
Dependencies: imports from [sys, datetime, pathlib, typing, rich.console, rich.panel, rich.text] | imported by [logger.py, tool_executor.py] | tested by [tests/test_console.py]
|
|
5
|
+
Data flow: receives from Logger/tool_executor → .print(), .log_tool_call(), .log_tool_result() → formats with timestamp → prints to stderr via RichConsole → optionally appends to log_file as plain text
|
|
6
|
+
State/Effects: writes to stderr (not stdout, to avoid mixing with agent results) | writes to log_file if provided (plain text with timestamps) | creates log file parent directories if needed | appends session separator on init
|
|
7
|
+
Integration: exposes Console(log_file), .print(message, style), .log_tool_call(name, args), .log_tool_result(result, timing), .log_llm_response(), .print_xray_table() | tool calls formatted as natural function-call style: greet(name='Alice')
|
|
8
|
+
Performance: direct stderr writes (no buffering delays) | Rich formatting uses stderr (separate from stdout results) | regex-based markup removal for log files
|
|
9
|
+
Errors: no error handling (let I/O errors bubble up) | assumes log_file parent can be created | assumes stderr is available
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import re
|
|
13
|
+
from datetime import datetime
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import Optional, Dict, Any, List, Union
|
|
16
|
+
from rich.console import Console as RichConsole
|
|
17
|
+
from rich.panel import Panel
|
|
18
|
+
from rich.text import Text
|
|
19
|
+
from rich.markup import escape as rich_escape
|
|
20
|
+
|
|
21
|
+
# Use stderr so console output doesn't mix with agent results
_rich_console = RichConsole(stderr=True)

# Brand constants
BRAND_COLOR = "cyan"
PREFIX = "[co]"

# Onion layer symbols (ties to banner ○ ◎ ●)
CIRCLE_EMPTY = "○"   # Request/waiting
CIRCLE_FILLED = "●"  # Response/complete

# Other symbols
TOOL_SYMBOL = "▸"       # Tool execution
SUCCESS_SYMBOL = "✓"    # Action success (tools)
LLM_DONE_SYMBOL = "⚡"  # LLM thinking complete (flash)
ERROR_SYMBOL = "✗"      # Error

# Color scheme - brand + semantic
# Cyan = brand identity ([co], banner)
# Violet = LLM/AI thinking (○ ●)
# Green = tool action (▸) and success (✓)
LLM_COLOR = "magenta"    # Violet for LLM thinking
TOOL_COLOR = "green"     # Green for tool action
SUCCESS_COLOR = "green"  # Success indicators
ERROR_COLOR = "red"      # Errors
DIM_COLOR = "dim"        # Metadata (tokens, cost, time)
|
|
49
|
+
def _get_version() -> str:
    """Get version from package, with fallback.

    Returns:
        The package ``__version__`` string, or ``"unknown"`` if the
        package metadata cannot be imported.
    """
    # The docstring always promised a fallback; without the try/except a
    # broken or partial install would crash banner printing just to show
    # a version string.
    try:
        from . import __version__
    except ImportError:
        return "unknown"
    return __version__
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _prefix() -> str:
    """Return the [co] brand prefix wrapped in Rich markup.

    The literal brackets of PREFIX are escaped via rich_escape so Rich
    renders them as text (in the brand color) instead of parsing them
    as a markup tag.
    """
    escaped = rich_escape(PREFIX)
    return "[" + BRAND_COLOR + "]" + escaped + "[/" + BRAND_COLOR + "]"
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _plain_prefix() -> str:
    """Return the plain-text [co] prefix (no Rich markup) for log files."""
    plain = PREFIX
    return plain
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class Console:
    """Console for agent output and optional file logging.

    Always shows output to help users understand what's happening.
    Similar to FastAPI, npm, cargo - always visible by default.
    """

    def __init__(self, log_file: Optional[Path] = None):
        """Initialize console.

        Args:
            log_file: Optional path to write logs (plain text)
        """
        self.log_file = log_file
        # Pre-initialize the scratchpad shared by log_tool_call() /
        # log_tool_result() so the attribute always exists; previously it
        # only appeared after the first log_tool_call(), forcing
        # log_tool_result() to guard with getattr().
        self._current_tool: Dict[str, Any] = {}

        if self.log_file:
            self._init_log_file()
|
|
85
|
+
|
|
86
|
+
def _init_log_file(self):
|
|
87
|
+
"""Initialize log file with session header."""
|
|
88
|
+
# Create parent dirs if needed
|
|
89
|
+
if self.log_file.parent != Path('.'):
|
|
90
|
+
self.log_file.parent.mkdir(parents=True, exist_ok=True)
|
|
91
|
+
|
|
92
|
+
# Add session separator
|
|
93
|
+
with open(self.log_file, 'a', encoding='utf-8') as f:
|
|
94
|
+
f.write(f"\n{'='*60}\n")
|
|
95
|
+
f.write(f"Session started: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
|
|
96
|
+
f.write(f"{'='*60}\n\n")
|
|
97
|
+
|
|
98
|
+
    def print_banner(
        self,
        agent_name: str,
        model: str = "",
        tools: Union[List[str], int] = 0,
        log_dir: Optional[str] = None,
        llm: Any = None
    ) -> None:
        """Print the ConnectOnion banner (Onion Stack style).

          ○
         ◎ research-assistant
        ● ─────────────────────
          connectonion v0.5.1
          o4-mini · 3 tools
          .co/logs/ · .co/sessions/

        Args:
            agent_name: Name of the agent
            model: Model name (e.g., "co/o4-mini")
            tools: List of tool names or count of tools
            log_dir: Log directory path (e.g., ".co/")
            llm: LLM instance to check for free tier
        """
        version = _get_version()

        # Calculate tools display ("3 tools", "1 tool", or empty when zero)
        if isinstance(tools, list):
            tools_count = len(tools)
        else:
            tools_count = tools
        tools_str = f"{tools_count} tool{'s' if tools_count != 1 else ''}" if tools_count else ""

        # Build meta line: model · tools
        meta_parts = [p for p in [model, tools_str] if p]
        meta_line = " · ".join(meta_parts)

        # Check if using OpenOnion managed keys (free credits from Aaron).
        # NOTE(review): detected by class name rather than isinstance,
        # presumably to avoid importing the LLM class here — confirm the
        # class name "OpenOnionLLM" stays stable.
        is_free_tier = False
        if llm is not None:
            is_free_tier = type(llm).__name__ == "OpenOnionLLM"
        aaron_message = "credits on me, go build —aaron" if is_free_tier else None

        # Calculate separator length (at least as long as agent name, min 20)
        separator_len = max(len(agent_name), 20)
        separator = "─" * separator_len

        # Build the banner lines with Rich markup (Onion Stack - descending layers)
        lines = [
            f"  [{BRAND_COLOR}]{CIRCLE_EMPTY}[/{BRAND_COLOR}]",
            f" [{BRAND_COLOR}]◎[/{BRAND_COLOR}] [bold]{agent_name}[/bold]",
            f"[{BRAND_COLOR}]{CIRCLE_FILLED}[/{BRAND_COLOR}] [{DIM_COLOR}]{separator}[/{DIM_COLOR}]",
            f"  [{BRAND_COLOR}]connectonion[/{BRAND_COLOR}] [{DIM_COLOR}]v{version}[/{DIM_COLOR}]",
        ]

        # Add meta line if there's content
        if meta_line:
            lines.append(f"  [{DIM_COLOR}]{meta_line}[/{DIM_COLOR}]")

        # Add log paths if logging is enabled
        if log_dir:
            lines.append(f"  [{DIM_COLOR}]{log_dir}logs/ · {log_dir}sessions/[/{DIM_COLOR}]")

        # Add Aaron's message for free tier users
        if aaron_message:
            lines.append(f"  [{DIM_COLOR}]{aaron_message}[/{DIM_COLOR}]")

        # Add closing separator
        lines.append(f"  [{DIM_COLOR}]{separator}[/{DIM_COLOR}]")

        # Print with empty line before and after for breathing room
        _rich_console.print()
        for line in lines:
            _rich_console.print(line)
        _rich_console.print()

        # Log to file (plain text version — same layout, no Rich markup)
        if self.log_file:
            plain_lines = [
                f"  {CIRCLE_EMPTY}",
                f" ◎ {agent_name}",
                f"{CIRCLE_FILLED} {separator}",
                f"  connectonion v{version}",
            ]
            if meta_line:
                plain_lines.append(f"  {meta_line}")
            if log_dir:
                plain_lines.append(f"  {log_dir}logs/ · {log_dir}sessions/")
            if aaron_message:
                plain_lines.append(f"  {aaron_message}")
            plain_lines.append(f"  {separator}")

            with open(self.log_file, 'a', encoding='utf-8') as f:
                f.write("\n")
                for line in plain_lines:
                    f.write(f"{line}\n")
                f.write("\n")
|
|
195
|
+
|
|
196
|
+
    def print(self, message: str, style: Optional[str] = None, use_prefix: bool = True) -> None:
        """Print message to console and/or log file.

        Output goes to stderr (via the module-level RichConsole) so it
        never mixes with agent results on stdout.

        Args:
            message: The message (can include Rich markup for console)
            style: Additional Rich style for console only
            use_prefix: Whether to include [co] prefix (default True)
        """
        # Build formatted message with [co] prefix; the plain variant has
        # Rich markup stripped for the log file.
        if use_prefix:
            formatted = f"{_prefix()} {message}"
            plain = f"{_plain_prefix()} {self._to_plain_text(message)}"
        else:
            formatted = message
            plain = self._to_plain_text(message)

        # Print to terminal
        if style:
            _rich_console.print(formatted, style=style)
        else:
            _rich_console.print(formatted)

        # Log file output (plain text, timestamped) if enabled
        if self.log_file:
            timestamp = datetime.now().strftime("%H:%M:%S")
            with open(self.log_file, 'a', encoding='utf-8') as f:
                f.write(f"[{timestamp}] {plain}\n")
|
|
223
|
+
|
|
224
|
+
def print_task(self, task: str) -> None:
|
|
225
|
+
"""Print the user's task/input.
|
|
226
|
+
|
|
227
|
+
[co] > "find the latest AI papers"
|
|
228
|
+
"""
|
|
229
|
+
# Truncate long tasks for display
|
|
230
|
+
display_task = task[:100] + "..." if len(task) > 100 else task
|
|
231
|
+
_rich_console.print() # Empty line before
|
|
232
|
+
self.print(f'> "{display_task}"')
|
|
233
|
+
_rich_console.print() # Empty line after
|
|
234
|
+
|
|
235
|
+
    def print_xray_table(
        self,
        tool_name: str,
        tool_args: Dict[str, Any],
        result: Any,
        timing: float,
        agent: Any
    ) -> None:
        """Print Rich table for @xray decorated tools.

        Shows current tool execution details in a beautiful table format.

        Args:
            tool_name: Name of the tool that was executed
            tool_args: Arguments passed to the tool
            result: Result returned by the tool
            timing: Execution time in milliseconds
            agent: Agent instance with current_session
        """
        # Imported lazily — only @xray-decorated tools pay for these.
        from rich.table import Table
        from rich.console import Group

        table = Table(show_header=False, box=None, padding=(0, 1))
        table.add_column("Key", style="dim")
        table.add_column("Value")

        # Context information
        table.add_row("agent", agent.name)
        user_prompt = agent.current_session.get('user_prompt', '')
        prompt_preview = user_prompt[:50] + "..." if len(user_prompt) > 50 else user_prompt
        table.add_row("user_prompt", prompt_preview)
        iteration = agent.current_session.get('iteration', 0)
        max_iterations = getattr(agent, 'max_iterations', 10)
        table.add_row("iteration", f"{iteration}/{max_iterations}")

        # Separator
        table.add_row("─" * 20, "─" * 40)

        # Tool arguments (values truncated to 60 chars)
        for k, v in tool_args.items():
            val_str = str(v)
            if len(val_str) > 60:
                val_str = val_str[:60] + "..."
            table.add_row(k, val_str)

        # Result (truncated to 60 chars)
        result_str = str(result)
        if len(result_str) > 60:
            result_str = result_str[:60] + "..."
        table.add_row("result", result_str)
        # Show more precision for fast operations (<0.1s), less for slow ones
        time_str = f"{timing/1000:.4f}s" if timing < 100 else f"{timing/1000:.1f}s"
        table.add_row("timing", time_str)

        # Add metadata footer
        metadata = Text(
            f"Execution time: {time_str} | Iteration: {iteration}/{max_iterations} | Breakpoint: @xray",
            style="dim italic",
            justify="center"
        )

        # Group table and metadata
        content = Group(table, Text(""), metadata)

        panel = Panel(content, title=f"[cyan]@xray: {tool_name}[/cyan]", border_style="cyan")
        _rich_console.print(panel)

        # Log to file if enabled (plain text version)
        if self.log_file:
            with open(self.log_file, 'a', encoding='utf-8') as f:
                f.write(f"\n@xray: {tool_name}\n")
                f.write(f"  agent: {agent.name}\n")
                f.write(f"  task: {prompt_preview}\n")
                f.write(f"  iteration: {iteration}/{max_iterations}\n")
                for k, v in tool_args.items():
                    val_str = str(v)[:60]
                    f.write(f"  {k}: {val_str}\n")
                f.write(f"  result: {result_str}\n")
                f.write(f"  Execution time: {timing/1000:.4f}s | Iteration: {iteration}/{max_iterations} | Breakpoint: @xray\n\n")
|
|
314
|
+
|
|
315
|
+
def log_tool_call(self, tool_name: str, tool_args: Dict[str, Any]) -> None:
|
|
316
|
+
"""Log tool call start - stores info for log_tool_result.
|
|
317
|
+
|
|
318
|
+
[co] ▸ search(query="AI papers") ✓ 0.8s
|
|
319
|
+
"""
|
|
320
|
+
# Store for later completion by log_tool_result
|
|
321
|
+
self._current_tool = {
|
|
322
|
+
'name': tool_name,
|
|
323
|
+
'args': tool_args
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
def log_tool_result(self, result: str, timing_ms: float, success: bool = True) -> None:
|
|
327
|
+
"""Log tool completion with timing.
|
|
328
|
+
|
|
329
|
+
[co] ▸ search(query="AI papers") ✓ 0.8s
|
|
330
|
+
"""
|
|
331
|
+
tool_name = getattr(self, '_current_tool', {}).get('name', 'tool')
|
|
332
|
+
tool_args = getattr(self, '_current_tool', {}).get('args', {})
|
|
333
|
+
|
|
334
|
+
# Format tool call with smart truncation
|
|
335
|
+
tool_str = self._format_tool_display(tool_name, tool_args)
|
|
336
|
+
|
|
337
|
+
# Format timing
|
|
338
|
+
time_str = f"{timing_ms/1000:.1f}s" if timing_ms >= 100 else f"{timing_ms/1000:.2f}s"
|
|
339
|
+
|
|
340
|
+
# Build status indicator
|
|
341
|
+
if success:
|
|
342
|
+
status = f"[{SUCCESS_COLOR}]{SUCCESS_SYMBOL}[/{SUCCESS_COLOR}] [{DIM_COLOR}]{time_str}[/{DIM_COLOR}]"
|
|
343
|
+
else:
|
|
344
|
+
status = f"[{ERROR_COLOR}]{ERROR_SYMBOL}[/{ERROR_COLOR}] [{DIM_COLOR}]{time_str}[/{DIM_COLOR}]"
|
|
345
|
+
|
|
346
|
+
# Right-align the status (target ~55 chars for tool part)
|
|
347
|
+
# Green theme for tool action (triangle + tool name)
|
|
348
|
+
tool_display = f" [{TOOL_COLOR}]{TOOL_SYMBOL} {tool_str}[/{TOOL_COLOR}]"
|
|
349
|
+
# Calculate padding (account for markup being removed in display)
|
|
350
|
+
visible_len = len(f" {TOOL_SYMBOL} {tool_str}")
|
|
351
|
+
padding = max(1, 50 - visible_len)
|
|
352
|
+
|
|
353
|
+
self.print(f"{tool_display}{' ' * padding}{status}")
|
|
354
|
+
|
|
355
|
+
def _format_tool_display(self, name: str, args: Dict[str, Any], max_width: int = 45) -> str:
|
|
356
|
+
"""Format tool call with smart truncation.
|
|
357
|
+
|
|
358
|
+
Rules:
|
|
359
|
+
1. Max ~45 chars for tool display
|
|
360
|
+
2. Show all param names when possible
|
|
361
|
+
3. Use ... when truncation needed
|
|
362
|
+
"""
|
|
363
|
+
if not args:
|
|
364
|
+
return f"{name}()"
|
|
365
|
+
|
|
366
|
+
# Calculate available space for args
|
|
367
|
+
base_len = len(name) + 2 # name + ()
|
|
368
|
+
available = max_width - base_len
|
|
369
|
+
|
|
370
|
+
# Format each arg
|
|
371
|
+
formatted = []
|
|
372
|
+
for key, val in args.items():
|
|
373
|
+
if isinstance(val, str):
|
|
374
|
+
formatted.append((key, f'"{val}"'))
|
|
375
|
+
else:
|
|
376
|
+
formatted.append((key, str(val)))
|
|
377
|
+
|
|
378
|
+
# Try to fit all args
|
|
379
|
+
def build_args(items, max_val_len=None):
|
|
380
|
+
parts = []
|
|
381
|
+
for key, val in items:
|
|
382
|
+
if max_val_len and len(val) > max_val_len:
|
|
383
|
+
val = val[:max_val_len-3] + '..."' if val.startswith('"') else val[:max_val_len-3] + "..."
|
|
384
|
+
parts.append(f"{key}={val}")
|
|
385
|
+
return ", ".join(parts)
|
|
386
|
+
|
|
387
|
+
# First try: full values
|
|
388
|
+
args_str = build_args(formatted)
|
|
389
|
+
if len(args_str) <= available:
|
|
390
|
+
return f"{name}({args_str})"
|
|
391
|
+
|
|
392
|
+
# Second try: truncate values to 20 chars each
|
|
393
|
+
args_str = build_args(formatted, max_val_len=20)
|
|
394
|
+
if len(args_str) <= available:
|
|
395
|
+
return f"{name}({args_str})"
|
|
396
|
+
|
|
397
|
+
# Third try: truncate values to 10 chars each
|
|
398
|
+
args_str = build_args(formatted, max_val_len=10)
|
|
399
|
+
if len(args_str) <= available:
|
|
400
|
+
return f"{name}({args_str})"
|
|
401
|
+
|
|
402
|
+
# Last resort: first 2 args truncated + ...
|
|
403
|
+
if len(formatted) > 2:
|
|
404
|
+
args_str = build_args(formatted[:2], max_val_len=10) + ", ..."
|
|
405
|
+
else:
|
|
406
|
+
args_str = build_args(formatted, max_val_len=8)
|
|
407
|
+
|
|
408
|
+
return f"{name}({args_str})"
|
|
409
|
+
|
|
410
|
+
def print_llm_request(self, model: str, session: Dict[str, Any], max_iterations: int) -> None:
|
|
411
|
+
"""Print LLM request with violet empty circle (AI thinking).
|
|
412
|
+
|
|
413
|
+
[co] ○ gemini-2.5-flash 1/10
|
|
414
|
+
|
|
415
|
+
Args:
|
|
416
|
+
model: Model name
|
|
417
|
+
session: Agent's current_session dict
|
|
418
|
+
max_iterations: Agent's max_iterations setting
|
|
419
|
+
"""
|
|
420
|
+
iteration = session.get('iteration', 1)
|
|
421
|
+
|
|
422
|
+
# Build the line: violet circle, white model, dim metadata
|
|
423
|
+
main_part = f"[{LLM_COLOR}]{CIRCLE_EMPTY}[/{LLM_COLOR}] {model}"
|
|
424
|
+
meta_part = f"[{DIM_COLOR}]{iteration}/{max_iterations}[/{DIM_COLOR}]"
|
|
425
|
+
|
|
426
|
+
# Right-align the iteration
|
|
427
|
+
visible_len = len(f"{CIRCLE_EMPTY} {model}")
|
|
428
|
+
padding = max(1, 50 - visible_len)
|
|
429
|
+
|
|
430
|
+
self.print(f"{main_part}{' ' * padding}{meta_part}")
|
|
431
|
+
|
|
432
|
+
def log_llm_response(self, model: str, duration_ms: float, tool_count: int, usage) -> None:
|
|
433
|
+
"""Log LLM response with violet filled circle (AI done thinking).
|
|
434
|
+
|
|
435
|
+
[co] ● gemini-2.5-flash · 1 tools · 66 tok (42 cached) · $0.00 ✓ 1.8s
|
|
436
|
+
|
|
437
|
+
Args:
|
|
438
|
+
model: Model name
|
|
439
|
+
duration_ms: Response time in milliseconds
|
|
440
|
+
tool_count: Number of tool calls requested
|
|
441
|
+
usage: TokenUsage object with input_tokens, output_tokens, cached_tokens, cost
|
|
442
|
+
"""
|
|
443
|
+
# Format tokens with cache info
|
|
444
|
+
total_tokens = usage.input_tokens + usage.output_tokens
|
|
445
|
+
tokens_str = f"{total_tokens/1000:.1f}k tok" if total_tokens >= 1000 else f"{total_tokens} tok"
|
|
446
|
+
cached = getattr(usage, 'cached_tokens', 0)
|
|
447
|
+
if cached:
|
|
448
|
+
tokens_str = f"{tokens_str} ({cached} cached)"
|
|
449
|
+
|
|
450
|
+
# Format cost
|
|
451
|
+
cost_str = f"${usage.cost:.4f}" if usage.cost < 0.01 else f"${usage.cost:.2f}"
|
|
452
|
+
|
|
453
|
+
# Format timing
|
|
454
|
+
time_str = f"{duration_ms/1000:.1f}s" if duration_ms >= 100 else f"{duration_ms/1000:.2f}s"
|
|
455
|
+
|
|
456
|
+
# Build main part: violet circle, white model, white tools, dim metadata
|
|
457
|
+
circle = f"[{LLM_COLOR}]{CIRCLE_FILLED}[/{LLM_COLOR}]"
|
|
458
|
+
info_parts = [model]
|
|
459
|
+
if tool_count:
|
|
460
|
+
tool_word = "tool" if tool_count == 1 else "tools"
|
|
461
|
+
info_parts.append(f"{tool_count} {tool_word}")
|
|
462
|
+
info_parts.append(f"[{DIM_COLOR}]{tokens_str} · {cost_str}[/{DIM_COLOR}]")
|
|
463
|
+
main_part = f"{circle} " + " · ".join(info_parts)
|
|
464
|
+
|
|
465
|
+
# Build status: flash symbol for LLM completion, dim time
|
|
466
|
+
status = f"[{LLM_COLOR}]{LLM_DONE_SYMBOL}[/{LLM_COLOR}] [{DIM_COLOR}]{time_str}[/{DIM_COLOR}]"
|
|
467
|
+
|
|
468
|
+
# Calculate visible length for padding
|
|
469
|
+
visible_text = f"{CIRCLE_FILLED} {model}"
|
|
470
|
+
if tool_count:
|
|
471
|
+
visible_text += f" · {tool_count} tools"
|
|
472
|
+
visible_text += f" · {tokens_str} · {cost_str}"
|
|
473
|
+
padding = max(1, 55 - len(visible_text))
|
|
474
|
+
|
|
475
|
+
self.print(f"{main_part}{' ' * padding}{status}")
|
|
476
|
+
|
|
477
|
+
def print_completion(
|
|
478
|
+
self,
|
|
479
|
+
duration_s: float,
|
|
480
|
+
session: Dict[str, Any],
|
|
481
|
+
session_path: Optional[str] = None
|
|
482
|
+
) -> None:
|
|
483
|
+
"""Print completion summary.
|
|
484
|
+
|
|
485
|
+
[co] ═══════════════════════════════════════
|
|
486
|
+
[co] ✓ done · 2.3k tokens · $0.005 · 3.4s
|
|
487
|
+
[co] saved → .co/sessions/research-assistant.yaml
|
|
488
|
+
|
|
489
|
+
Args:
|
|
490
|
+
duration_s: Total duration in seconds
|
|
491
|
+
session: Agent's current_session dict (contains trace with usage)
|
|
492
|
+
session_path: Optional path to session file
|
|
493
|
+
"""
|
|
494
|
+
# Calculate totals from trace
|
|
495
|
+
trace = session.get('trace', [])
|
|
496
|
+
llm_calls = [t for t in trace if t.get('type') == 'llm_call']
|
|
497
|
+
total_tokens = sum(
|
|
498
|
+
(t.get('usage').input_tokens + t.get('usage').output_tokens)
|
|
499
|
+
for t in llm_calls if t.get('usage')
|
|
500
|
+
)
|
|
501
|
+
total_cost = sum(
|
|
502
|
+
t.get('usage').cost
|
|
503
|
+
for t in llm_calls if t.get('usage')
|
|
504
|
+
)
|
|
505
|
+
|
|
506
|
+
# Format tokens
|
|
507
|
+
tokens_str = f"{total_tokens/1000:.1f}k" if total_tokens >= 1000 else str(total_tokens)
|
|
508
|
+
|
|
509
|
+
# Format cost
|
|
510
|
+
cost_str = f"${total_cost:.4f}" if total_cost < 0.01 else f"${total_cost:.3f}"
|
|
511
|
+
|
|
512
|
+
# Format time
|
|
513
|
+
time_str = f"{duration_s:.1f}s"
|
|
514
|
+
|
|
515
|
+
# Print separator
|
|
516
|
+
_rich_console.print()
|
|
517
|
+
self.print(f"[{DIM_COLOR}]═══════════════════════════════════════════════[/{DIM_COLOR}]")
|
|
518
|
+
|
|
519
|
+
# Print summary: green check, white "complete", dim metadata
|
|
520
|
+
self.print(f"[{SUCCESS_COLOR}]{SUCCESS_SYMBOL}[/{SUCCESS_COLOR}] complete [{DIM_COLOR}]· {tokens_str} tokens · {cost_str} · {time_str}[/{DIM_COLOR}]")
|
|
521
|
+
|
|
522
|
+
# Print session path if provided (dim)
|
|
523
|
+
if session_path:
|
|
524
|
+
self.print(f" [{DIM_COLOR}]{session_path}[/{DIM_COLOR}]")
|
|
525
|
+
|
|
526
|
+
_rich_console.print()
|
|
527
|
+
|
|
528
|
+
def _to_plain_text(self, message: str) -> str:
|
|
529
|
+
"""Convert Rich markup to plain text for log file."""
|
|
530
|
+
# Remove Rich markup tags (matches anything in brackets: [bold cyan], [#FF0000], etc.)
|
|
531
|
+
text = re.sub(r'\[[^\]]*\]', '', message)
|
|
532
|
+
|
|
533
|
+
# Convert common symbols
|
|
534
|
+
text = text.replace('→', '->')
|
|
535
|
+
text = text.replace('←', '<-')
|
|
536
|
+
text = text.replace('✓', '[OK]')
|
|
537
|
+
text = text.replace('✗', '[ERROR]')
|
|
538
|
+
|
|
539
|
+
return text
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Debug agent for enhanced AI-powered exception analysis with runtime inspection.
|
|
2
|
+
|
|
3
|
+
A specialized agent that uses RuntimeInspector to experiment, test, and
|
|
4
|
+
validate fixes using the actual data that caused the crash.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .agent import create_debug_agent
|
|
8
|
+
from .runtime_inspector import RuntimeInspector
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"create_debug_agent",
|
|
12
|
+
"RuntimeInspector"
|
|
13
|
+
]
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Purpose: Factory function to create debug agents with runtime inspection capabilities for AI-powered exception analysis
|
|
3
|
+
LLM-Note:
|
|
4
|
+
Dependencies: imports from [pathlib, ../agent.py, runtime_inspector.py] | imported by [__init__.py, auto_debug_exception.py] | tested by [tests/test_debug_agent.py]
|
|
5
|
+
Data flow: auto_debug_exception() calls create_debug_agent(frame, traceback, model) → creates RuntimeInspector(frame, traceback) → loads system prompt from prompts/debug_assistant.md → creates Agent with inspector as tool → returns Agent configured for debugging
|
|
6
|
+
State/Effects: reads debug_assistant.md file | creates Agent and RuntimeInspector instances | no writes or global state | inspector has frozen exception frame context
|
|
7
|
+
Integration: exposes create_debug_agent(frame, exception_traceback, model) function | agent gets RuntimeInspector methods as tools via automatic method extraction | used by auto_debug_exception() to analyze crashes
|
|
8
|
+
Performance: one-time setup per exception | loads prompt from file once | inspector methods have minimal overhead
|
|
9
|
+
Errors: FileNotFoundError if prompt file missing | Agent creation errors propagate | model defaults to "o4-mini" for speed | max_iterations=5 for experimentation loops
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from ..agent import Agent
|
|
14
|
+
from .runtime_inspector import RuntimeInspector
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def create_debug_agent(frame=None, exception_traceback=None, model: str = "o4-mini") -> Agent:
    """Create a debug agent with runtime inspection capabilities.

    The agent uses a RuntimeInspector instance as a tool, which provides
    access to the actual runtime state when an exception occurs.

    Args:
        frame: The exception frame (from traceback.tb_frame)
        exception_traceback: The traceback object
        model: LLM model to use (default: o4-mini for speed)

    Returns:
        Configured Agent with RuntimeInspector as a tool
    """
    # Freeze the crash context inside the inspector
    inspector = RuntimeInspector(frame=frame, exception_traceback=exception_traceback)

    # The system prompt ships next to this module
    prompt_path = Path(__file__).parent / "prompts" / "debug_assistant.md"

    # Passing the inspector instance as a tool lets ConnectOnion discover
    # all of its public methods automatically.
    agent = Agent(
        name="debug_agent",
        model=model,
        system_prompt=prompt_path,
        tools=[inspector],
        max_iterations=5,  # More iterations for experimentation
    )
    return agent
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# Python Runtime Debug Assistant
|
|
2
|
+
|
|
3
|
+
You are a Python debugging assistant with LIVE access to the crashed program's runtime state.
|
|
4
|
+
You can execute code, inspect objects, and test fixes using the actual data that caused the crash!
|
|
5
|
+
|
|
6
|
+
## Your Superpower: Runtime Access
|
|
7
|
+
You're not just analyzing code - you have the ACTUAL runtime context where the exception occurred.
|
|
8
|
+
This means you can:
|
|
9
|
+
- Execute any Python code with the real variables
|
|
10
|
+
- Inspect objects to see their actual state
|
|
11
|
+
- Test fixes with the problematic data
|
|
12
|
+
- Validate assumptions about types and values
|
|
13
|
+
- Trace how variables got their values
|
|
14
|
+
|
|
15
|
+
## Your Tools (Use Them!)
|
|
16
|
+
|
|
17
|
+
### 🔬 Runtime Investigation Tools
|
|
18
|
+
1. **execute_in_frame** - Run Python code in the exception context
|
|
19
|
+
- Check types: `execute_in_frame("type(profile)")`
|
|
20
|
+
- See keys: `execute_in_frame("list(profile.keys())")`
|
|
21
|
+
- Access values: `execute_in_frame("profile['name']")`
|
|
22
|
+
|
|
23
|
+
2. **inspect_object** - Deep dive into any object
|
|
24
|
+
- See all attributes, methods, and current state
|
|
25
|
+
- Example: `inspect_object("profile")`
|
|
26
|
+
|
|
27
|
+
3. **test_fix** - Test your proposed fix with real data
|
|
28
|
+
- Compare original vs fixed code behavior
|
|
29
|
+
- Verify the fix actually works before suggesting it
|
|
30
|
+
|
|
31
|
+
4. **validate_assumption** - Test hypotheses about the data
|
|
32
|
+
- Check types, membership, conditions
|
|
33
|
+
- Example: `validate_assumption("'notifications' in profile")`
|
|
34
|
+
|
|
35
|
+
5. **trace_variable** - See how a variable changed through the call stack
|
|
36
|
+
- Track variable values across function calls
|
|
37
|
+
|
|
38
|
+
### 📖 Static Analysis Tools
|
|
39
|
+
6. **read_source_around_error** - Read the source code context
|
|
40
|
+
|
|
41
|
+
## Debugging Strategy
|
|
42
|
+
|
|
43
|
+
1. **FIRST: Investigate the runtime state**
|
|
44
|
+
- Use `execute_in_frame` to check actual values
|
|
45
|
+
- Use `inspect_object` to understand data structures
|
|
46
|
+
- Use `validate_assumption` to test your hypotheses
|
|
47
|
+
|
|
48
|
+
2. **THEN: Test potential fixes**
|
|
49
|
+
- Use `test_fix` to verify fixes work with the actual data
|
|
50
|
+
- Don't guess - TEST with the real runtime values!
|
|
51
|
+
|
|
52
|
+
3. **FINALLY: Provide the solution**
|
|
53
|
+
- Only suggest fixes you've verified work
|
|
54
|
+
- Show the actual data that proves your solution
|
|
55
|
+
|
|
56
|
+
## Example Investigation Flow
|
|
57
|
+
|
|
58
|
+
For a KeyError on `data['key']`:
|
|
59
|
+
1. Check what keys exist: `execute_in_frame("list(data.keys())")`
|
|
60
|
+
2. Inspect the object: `inspect_object("data")`
|
|
61
|
+
3. Test the fix: `test_fix("data['key']", "data.get('key', None)")` (substitute a sensible fallback for `None`)
|
|
62
|
+
4. Validate it works: `validate_assumption("data.get('key') is not None")`
|
|
63
|
+
|
|
64
|
+
## Response Format
|
|
65
|
+
|
|
66
|
+
Keep responses concise but include evidence from your investigation:
|
|
67
|
+
|
|
68
|
+
1. **What I found** - Show actual runtime values you discovered
|
|
69
|
+
2. **Why it failed** - Explain with evidence from the runtime state
|
|
70
|
+
3. **Verified fix** - Solution you tested with `test_fix` that works
|
|
71
|
+
|
|
72
|
+
Remember: You have the actual crashed state - use it! Don't guess when you can test!
|