connectonion 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113) hide show
  1. connectonion/__init__.py +78 -0
  2. connectonion/address.py +320 -0
  3. connectonion/agent.py +450 -0
  4. connectonion/announce.py +84 -0
  5. connectonion/asgi.py +287 -0
  6. connectonion/auto_debug_exception.py +181 -0
  7. connectonion/cli/__init__.py +3 -0
  8. connectonion/cli/browser_agent/__init__.py +5 -0
  9. connectonion/cli/browser_agent/browser.py +243 -0
  10. connectonion/cli/browser_agent/prompt.md +107 -0
  11. connectonion/cli/commands/__init__.py +1 -0
  12. connectonion/cli/commands/auth_commands.py +527 -0
  13. connectonion/cli/commands/browser_commands.py +27 -0
  14. connectonion/cli/commands/create.py +511 -0
  15. connectonion/cli/commands/deploy_commands.py +220 -0
  16. connectonion/cli/commands/doctor_commands.py +173 -0
  17. connectonion/cli/commands/init.py +469 -0
  18. connectonion/cli/commands/project_cmd_lib.py +828 -0
  19. connectonion/cli/commands/reset_commands.py +149 -0
  20. connectonion/cli/commands/status_commands.py +168 -0
  21. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
  22. connectonion/cli/docs/connectonion.md +1256 -0
  23. connectonion/cli/docs.md +123 -0
  24. connectonion/cli/main.py +148 -0
  25. connectonion/cli/templates/meta-agent/README.md +287 -0
  26. connectonion/cli/templates/meta-agent/agent.py +196 -0
  27. connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
  28. connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
  29. connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
  30. connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
  31. connectonion/cli/templates/minimal/README.md +56 -0
  32. connectonion/cli/templates/minimal/agent.py +40 -0
  33. connectonion/cli/templates/playwright/README.md +118 -0
  34. connectonion/cli/templates/playwright/agent.py +336 -0
  35. connectonion/cli/templates/playwright/prompt.md +102 -0
  36. connectonion/cli/templates/playwright/requirements.txt +3 -0
  37. connectonion/cli/templates/web-research/agent.py +122 -0
  38. connectonion/connect.py +128 -0
  39. connectonion/console.py +539 -0
  40. connectonion/debug_agent/__init__.py +13 -0
  41. connectonion/debug_agent/agent.py +45 -0
  42. connectonion/debug_agent/prompts/debug_assistant.md +72 -0
  43. connectonion/debug_agent/runtime_inspector.py +406 -0
  44. connectonion/debug_explainer/__init__.py +10 -0
  45. connectonion/debug_explainer/explain_agent.py +114 -0
  46. connectonion/debug_explainer/explain_context.py +263 -0
  47. connectonion/debug_explainer/explainer_prompt.md +29 -0
  48. connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
  49. connectonion/debugger_ui.py +1039 -0
  50. connectonion/decorators.py +208 -0
  51. connectonion/events.py +248 -0
  52. connectonion/execution_analyzer/__init__.py +9 -0
  53. connectonion/execution_analyzer/execution_analysis.py +93 -0
  54. connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
  55. connectonion/host.py +579 -0
  56. connectonion/interactive_debugger.py +342 -0
  57. connectonion/llm.py +801 -0
  58. connectonion/llm_do.py +307 -0
  59. connectonion/logger.py +300 -0
  60. connectonion/prompt_files/__init__.py +1 -0
  61. connectonion/prompt_files/analyze_contact.md +62 -0
  62. connectonion/prompt_files/eval_expected.md +12 -0
  63. connectonion/prompt_files/react_evaluate.md +11 -0
  64. connectonion/prompt_files/react_plan.md +16 -0
  65. connectonion/prompt_files/reflect.md +22 -0
  66. connectonion/prompts.py +144 -0
  67. connectonion/relay.py +200 -0
  68. connectonion/static/docs.html +688 -0
  69. connectonion/tool_executor.py +279 -0
  70. connectonion/tool_factory.py +186 -0
  71. connectonion/tool_registry.py +105 -0
  72. connectonion/trust.py +166 -0
  73. connectonion/trust_agents.py +71 -0
  74. connectonion/trust_functions.py +88 -0
  75. connectonion/tui/__init__.py +57 -0
  76. connectonion/tui/divider.py +39 -0
  77. connectonion/tui/dropdown.py +251 -0
  78. connectonion/tui/footer.py +31 -0
  79. connectonion/tui/fuzzy.py +56 -0
  80. connectonion/tui/input.py +278 -0
  81. connectonion/tui/keys.py +35 -0
  82. connectonion/tui/pick.py +130 -0
  83. connectonion/tui/providers.py +155 -0
  84. connectonion/tui/status_bar.py +163 -0
  85. connectonion/usage.py +161 -0
  86. connectonion/useful_events_handlers/__init__.py +16 -0
  87. connectonion/useful_events_handlers/reflect.py +116 -0
  88. connectonion/useful_plugins/__init__.py +20 -0
  89. connectonion/useful_plugins/calendar_plugin.py +163 -0
  90. connectonion/useful_plugins/eval.py +139 -0
  91. connectonion/useful_plugins/gmail_plugin.py +162 -0
  92. connectonion/useful_plugins/image_result_formatter.py +127 -0
  93. connectonion/useful_plugins/re_act.py +78 -0
  94. connectonion/useful_plugins/shell_approval.py +159 -0
  95. connectonion/useful_tools/__init__.py +44 -0
  96. connectonion/useful_tools/diff_writer.py +192 -0
  97. connectonion/useful_tools/get_emails.py +183 -0
  98. connectonion/useful_tools/gmail.py +1596 -0
  99. connectonion/useful_tools/google_calendar.py +613 -0
  100. connectonion/useful_tools/memory.py +380 -0
  101. connectonion/useful_tools/microsoft_calendar.py +604 -0
  102. connectonion/useful_tools/outlook.py +488 -0
  103. connectonion/useful_tools/send_email.py +205 -0
  104. connectonion/useful_tools/shell.py +97 -0
  105. connectonion/useful_tools/slash_command.py +201 -0
  106. connectonion/useful_tools/terminal.py +285 -0
  107. connectonion/useful_tools/todo_list.py +241 -0
  108. connectonion/useful_tools/web_fetch.py +216 -0
  109. connectonion/xray.py +467 -0
  110. connectonion-0.5.8.dist-info/METADATA +741 -0
  111. connectonion-0.5.8.dist-info/RECORD +113 -0
  112. connectonion-0.5.8.dist-info/WHEEL +4 -0
  113. connectonion-0.5.8.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,342 @@
1
+ """
2
+ Purpose: Orchestrate interactive debugging sessions by intercepting tool execution and pausing at breakpoints
3
+ LLM-Note:
4
+ Dependencies: imports from [typing, debugger_ui.py, tool_executor.py, xray.py, debug_explainer/] | imported by [agent.py via .auto_debug()] | no dedicated test file found
5
+ Data flow: Agent.auto_debug(prompt) → creates InteractiveDebugger(agent, ui) → start_debug_session(prompt) → _attach_debugger_to_tool_execution() patches tool_executor.execute_single_tool globally → agent.input(prompt) runs normally → interceptor checks @xray or error status → pauses at _show_breakpoint_ui_and_wait_for_continue() → ui.show_breakpoint(context) → user actions: CONTINUE, EDIT (modify values), WHY (AI explanation), QUIT → _detach_debugger_from_tool_execution() restores original
6
+ State/Effects: MODIFIES tool_executor.execute_single_tool GLOBALLY (monkey-patch) | stores original in self.original_execute_single_tool | only affects self.agent (interceptor checks agent identity) | can modify trace_entry['result'], agent.current_session['iteration'], agent.max_iterations based on user edits | restored in finally block
7
+ Integration: exposes InteractiveDebugger(agent, ui), .start_debug_session(prompt) | accessed via Agent.auto_debug() method | uses DebuggerUI for all display and input | creates BreakpointContext with tool_name, tool_args, trace_entry, user_prompt, iteration, max_iterations, previous_tools, next_actions, tool_function | calls explain_tool_choice() from debug_explainer for WHY action
8
+ Performance: zero overhead when not debugging | interceptor adds minimal check per tool | LLM preview call (_get_llm_next_action_preview) makes extra LLM request to show what agent plans next | single-session or interactive loop mode based on prompt parameter
9
+ Errors: wraps agent execution in try/except KeyboardInterrupt | raises KeyboardInterrupt on QUIT action | preview failures are caught and printed (non-fatal) | CRITICAL: interceptor only affects tools executed by self.agent (identity check prevents cross-agent interference)
10
+
11
+ CRITICAL TIMING NOTE for _get_llm_next_action_preview():
12
+ Debugger pauses DURING tool execution (inside execute_single_tool), so current tool's result hasn't been added to messages yet. Timeline: 1) assistant message with tool_calls added, 2) tool executes → PAUSE HERE, 3) tool result message NOT YET ADDED. Must manually append current tool result to temp_messages for LLM preview to work correctly.
13
+ """
14
+
15
+ from typing import Any, Dict, Optional, List
16
+ from .debugger_ui import DebuggerUI, BreakpointContext, BreakpointAction
17
+
18
+
19
class InteractiveDebugger:
    """Orchestrates debugging sessions for AI agents.

    This class handles the debugging logic and intercepts tool execution,
    delegating all UI interactions to the DebuggerUI class.

    It works by monkey-patching ``tool_executor.execute_single_tool`` for the
    duration of a task, pausing at breakpoints (``@xray``-decorated tools or
    tool errors) for the agent it owns, and restoring the original executor
    afterwards.
    """

    def __init__(self, agent: Any, ui: Optional[DebuggerUI] = None):
        """Initialize debugger with an agent instance and optional UI.

        Args:
            agent: The Agent instance to debug
            ui: Optional DebuggerUI instance (creates default if None)
        """
        self.agent = agent
        self.ui = ui or DebuggerUI()
        # Holds the pre-patch tool_executor.execute_single_tool while the
        # interceptor is installed; None whenever the debugger is detached.
        self.original_execute_single_tool = None

    def start_debug_session(self, prompt: Optional[str] = None):
        """Start a debugging session for the agent.

        Args:
            prompt: Optional prompt to debug. If provided, runs single session.
                If None, runs in interactive mode.

        Orchestrates the debug session by:
        1. Showing welcome message via UI
        2. Either using provided prompt or getting from user
        3. Executing tasks with debugging enabled
        4. Showing results via UI
        """
        # Show welcome
        self.ui.show_welcome(self.agent.name)

        # Determine mode based on prompt
        if prompt:
            # Single prompt mode - execute once and exit
            self._execute_debug_task(prompt)
        else:
            # Interactive mode - loop until user quits (UI returns None)
            while True:
                user_prompt = self.ui.get_user_prompt()
                if user_prompt is None:
                    break  # User wants to quit

                self._execute_debug_task(user_prompt)

    def _execute_debug_task(self, prompt: str):
        """Execute a single task with debugging enabled.

        Args:
            prompt: The task prompt to execute
        """
        # Attach debugger to intercept tool execution
        self._attach_debugger_to_tool_execution()

        result = None
        try:
            # Execute the prompt with debugging
            self.ui.show_executing(prompt)
            result = self.agent.input(prompt)
            self.ui.show_result(result)

            # Post-execution analysis
            self._show_execution_analysis(prompt, result)

        except KeyboardInterrupt:
            # Raised by QUIT at a breakpoint, or a real Ctrl-C
            self.ui.show_interrupted()

        finally:
            # Always restore the original executor, even on interrupt/error
            self._detach_debugger_from_tool_execution()

    def _attach_debugger_to_tool_execution(self):
        """Attach debugger to intercept tool execution and pause at breakpoints.

        This installs an interceptor that will:
        - Execute tools normally
        - Check if the tool has @xray or encountered an error
        - Pause execution and show UI if breakpoint conditions are met
        - Only affect this specific agent instance
        """
        from . import tool_executor
        from .xray import is_xray_enabled

        # Guard against double-attach: if we patched again while already
        # attached, we would capture our own interceptor as the "original"
        # and could never fully restore the real executor.
        if self.original_execute_single_tool is not None:
            return

        # Store original function for restoration later
        self.original_execute_single_tool = tool_executor.execute_single_tool

        # Create interceptor function
        def tool_execution_interceptor(tool_name, tool_args, tool_id, tools, agent, console):
            # Execute tool normally via the saved original executor
            trace_entry = self.original_execute_single_tool(
                tool_name, tool_args, tool_id, tools, agent, console
            )

            # CRITICAL: Only debug OUR agent, not all agents in the process
            # (the patch is global, so other agents route through here too)
            if agent is not self.agent:
                return trace_entry  # Skip debugging for other agents

            # Breakpoint conditions: tool carries @xray, or execution errored
            # (always pause on errors for debugging)
            tool = tools.get(tool_name)
            should_pause = (
                bool(tool and is_xray_enabled(tool))
                or trace_entry.get('status') == 'error'
            )

            if should_pause:
                # Pause at breakpoint and show UI
                self._show_breakpoint_ui_and_wait_for_continue(tool_name, tool_args, trace_entry)

            return trace_entry

        # Install the interceptor
        tool_executor.execute_single_tool = tool_execution_interceptor

    def _detach_debugger_from_tool_execution(self):
        """Detach debugger and restore normal tool execution flow.

        This removes the interceptor and restores the original
        tool execution function.
        """
        if self.original_execute_single_tool:
            from . import tool_executor
            tool_executor.execute_single_tool = self.original_execute_single_tool
            # Clear the saved reference so a later attach stores a fresh
            # original instead of being skipped by the double-attach guard.
            self.original_execute_single_tool = None

    def _show_breakpoint_ui_and_wait_for_continue(self, tool_name: str, tool_args: Dict, trace_entry: Dict):
        """Show breakpoint UI and wait for user to continue.

        This delegates all UI interaction to the DebuggerUI class and
        handles the logic of what to do based on user choices.

        Args:
            tool_name: Name of the tool that executed
            tool_args: Arguments passed to the tool
            trace_entry: Trace entry with execution result (can be modified)
        """
        # Get session context and agent info
        session = self.agent.current_session or {}

        # Gather up to the last 3 previous tools from the trace (excluding
        # the tool we are currently paused on)
        trace = session.get('trace', [])
        previous_tools = [
            entry['tool_name'] for entry in trace[-3:]
            if entry.get('type') == 'tool_execution' and entry.get('tool_name') != tool_name
        ]

        # Get preview of next LLM action
        next_actions = self._get_llm_next_action_preview(tool_name, trace_entry)

        # Get the actual tool function for source inspection
        tool = self.agent.tools.get(tool_name)
        tool_function = tool.run if tool and hasattr(tool, 'run') else None

        # Create context for UI with extended debugging info
        context = BreakpointContext(
            tool_name=tool_name,
            tool_args=tool_args,
            trace_entry=trace_entry,
            user_prompt=session.get('user_prompt', ''),
            iteration=session.get('iteration', 0),
            max_iterations=self.agent.max_iterations,
            previous_tools=previous_tools,
            next_actions=next_actions,
            tool_function=tool_function  # Pass the actual function
        )

        # Keep showing menu until user chooses to continue
        while True:
            action = self.ui.show_breakpoint(context)

            if action == BreakpointAction.CONTINUE:
                break  # Exit the breakpoint
            elif action == BreakpointAction.EDIT:
                self._handle_edit_action(context, tool_name, trace_entry)
            elif action == BreakpointAction.WHY:
                self._handle_why_action(context)
                # Loop back to menu after showing explanation
            elif action == BreakpointAction.QUIT:
                # User wants to quit debugging; caught by _execute_debug_task
                raise KeyboardInterrupt("User quit debugging session")

    def _handle_edit_action(self, context, tool_name: str, trace_entry: Dict):
        """Let the user edit values at the breakpoint and apply the changes.

        Modifications may touch the tool result (re-triggers the LLM
        preview), the displayed tool args, the session iteration counter,
        or the agent's max_iterations.

        Args:
            context: The BreakpointContext shown in the UI (mutated in place)
            tool_name: Name of the tool currently paused on
            trace_entry: Trace entry whose 'result' may be overwritten
        """
        # Let user edit values in Python REPL
        modifications = self.ui.edit_value(context, agent=self.agent)

        if 'result' in modifications:
            trace_entry['result'] = modifications['result']
            # Re-generate preview with edited value
            context.next_actions = self._get_llm_next_action_preview(tool_name, trace_entry)

        if 'tool_args' in modifications:
            # Update tool_args in context (for display purposes)
            context.tool_args.update(modifications['tool_args'])

        if 'iteration' in modifications:
            # Update iteration in session
            if self.agent.current_session:
                self.agent.current_session['iteration'] = modifications['iteration']
            context.iteration = modifications['iteration']

        if 'max_iterations' in modifications:
            # Update max_iterations on agent
            self.agent.max_iterations = modifications['max_iterations']
            context.max_iterations = modifications['max_iterations']

    def _handle_why_action(self, context):
        """Show an AI-generated explanation of why the current tool was chosen.

        Args:
            context: The BreakpointContext for the paused tool
        """
        from .debug_explainer import explain_tool_choice
        from rich.console import Console

        # Show progress indicator while analyzing (extra LLM call)
        console = Console()
        with console.status("[bold cyan]🤔 Analyzing why this tool was chosen...[/bold cyan]", spinner="dots"):
            explanation = explain_tool_choice(context, self.agent, model=self.agent.llm.model)

        self.ui.display_explanation(explanation, context)

    def _get_llm_next_action_preview(self, tool_name: str, trace_entry: Dict) -> Optional[List[Dict]]:
        """Get a preview of what the LLM plans to do next without executing.

        This simulates the next iteration by calling the LLM with the current
        tool result, but doesn't actually execute the planned tools.

        CRITICAL TIMING NOTE:
        =====================
        The debugger pauses DURING tool execution (inside execute_single_tool),
        which means the current tool's result hasn't been added to messages yet.

        Timeline:
        1. tool_executor adds assistant message with tool_calls
        2. tool_executor executes tool -> **DEBUGGER PAUSES HERE**
        3. tool_executor adds tool result message (NOT REACHED YET!)

        So agent.current_session['messages'] contains the assistant message
        with ALL tool_calls and results from PREVIOUS tools (if parallel
        execution), but NOT the result of the CURRENT tool. Therefore we must
        manually append the current tool's result to get a complete message
        history for the LLM preview.

        Args:
            tool_name: Name of the tool that just executed
            trace_entry: The execution result

        Returns:
            List of planned tool calls (each with 'name' and 'args'),
            or None if no tools planned or error occurred
        """
        try:
            # Start with current messages (has assistant message + previous tool results)
            temp_messages = self.agent.current_session['messages'].copy()

            # Collect ids of tool_calls that already have a result message,
            # so a parallel batch calling the same tool twice doesn't get the
            # current result attached to an already-answered call.
            answered_ids = {
                msg.get('tool_call_id')
                for msg in temp_messages
                if msg.get('role') == 'tool'
            }

            # Add the current tool's result to complete the message history
            # (see docstring above - we're paused mid-execution)
            for msg in reversed(temp_messages):
                if msg.get('role') == 'assistant' and msg.get('tool_calls'):
                    for tool_call in msg.get('tool_calls', []):
                        if (tool_call.get('function', {}).get('name') == tool_name
                                and tool_call.get('id') not in answered_ids):
                            # Add the missing tool result message for preview
                            temp_messages.append({
                                "role": "tool",
                                "tool_call_id": tool_call['id'],
                                "content": str(trace_entry.get('result', ''))
                            })
                            break
                    break

            # Call LLM to get its next planned action, using the agent's own
            # LLM and tools configuration.
            # NOTE(review): assumes iterating self.agent.tools yields tool
            # objects (it is also used with .get() above) - registry supports
            # both; confirm against the Agent tools container.
            tool_schemas = [tool.to_function_schema() for tool in self.agent.tools] if self.agent.tools else None

            # Make the LLM call
            response = self.agent.llm.complete(temp_messages, tools=tool_schemas)

            # Extract planned tool calls
            if response.tool_calls:
                return [
                    {'name': tool_call.name, 'args': tool_call.arguments}
                    for tool_call in response.tool_calls
                ]
            # No more tools planned - task might be complete
            return []

        except Exception as e:
            # Preview is non-critical: report and return None (unavailable)
            # rather than breaking the debugger. Plain print renders no Rich
            # markup, so emit an unmarked message.
            print(f"Preview error: {type(e).__name__}: {e}")
            return None

    def _show_execution_analysis(self, user_prompt: str, result: str):
        """Show post-execution analysis with improvement suggestions.

        Args:
            user_prompt: The user's original request
            result: Final result from agent
        """
        from .execution_analyzer import analyze_execution
        from rich.console import Console

        # Get execution data
        session = self.agent.current_session or {}
        trace = session.get('trace', [])
        iteration = session.get('iteration', 0)
        max_iterations_reached = iteration >= self.agent.max_iterations

        # Show progress while the analysis LLM call runs
        console = Console()
        with console.status("[bold cyan]📊 Analyzing execution and generating suggestions...[/bold cyan]", spinner="dots"):
            analysis = analyze_execution(
                user_prompt=user_prompt,
                agent_instance=self.agent,
                final_result=result,
                execution_trace=trace,
                max_iterations_reached=max_iterations_reached
            )

        # Display analysis
        self.ui.display_execution_analysis(analysis)