connectonion 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113) hide show
  1. connectonion/__init__.py +78 -0
  2. connectonion/address.py +320 -0
  3. connectonion/agent.py +450 -0
  4. connectonion/announce.py +84 -0
  5. connectonion/asgi.py +287 -0
  6. connectonion/auto_debug_exception.py +181 -0
  7. connectonion/cli/__init__.py +3 -0
  8. connectonion/cli/browser_agent/__init__.py +5 -0
  9. connectonion/cli/browser_agent/browser.py +243 -0
  10. connectonion/cli/browser_agent/prompt.md +107 -0
  11. connectonion/cli/commands/__init__.py +1 -0
  12. connectonion/cli/commands/auth_commands.py +527 -0
  13. connectonion/cli/commands/browser_commands.py +27 -0
  14. connectonion/cli/commands/create.py +511 -0
  15. connectonion/cli/commands/deploy_commands.py +220 -0
  16. connectonion/cli/commands/doctor_commands.py +173 -0
  17. connectonion/cli/commands/init.py +469 -0
  18. connectonion/cli/commands/project_cmd_lib.py +828 -0
  19. connectonion/cli/commands/reset_commands.py +149 -0
  20. connectonion/cli/commands/status_commands.py +168 -0
  21. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
  22. connectonion/cli/docs/connectonion.md +1256 -0
  23. connectonion/cli/docs.md +123 -0
  24. connectonion/cli/main.py +148 -0
  25. connectonion/cli/templates/meta-agent/README.md +287 -0
  26. connectonion/cli/templates/meta-agent/agent.py +196 -0
  27. connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
  28. connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
  29. connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
  30. connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
  31. connectonion/cli/templates/minimal/README.md +56 -0
  32. connectonion/cli/templates/minimal/agent.py +40 -0
  33. connectonion/cli/templates/playwright/README.md +118 -0
  34. connectonion/cli/templates/playwright/agent.py +336 -0
  35. connectonion/cli/templates/playwright/prompt.md +102 -0
  36. connectonion/cli/templates/playwright/requirements.txt +3 -0
  37. connectonion/cli/templates/web-research/agent.py +122 -0
  38. connectonion/connect.py +128 -0
  39. connectonion/console.py +539 -0
  40. connectonion/debug_agent/__init__.py +13 -0
  41. connectonion/debug_agent/agent.py +45 -0
  42. connectonion/debug_agent/prompts/debug_assistant.md +72 -0
  43. connectonion/debug_agent/runtime_inspector.py +406 -0
  44. connectonion/debug_explainer/__init__.py +10 -0
  45. connectonion/debug_explainer/explain_agent.py +114 -0
  46. connectonion/debug_explainer/explain_context.py +263 -0
  47. connectonion/debug_explainer/explainer_prompt.md +29 -0
  48. connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
  49. connectonion/debugger_ui.py +1039 -0
  50. connectonion/decorators.py +208 -0
  51. connectonion/events.py +248 -0
  52. connectonion/execution_analyzer/__init__.py +9 -0
  53. connectonion/execution_analyzer/execution_analysis.py +93 -0
  54. connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
  55. connectonion/host.py +579 -0
  56. connectonion/interactive_debugger.py +342 -0
  57. connectonion/llm.py +801 -0
  58. connectonion/llm_do.py +307 -0
  59. connectonion/logger.py +300 -0
  60. connectonion/prompt_files/__init__.py +1 -0
  61. connectonion/prompt_files/analyze_contact.md +62 -0
  62. connectonion/prompt_files/eval_expected.md +12 -0
  63. connectonion/prompt_files/react_evaluate.md +11 -0
  64. connectonion/prompt_files/react_plan.md +16 -0
  65. connectonion/prompt_files/reflect.md +22 -0
  66. connectonion/prompts.py +144 -0
  67. connectonion/relay.py +200 -0
  68. connectonion/static/docs.html +688 -0
  69. connectonion/tool_executor.py +279 -0
  70. connectonion/tool_factory.py +186 -0
  71. connectonion/tool_registry.py +105 -0
  72. connectonion/trust.py +166 -0
  73. connectonion/trust_agents.py +71 -0
  74. connectonion/trust_functions.py +88 -0
  75. connectonion/tui/__init__.py +57 -0
  76. connectonion/tui/divider.py +39 -0
  77. connectonion/tui/dropdown.py +251 -0
  78. connectonion/tui/footer.py +31 -0
  79. connectonion/tui/fuzzy.py +56 -0
  80. connectonion/tui/input.py +278 -0
  81. connectonion/tui/keys.py +35 -0
  82. connectonion/tui/pick.py +130 -0
  83. connectonion/tui/providers.py +155 -0
  84. connectonion/tui/status_bar.py +163 -0
  85. connectonion/usage.py +161 -0
  86. connectonion/useful_events_handlers/__init__.py +16 -0
  87. connectonion/useful_events_handlers/reflect.py +116 -0
  88. connectonion/useful_plugins/__init__.py +20 -0
  89. connectonion/useful_plugins/calendar_plugin.py +163 -0
  90. connectonion/useful_plugins/eval.py +139 -0
  91. connectonion/useful_plugins/gmail_plugin.py +162 -0
  92. connectonion/useful_plugins/image_result_formatter.py +127 -0
  93. connectonion/useful_plugins/re_act.py +78 -0
  94. connectonion/useful_plugins/shell_approval.py +159 -0
  95. connectonion/useful_tools/__init__.py +44 -0
  96. connectonion/useful_tools/diff_writer.py +192 -0
  97. connectonion/useful_tools/get_emails.py +183 -0
  98. connectonion/useful_tools/gmail.py +1596 -0
  99. connectonion/useful_tools/google_calendar.py +613 -0
  100. connectonion/useful_tools/memory.py +380 -0
  101. connectonion/useful_tools/microsoft_calendar.py +604 -0
  102. connectonion/useful_tools/outlook.py +488 -0
  103. connectonion/useful_tools/send_email.py +205 -0
  104. connectonion/useful_tools/shell.py +97 -0
  105. connectonion/useful_tools/slash_command.py +201 -0
  106. connectonion/useful_tools/terminal.py +285 -0
  107. connectonion/useful_tools/todo_list.py +241 -0
  108. connectonion/useful_tools/web_fetch.py +216 -0
  109. connectonion/xray.py +467 -0
  110. connectonion-0.5.8.dist-info/METADATA +741 -0
  111. connectonion-0.5.8.dist-info/RECORD +113 -0
  112. connectonion-0.5.8.dist-info/WHEEL +4 -0
  113. connectonion-0.5.8.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,139 @@
1
+ """
2
+ Purpose: Evaluation plugin for testing and debugging agent prompts and tools
3
+ LLM-Note:
4
+ Dependencies: imports from [pathlib, typing, events.after_user_input, events.on_complete, llm_do] | imported by [useful_plugins/__init__.py] | uses prompt files [prompt_files/eval_expected.md, prompt_files/react_evaluate.md] | tested by [tests/unit/test_eval_plugin.py]
5
+ Data flow: after_user_input → generate_expected() creates expected outcome using llm_do() → stores in agent.current_session['expected'] | on_complete → evaluate_completion() compares actual vs expected using llm_do() → stores evaluation in agent.current_session['evaluation']
6
+ State/Effects: modifies agent.current_session['expected'] and ['evaluation'] | makes LLM calls for expectation generation and evaluation | no file I/O | no network besides LLM
7
+ Integration: exposes eval plugin list with [generate_expected, evaluate_completion] handlers | used via Agent(plugins=[eval]) | combines with re_act for full debugging
8
+ Performance: 2 LLM calls per task (generate + evaluate) | adds latency but enables automated testing
9
+ Errors: no explicit error handling | LLM failures propagate | skips if expected already set by re_act
10
+
11
+ Eval plugin - Debug and test AI agent prompts and tools.
12
+
13
+ Generates expected outcomes and evaluates if tasks completed correctly.
14
+ Use this during development to test if your prompts and tools work as intended.
15
+
16
+ Usage:
17
+ from connectonion import Agent
18
+ from connectonion.useful_plugins import eval
19
+
20
+ # For debugging/testing
21
+ agent = Agent("assistant", tools=[...], plugins=[eval])
22
+
23
+ # Combined with re_act for full debugging
24
+ from connectonion.useful_plugins import re_act, eval
25
+ agent = Agent("assistant", tools=[...], plugins=[re_act, eval])
26
+ """
27
+
28
+ from pathlib import Path
29
+ from typing import TYPE_CHECKING, List, Dict
30
+ from ..events import after_user_input, on_complete
31
+ from ..llm_do import llm_do
32
+
33
+ if TYPE_CHECKING:
34
+ from ..agent import Agent
35
+
36
+ # Prompts
37
+ EXPECTED_PROMPT = Path(__file__).parent.parent / "prompt_files" / "eval_expected.md"
38
+ EVALUATE_PROMPT = Path(__file__).parent.parent / "prompt_files" / "react_evaluate.md"
39
+
40
+
41
@after_user_input
def generate_expected(agent: 'Agent') -> None:
    """Generate the expected outcome for the current task.

    Skips generation when another plugin (e.g. re_act's plan_task) has
    already stored an expectation, or when there is no user prompt.
    The result is stored in agent.current_session['expected'] and later
    consumed by evaluate_completion.
    """
    session = agent.current_session

    # Another plugin (e.g. re_act) may have produced an expectation already.
    if session.get('expected'):
        return

    user_prompt = session.get('user_prompt', '')
    if not user_prompt:
        return

    names = agent.tools.names() if agent.tools else []
    tools_str = ", ".join(names) if names else "no tools"

    prompt = f"""User request: {user_prompt}

Available tools: {tools_str}

What should happen to complete this task? (1-2 sentences)"""

    session['expected'] = llm_do(
        prompt,
        model="co/gemini-2.5-flash",
        temperature=0.2,
        system_prompt=EXPECTED_PROMPT,
    )
72
+
73
+
74
+ def _summarize_trace(trace: List[Dict]) -> str:
75
+ """Summarize what actions were taken."""
76
+ actions = []
77
+ for entry in trace:
78
+ if entry['type'] == 'tool_execution':
79
+ status = entry['status']
80
+ tool = entry['tool_name']
81
+ if status == 'success':
82
+ result = str(entry.get('result', ''))[:100]
83
+ actions.append(f"- {tool}: {result}")
84
+ else:
85
+ actions.append(f"- {tool}: failed ({entry.get('error', 'unknown')})")
86
+ return "\n".join(actions) if actions else "No tools were used."
87
+
88
+
89
@on_complete
def evaluate_completion(agent: 'Agent') -> None:
    """Evaluate if the task completed correctly.

    Compares the agent's actions and final response against the expected
    outcome (when one was recorded) using llm_do(), and stores the verdict
    in agent.current_session['evaluation'].
    """
    session = agent.current_session

    user_prompt = session.get('user_prompt', '')
    if not user_prompt:
        return

    actions_summary = _summarize_trace(session.get('trace', []))
    result = session.get('result', 'No response generated.')
    expected = session.get('expected', '')

    # The "Expected:" section appears only when an expectation was recorded
    # (by generate_expected above, or by re_act's plan_task).
    expected_section = f"Expected: {expected}\n\n" if expected else ""

    prompt = f"""User's original request: {user_prompt}

{expected_section}Actions taken:
{actions_summary}

Agent's response:
{result}

Is this task truly complete? What was achieved or what's missing?"""

    agent.logger.print("[dim]/evaluating...[/dim]")

    evaluation = llm_do(
        prompt,
        model="co/gemini-2.5-flash",
        temperature=0.2,
        system_prompt=EVALUATE_PROMPT,
    )

    session['evaluation'] = evaluation
    agent.logger.print(f"[dim]✓ {evaluation}[/dim]")
136
+
137
+
138
# Bundle as plugin
# NOTE: `eval` shadows the builtin of the same name, but it is this plugin's
# public import name (from connectonion.useful_plugins import eval), so it
# cannot be renamed without breaking callers.
eval = [generate_expected, evaluate_completion]
@@ -0,0 +1,162 @@
1
+ """
2
+ Purpose: Human-in-the-loop approval plugin for Gmail send operations with email preview
3
+ LLM-Note:
4
+ Dependencies: imports from [datetime, typing, events.before_each_tool, events.after_each_tool, tui.pick, rich.console, rich.panel, rich.text] | imported by [useful_plugins/__init__.py] | tested by [tests/unit/test_gmail_plugin.py]
5
+ Data flow: before_each_tool → check_email_approval() checks if tool is Gmail.send/reply → displays email preview with Rich panel → pick() prompts for user approval → raises ValueError to cancel if rejected
6
+ State/Effects: blocks on user input | displays Rich-formatted email preview | raises exception to cancel tool execution | no file I/O | no network
7
+ Integration: exposes gmail_plugin list with [check_email_approval, sync_crm_after_send] handlers | used via Agent(plugins=[gmail_plugin]) | works with Gmail tool
8
+ Performance: instant display | blocks on user input | no LLM calls
9
+ Errors: raises ValueError on rejection | keyboard interrupts handled gracefully
10
+
11
+ Gmail plugin - Approval and CRM sync for Gmail operations.
12
+
13
+ Usage:
14
+ from connectonion import Agent, Gmail
15
+ from connectonion.useful_plugins import gmail_plugin
16
+
17
+ gmail = Gmail()
18
+ agent = Agent("assistant", tools=[gmail], plugins=[gmail_plugin])
19
+ """
20
+
21
+ from datetime import datetime
22
+ from typing import TYPE_CHECKING
23
+ from ..events import before_each_tool, after_each_tool
24
+ from ..tui import pick
25
+ from rich.console import Console
26
+ from rich.panel import Panel
27
+ from rich.text import Text
28
+
29
+ if TYPE_CHECKING:
30
+ from ..agent import Agent
31
+
32
+ _console = Console()
33
+
34
+ # Gmail class method names that send emails
35
+ SEND_METHODS = ('send', 'reply')
36
+
37
+
38
@before_each_tool
def check_email_approval(agent: 'Agent') -> None:
    """Ask user approval before sending emails via Gmail.

    Inspects the pending tool call; if it is a Gmail ``send`` or ``reply``,
    shows a Rich preview panel and blocks on a pick() menu.  The user may
    approve once, auto-approve (a recipient / all replies / all emails) for
    the rest of the session, or type free-form feedback, which cancels the
    send by raising.

    Raises:
        ValueError: If user rejects the email
    """
    pending = agent.current_session.get('pending_tool')
    if not pending:
        return

    tool_name = pending['name']
    if tool_name not in SEND_METHODS:
        return

    args = pending['arguments']

    # Skip if all emails auto-approved
    if agent.current_session.get('gmail_approve_all', False):
        return

    preview = Text()

    if tool_name == 'send':
        to = args.get('to', '')
        subject = args.get('subject', '')
        body = args.get('body', '')
        cc = args.get('cc', '')
        bcc = args.get('bcc', '')

        # Skip if this recipient was auto-approved
        approved_recipients = agent.current_session.get('gmail_approved_recipients', set())
        if to in approved_recipients:
            return

        preview.append("To: ", style="bold cyan")
        preview.append(f"{to}\n")
        if cc:
            preview.append("CC: ", style="bold cyan")
            preview.append(f"{cc}\n")
        if bcc:
            preview.append("BCC: ", style="bold cyan")
            preview.append(f"{bcc}\n")
        preview.append("Subject: ", style="bold cyan")
        preview.append(f"{subject}\n\n")
        # Truncate long bodies so the approval panel stays readable.
        body_preview = body[:500] + "..." if len(body) > 500 else body
        preview.append(body_preview)

        action = "Email"
        recipient_key = to

    elif tool_name == 'reply':
        email_id = args.get('email_id', '')
        body = args.get('body', '')

        # Skip if replies auto-approved
        if agent.current_session.get('gmail_approve_replies', False):
            return

        preview.append("Reply to thread: ", style="bold cyan")
        preview.append(f"{email_id}\n\n")
        body_preview = body[:500] + "..." if len(body) > 500 else body
        preview.append(body_preview)

        action = "Reply"
        recipient_key = None  # a reply has no single recipient to whitelist

    _console.print()
    _console.print(Panel(preview, title=f"[yellow]{action} to Send[/yellow]", border_style="yellow"))

    # Menu options depend on the tool: per-recipient auto-approval only
    # makes sense for 'send'; per-reply auto-approval only for 'reply'.
    options = ["Yes, send it"]
    if tool_name == 'send' and recipient_key:
        options.append(f"Auto approve emails to '{recipient_key}'")
    if tool_name == 'reply':
        options.append("Auto approve all replies this session")
    options.append("Auto approve all emails this session")

    # other=True lets the user type free-form feedback instead of picking.
    choice = pick(f"Send this {action.lower()}?", options, other=True, console=_console)

    if choice == "Yes, send it":
        return
    if choice.startswith("Auto approve emails to"):
        if 'gmail_approved_recipients' not in agent.current_session:
            agent.current_session['gmail_approved_recipients'] = set()
        agent.current_session['gmail_approved_recipients'].add(recipient_key)
        return
    if choice == "Auto approve all replies this session":
        agent.current_session['gmail_approve_replies'] = True
        return
    if choice == "Auto approve all emails this session":
        agent.current_session['gmail_approve_all'] = True
        return
    # User typed custom feedback via "Other"
    raise ValueError(f"User feedback: {choice}")
132
+
133
+
134
@after_each_tool
def sync_crm_after_send(agent: 'Agent') -> None:
    """Update CRM data after each email send - last_contact, clear next_contact_date.

    Looks at the most recent trace entry; if it is a successful Gmail
    send/reply with a recipient, records today's date as last_contact via
    the Gmail tool and clears next_contact_date.
    """
    # Defensive guard: the original indexed trace[-1] directly, which
    # raises IndexError when no tool has executed yet in this session.
    trace_entries = agent.current_session.get('trace') or []
    if not trace_entries:
        return
    trace = trace_entries[-1]

    if trace['type'] != 'tool_execution':
        return
    if trace['tool_name'] not in SEND_METHODS:
        return
    if trace['status'] != 'success':
        return

    to = trace['arguments'].get('to', '')
    if not to:
        return

    # Access Gmail instance via agent.tools.gmail
    gmail = agent.tools.gmail
    today = datetime.now().strftime('%Y-%m-%d')
    result = gmail.update_contact(to, last_contact=today, next_contact_date='')

    # update_contact returns a human-readable status string; only log on success.
    if 'Updated' in result:
        _console.print(f"[dim]CRM updated: {to}[/dim]")
156
+
157
+
158
# Bundle as plugin
gmail_plugin = [
    check_email_approval,   # before_each_tool: human approval for send/reply
    sync_crm_after_send,    # after_each_tool: CRM last_contact bookkeeping
]
@@ -0,0 +1,127 @@
1
+ """
2
+ Purpose: Automatically format base64 image tool results for multimodal LLM consumption
3
+ LLM-Note:
4
+ Dependencies: imports from [re, typing, events.after_tools] | imported by [useful_plugins/__init__.py] | tested by [tests/unit/test_image_result_formatter.py]
5
+ Data flow: after_tools event → scans tool result messages for base64 images → detects data URL or raw base64 patterns → converts tool result message content to OpenAI vision API format with image_url type → allows LLM to visually interpret screenshots/images
6
+ State/Effects: modifies agent.current_session['messages'] in place | shortens the tool message text and inserts a user message carrying the image content block | no file I/O | no network
7
+ Integration: exposes image_result_formatter plugin list with [format_images] handler | used via Agent(plugins=[image_result_formatter]) | works with screenshot tools, image generators
8
+ Performance: O(n) message scanning | regex pattern matching | no LLM calls
9
+ Errors: silent skip if no base64 images detected | malformed base64 may cause LLM confusion
10
+
11
+ Image Result Formatter Plugin - Automatically formats base64 image results for model consumption.
12
+
13
+ When a tool returns a base64 encoded image (screenshot, generated image, etc.), this plugin
14
+ detects it and converts the tool result message to image format that LLMs can properly
15
+ interpret visually instead of treating it as text.
16
+
17
+ Usage:
18
+ from connectonion import Agent
19
+ from connectonion.useful_plugins import image_result_formatter
20
+
21
+ agent = Agent("assistant", tools=[take_screenshot], plugins=[image_result_formatter])
22
+ """
23
+
24
+ import re
25
+ from typing import TYPE_CHECKING
26
+ from ..events import after_tools
27
+
28
+ if TYPE_CHECKING:
29
+ from ..agent import Agent
30
+
31
+
32
+ def _is_base64_image(text: str) -> tuple[bool, str, str]:
33
+ """
34
+ Check if text contains base64 image data.
35
+
36
+ Returns:
37
+ (is_image, mime_type, base64_data)
38
+ """
39
+ if not isinstance(text, str):
40
+ return False, "", ""
41
+
42
+ # Check for data URL format: data:image/png;base64,iVBORw0KGgo...
43
+ data_url_pattern = r'data:image/(png|jpeg|jpg|gif|webp);base64,([A-Za-z0-9+/=]+)'
44
+ match = re.search(data_url_pattern, text)
45
+
46
+ if match:
47
+ image_type = match.group(1)
48
+ base64_data = match.group(2)
49
+ mime_type = f"image/{image_type}"
50
+ return True, mime_type, base64_data
51
+
52
+ # Check if entire result is base64 (common for screenshot tools)
53
+ # Base64 strings are typically long and contain only valid base64 characters
54
+ if len(text) > 100 and re.match(r'^[A-Za-z0-9+/=\s]+$', text):
55
+ # Likely a base64 image, default to PNG
56
+ return True, "image/png", text.strip()
57
+
58
+ return False, "", ""
59
+
60
+
61
def _format_image_result(agent: 'Agent') -> None:
    """
    Format base64 image in tool result to proper multimodal message format.

    When the latest successful tool execution returned base64 image data,
    rewrite the matching tool message to a short placeholder and insert a
    user message carrying the image in OpenAI vision format:

        content: [
            {"type": "text", "text": "Tool 'tool_name' returned an image. See below."},
            {"type": "image_url", "image_url": "data:image/png;base64,..."}
        ]

    The tool message is kept (OpenAI requires a tool response for every
    tool_call) while the image travels in a user message, the only role
    that accepts image content blocks.
    """
    trace = agent.current_session['trace'][-1]

    if trace['type'] != 'tool_execution' or trace['status'] != 'success':
        return

    is_image, mime_type, base64_data = _is_base64_image(trace['result'])
    if not is_image:
        return

    tool_call_id = trace.get('call_id')  # trace uses 'call_id', not 'tool_call_id'
    tool_name = trace.get('tool_name', 'unknown')

    messages = agent.current_session['messages']

    # Walk backwards to locate the tool-result message for this call.
    idx = len(messages) - 1
    while idx >= 0:
        msg = messages[idx]
        if msg['role'] == 'tool' and msg.get('tool_call_id') == tool_call_id:
            # Shorten the tool message content (remove base64 to save tokens).
            msg['content'] = f"Screenshot captured (image provided below)"

            # Insert a user message with the image right after the tool message.
            image_message = {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": f"Tool '{tool_name}' returned an image result. See image below."
                    },
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:{mime_type};base64,{base64_data}"}
                    }
                ]
            }
            messages.insert(idx + 1, image_message)

            agent.logger.print(f"[dim]🖼️ Formatted '{tool_name}' result as image[/dim]")
            break
        idx -= 1

    # Replace the bulky trace result so other plugins (e.g. ReAct) don't
    # re-feed the raw base64 into their own LLM calls.
    trace['result'] = f"🖼️ Tool '{tool_name}' returned image ({mime_type})"
123
+
124
+
125
# Plugin is an event list
# Uses after_tools because message modification can only happen after all tools finish
# Usage: Agent("assistant", tools=[take_screenshot], plugins=[image_result_formatter])
image_result_formatter = [after_tools(_format_image_result)]
@@ -0,0 +1,78 @@
1
+ """
2
+ Purpose: ReAct (Reasoning + Acting) plugin that adds planning and reflection to agent execution
3
+ LLM-Note:
4
+ Dependencies: imports from [pathlib, typing, events.after_user_input, llm_do, useful_events_handlers.reflect] | imported by [useful_plugins/__init__.py] | uses prompt file [prompt_files/react_plan.md] | tested by [tests/unit/test_re_act_plugin.py]
5
+ Data flow: after_user_input → plan_task() generates a plan using llm_do() → stores in agent.current_session['plan'] → after_tools → reflect() from useful_events_handlers evaluates results → generates reflection for next step
6
+ State/Effects: modifies agent.current_session['plan'] and ['expected'] | makes LLM calls for planning and reflection | no file I/O | no network besides LLM
7
+ Integration: exposes re_act plugin list with [plan_task, reflect] event handlers | used via Agent(plugins=[re_act]) | works with eval plugin for debugging
8
+ Performance: 1-2 LLM calls per turn (plan + reflect) | adds latency but improves agent reasoning
9
+ Errors: no explicit error handling | LLM failures propagate | silent skip if no user_prompt
10
+
11
+ ReAct plugin - Reasoning and Acting pattern for AI agents.
12
+
13
+ Implements the ReAct (Reason + Act) pattern:
14
+ 1. After user input: Plan what to do
15
+ 2. After tool execution: Reflect on results and plan next step
16
+
17
+ For evaluation/debugging, use the separate `eval` plugin.
18
+
19
+ Usage:
20
+ from connectonion import Agent
21
+ from connectonion.useful_plugins import re_act
22
+
23
+ agent = Agent("assistant", tools=[...], plugins=[re_act])
24
+
25
+ # With evaluation for debugging:
26
+ from connectonion.useful_plugins import re_act, eval
27
+ agent = Agent("assistant", tools=[...], plugins=[re_act, eval])
28
+ """
29
+
30
+ from pathlib import Path
31
+ from typing import TYPE_CHECKING
32
+ from ..events import after_user_input
33
+ from ..llm_do import llm_do
34
+ from ..useful_events_handlers.reflect import reflect
35
+
36
+ if TYPE_CHECKING:
37
+ from ..agent import Agent
38
+
39
+ # Prompts
40
+ PLAN_PROMPT = Path(__file__).parent.parent / "prompt_files" / "react_plan.md"
41
+
42
+
43
@after_user_input
def plan_task(agent: 'Agent') -> None:
    """Plan the task after receiving user input.

    Generates a brief plan via llm_do(), stores it as the session's
    expected outcome (consumed by the eval plugin when present), and
    appends it to the conversation as an assistant "thought" message.
    """
    session = agent.current_session

    user_prompt = session.get('user_prompt', '')
    if not user_prompt:
        return

    names = agent.tools.names() if agent.tools else []
    tools_str = ", ".join(names) if names else "no tools"

    prompt = f"""User request: {user_prompt}

Available tools: {tools_str}

Brief plan (1-2 sentences): what to do first?"""

    agent.logger.print("[dim]/planning...[/dim]")

    plan = llm_do(
        prompt,
        model="co/gemini-2.5-flash",
        temperature=0.2,
        system_prompt=PLAN_PROMPT,
    )

    # Store plan as expected outcome (used by eval plugin if present)
    session['expected'] = plan

    session['messages'].append({'role': 'assistant', 'content': f"💭 {plan}"})
75
+
76
+
77
# Bundle as plugin: plan (after_user_input) + reflect (after_tools)
# Usage: Agent("assistant", tools=[...], plugins=[re_act])
re_act = [plan_task, reflect]
@@ -0,0 +1,159 @@
1
+ """
2
+ Purpose: Human-in-the-loop approval plugin for shell commands with safe command bypass
3
+ LLM-Note:
4
+ Dependencies: imports from [re, typing, events.before_each_tool, tui.pick, rich.console] | imported by [useful_plugins/__init__.py] | tested by [tests/unit/test_shell_approval.py]
5
+ Data flow: before_each_tool event → checks if tool is Shell.run → matches command against SAFE_PATTERNS (ls, cat, grep, git status, etc.) → if not safe, displays command with pick() for user approval → raises exception to cancel if rejected
6
+ State/Effects: blocks on user input | displays Rich-formatted command preview | raises exception to cancel tool execution | no file I/O | no network
7
+ Integration: exposes shell_approval plugin list with [approve_shell] handler | used via Agent(plugins=[shell_approval]) | works with Shell tool
8
+ Performance: O(n) regex pattern matching | blocks on user input | instant for safe commands
9
+ Errors: raises ValueError on rejection | keyboard interrupts handled gracefully
10
+
11
+ Shell Approval plugin - Asks user approval for shell commands.
12
+
13
+ All shell commands require approval EXCEPT safe read-only commands
14
+ like ls, grep, cat, git status, etc.
15
+
16
+ Usage:
17
+ from connectonion import Agent
18
+ from connectonion.useful_plugins import shell_approval
19
+
20
+ agent = Agent("assistant", tools=[shell], plugins=[shell_approval])
21
+ """
22
+
23
+ import re
24
+ from typing import TYPE_CHECKING
25
+ from ..events import before_each_tool
26
+ from ..tui import pick
27
+ from rich.console import Console
28
+
29
+ if TYPE_CHECKING:
30
+ from ..agent import Agent
31
+
32
+ _console = Console()
33
+
34
+ # Safe read-only commands that don't need approval
35
+ SAFE_PATTERNS = [
36
+ r'^ls\b', # list files
37
+ r'^ll\b', # list files (alias)
38
+ r'^cat\b', # read file
39
+ r'^head\b', # read file head
40
+ r'^tail\b', # read file tail
41
+ r'^less\b', # read file
42
+ r'^more\b', # read file
43
+ r'^grep\b', # search
44
+ r'^rg\b', # ripgrep search
45
+ r'^find\b', # find files
46
+ r'^fd\b', # fd find
47
+ r'^which\b', # find executable
48
+ r'^whereis\b', # find executable
49
+ r'^type\b', # show type
50
+ r'^file\b', # file type
51
+ r'^stat\b', # file stats
52
+ r'^wc\b', # word count
53
+ r'^pwd\b', # print working dir
54
+ r'^echo\b', # echo (read-only)
55
+ r'^printf\b', # printf (read-only)
56
+ r'^date\b', # date
57
+ r'^whoami\b', # current user
58
+ r'^id\b', # user id
59
+ r'^env\b', # environment
60
+ r'^printenv\b', # print environment
61
+ r'^uname\b', # system info
62
+ r'^hostname\b', # hostname
63
+ r'^df\b', # disk free
64
+ r'^du\b', # disk usage
65
+ r'^free\b', # memory
66
+ r'^ps\b', # processes
67
+ r'^top\b', # top processes
68
+ r'^htop\b', # htop
69
+ r'^tree\b', # tree view
70
+ r'^git\s+status\b', # git status
71
+ r'^git\s+log\b', # git log
72
+ r'^git\s+diff\b', # git diff
73
+ r'^git\s+show\b', # git show
74
+ r'^git\s+branch\b', # git branch (list)
75
+ r'^git\s+remote\b', # git remote (list)
76
+ r'^git\s+tag\b', # git tag (list)
77
+ r'^npm\s+list\b', # npm list
78
+ r'^npm\s+ls\b', # npm ls
79
+ r'^pip\s+list\b', # pip list
80
+ r'^pip\s+show\b', # pip show
81
+ r'^python\s+--version\b', # python version
82
+ r'^node\s+--version\b', # node version
83
+ r'^cargo\s+--version\b', # cargo version
84
+ ]
85
+
86
+
87
+ def _is_safe(command: str) -> bool:
88
+ """Check if command is a safe read-only command."""
89
+ cmd = command.strip()
90
+ for pattern in SAFE_PATTERNS:
91
+ if re.search(pattern, cmd):
92
+ return True
93
+ return False
94
+
95
+
96
def _check_approval(agent: 'Agent') -> None:
    """Check pending tool and ask for approval if not safe.

    All shell commands require approval except safe read-only commands.
    The user may execute once, auto-approve the base command for the rest
    of the session, or provide free-form feedback, which cancels the tool.

    Raises:
        ValueError: If user rejects the command
    """
    session = agent.current_session

    pending = session.get('pending_tool')
    if not pending:
        return

    # Only guard bash/shell tools; everything else passes through.
    if pending['name'] not in ('bash', 'shell', 'run'):
        return

    command = pending['arguments'].get('command', '')

    # The base command (first word) is the key for session auto-approval.
    stripped = command.strip()
    base_cmd = stripped.split()[0] if stripped else ''

    # Previously auto-approved command type: run without asking.
    if base_cmd in session.get('shell_approved_cmds', set()):
        return

    # Safe read-only commands never need approval.
    if _is_safe(command):
        return

    # Show command in a visual box
    from rich.panel import Panel
    from rich.syntax import Syntax

    _console.print()
    highlighted = Syntax(command, "bash", theme="monokai", word_wrap=True)
    _console.print(Panel(highlighted, title="[yellow]Shell Command[/yellow]", border_style="yellow"))

    # Use pick for visual arrow-key selection
    menu = [
        "Yes, execute",
        f"Auto approve '{base_cmd}' in this session",
        "No, tell agent what I want",
    ]
    choice = pick("Execute this command?", menu, console=_console)

    if choice == "Yes, execute":
        return  # Execute the command
    if choice.startswith("Auto approve"):
        # Remember this command type for the rest of the session.
        session.setdefault('shell_approved_cmds', set()).add(base_cmd)
        return  # Execute the command

    # User wants to provide feedback
    feedback = input("What do you want the agent to do instead? ")
    raise ValueError(f"User feedback: {feedback}")
156
+
157
+
158
# Plugin is an event list
# Usage: Agent("assistant", tools=[shell], plugins=[shell_approval])
shell_approval = [before_each_tool(_check_approval)]