connectonion-0.4.12-py3-none-any.whl → connectonion-0.5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- connectonion/__init__.py +11 -5
- connectonion/agent.py +44 -42
- connectonion/cli/commands/init.py +1 -1
- connectonion/cli/commands/project_cmd_lib.py +4 -4
- connectonion/cli/commands/reset_commands.py +1 -1
- connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +15 -11
- connectonion/cli/templates/minimal/agent.py +2 -2
- connectonion/console.py +55 -3
- connectonion/events.py +96 -17
- connectonion/llm.py +21 -3
- connectonion/logger.py +289 -0
- connectonion/prompt_files/eval_expected.md +12 -0
- connectonion/tool_executor.py +43 -32
- connectonion/usage.py +4 -0
- connectonion/useful_events_handlers/reflect.py +13 -9
- connectonion/useful_plugins/__init__.py +2 -1
- connectonion/useful_plugins/calendar_plugin.py +2 -2
- connectonion/useful_plugins/eval.py +130 -0
- connectonion/useful_plugins/gmail_plugin.py +4 -4
- connectonion/useful_plugins/image_result_formatter.py +4 -3
- connectonion/useful_plugins/re_act.py +14 -56
- connectonion/useful_plugins/shell_approval.py +2 -2
- connectonion/useful_tools/memory.py +4 -0
- {connectonion-0.4.12.dist-info → connectonion-0.5.1.dist-info}/METADATA +49 -49
- {connectonion-0.4.12.dist-info → connectonion-0.5.1.dist-info}/RECORD +27 -71
- {connectonion-0.4.12.dist-info → connectonion-0.5.1.dist-info}/WHEEL +1 -2
- connectonion/cli/templates/email-agent/.env.example +0 -23
- connectonion/cli/templates/email-agent/README.md +0 -240
- connectonion/cli/templates/email-agent/agent.py +0 -374
- connectonion/cli/templates/email-agent/demo.py +0 -71
- connectonion/cli/templates/meta-agent/.env.example +0 -11
- connectonion/cli/templates/minimal/.env.example +0 -5
- connectonion/cli/templates/playwright/.env.example +0 -5
- connectonion-0.4.12.dist-info/top_level.txt +0 -2
- tests/__init__.py +0 -0
- tests/cli/__init__.py +0 -1
- tests/cli/argparse_runner.py +0 -85
- tests/cli/conftest.py +0 -5
- tests/cli/test_browser_cli.py +0 -61
- tests/cli/test_cli.py +0 -143
- tests/cli/test_cli_auth_google.py +0 -344
- tests/cli/test_cli_auth_microsoft.py +0 -256
- tests/cli/test_cli_create.py +0 -283
- tests/cli/test_cli_help.py +0 -200
- tests/cli/test_cli_init.py +0 -318
- tests/conftest.py +0 -283
- tests/debug_gemini_models.py +0 -23
- tests/fixtures/__init__.py +0 -1
- tests/fixtures/test_tools.py +0 -112
- tests/fixtures/trust_fixtures.py +0 -257
- tests/real_api/__init__.py +0 -0
- tests/real_api/conftest.py +0 -9
- tests/real_api/test_llm_do.py +0 -174
- tests/real_api/test_llm_do_comprehensive.py +0 -527
- tests/real_api/test_production_client.py +0 -94
- tests/real_api/test_real_anthropic.py +0 -100
- tests/real_api/test_real_api.py +0 -113
- tests/real_api/test_real_auth.py +0 -130
- tests/real_api/test_real_email.py +0 -95
- tests/real_api/test_real_gemini.py +0 -96
- tests/real_api/test_real_llm_do.py +0 -81
- tests/real_api/test_real_managed.py +0 -208
- tests/real_api/test_real_multi_llm.py +0 -454
- tests/real_api/test_real_openai.py +0 -100
- tests/real_api/test_responses_parse.py +0 -88
- tests/test_diff_writer.py +0 -126
- tests/test_events.py +0 -677
- tests/test_gemini_co.py +0 -70
- tests/test_image_result_formatter.py +0 -88
- tests/test_plugin_system.py +0 -110
- tests/utils/__init__.py +0 -1
- tests/utils/config_helpers.py +0 -188
- tests/utils/mock_helpers.py +0 -237
- {connectonion-0.4.12.dist-info → connectonion-0.5.1.dist-info}/entry_points.txt +0 -0
connectonion/__init__.py
CHANGED
@@ -1,6 +1,6 @@
 """ConnectOnion - A simple agent framework with behavior tracking."""

-__version__ = "0.4.12"
+__version__ = "0.5.1"

 # Auto-load .env files for the entire framework
 from dotenv import load_dotenv
@@ -13,6 +13,7 @@ load_dotenv(_Path.cwd() / ".env")
 from .agent import Agent
 from .tool_factory import create_tool_from_function
 from .llm import LLM
+from .logger import Logger
 from .llm_do import llm_do
 from .prompts import load_system_prompt
 from .xray import xray
@@ -24,8 +25,10 @@ from .events import (
     after_user_input,
     before_llm,
     after_llm,
-    …
-    …
+    before_each_tool,
+    before_tools,
+    after_each_tool,
+    after_tools,
     on_error,
     on_complete
 )
@@ -33,6 +36,7 @@ from .events import (
 __all__ = [
     "Agent",
     "LLM",
+    "Logger",
     "create_tool_from_function",
     "llm_do",
     "load_system_prompt",
@@ -62,8 +66,10 @@ __all__ = [
     "after_user_input",
     "before_llm",
     "after_llm",
-    "…
-    "…
+    "before_each_tool",
+    "before_tools",
+    "after_each_tool",
+    "after_tools",
     "on_error",
     "on_complete"
 ]
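The new exports can be exercised in a short sketch. This assumes connectonion 0.5.1 is installed and that the names are exported exactly as the `__all__` diff above shows; the handler body is illustrative only:

```python
# Sketch: the names added in 0.5.1 (Logger plus the renamed event wrappers).
from connectonion import Agent, Logger, before_each_tool, before_tools, after_each_tool, after_tools

# The wrappers tag plain functions as event handlers (see the events.py diff below).
@after_each_tool
def log_tool(agent):
    # Inspect the most recent trace entry after every tool call.
    trace = agent.current_session['trace'][-1]
    if trace['type'] == 'tool_execution':
        print(f"{trace['tool_name']} finished in {trace['timing']:.0f}ms")
```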
connectonion/agent.py
CHANGED
|
@@ -1,10 +1,10 @@
 """
 Purpose: Orchestrate AI agent execution with LLM calls, tool execution, and automatic logging
 LLM-Note:
-  Dependencies: imports from [llm.py, tool_factory.py, prompts.py, decorators.py, …
-  Data flow: receives user prompt: str from Agent.input() → creates/extends current_session with messages → calls llm.complete() with tool schemas → receives LLMResponse with tool_calls → executes tools via tool_executor.execute_and_record_tools() → appends tool results to messages → repeats loop until no tool_calls or max_iterations → …
-  State/Effects: modifies self.current_session['messages', 'trace', 'turn', 'iteration'] | writes to .co/logs/{name}.log via …
-  Integration: exposes Agent(name, tools, system_prompt, model, trust, log), .input(prompt), .execute_tool(name, args), .add_tool(func), .remove_tool(name), .list_tools(), .reset_conversation() | tools stored in ToolRegistry with attribute access (agent.tools.tool_name) and instance storage (agent.tools.gmail) | tool execution delegates to tool_executor module | trust system via trust.create_trust_agent() | log defaults to .co/logs/ (None), can be True (current dir), False (disabled), or custom path
+  Dependencies: imports from [llm.py, tool_factory.py, prompts.py, decorators.py, logger.py, tool_executor.py, trust.py, tool_registry.py] | imported by [__init__.py, trust.py, debug_agent/__init__.py] | tested by [tests/test_agent.py, tests/test_agent_prompts.py, tests/test_agent_workflows.py]
+  Data flow: receives user prompt: str from Agent.input() → creates/extends current_session with messages → calls llm.complete() with tool schemas → receives LLMResponse with tool_calls → executes tools via tool_executor.execute_and_record_tools() → appends tool results to messages → repeats loop until no tool_calls or max_iterations → logger logs to .co/logs/{name}.log and .co/sessions/{name}_{timestamp}.yaml → returns final response: str
+  State/Effects: modifies self.current_session['messages', 'trace', 'turn', 'iteration'] | writes to .co/logs/{name}.log and .co/sessions/ via logger.py | initializes trust agent if trust parameter provided
+  Integration: exposes Agent(name, tools, system_prompt, model, trust, log, quiet), .input(prompt), .execute_tool(name, args), .add_tool(func), .remove_tool(name), .list_tools(), .reset_conversation() | tools stored in ToolRegistry with attribute access (agent.tools.tool_name) and instance storage (agent.tools.gmail) | tool execution delegates to tool_executor module | trust system via trust.create_trust_agent() | log defaults to .co/logs/ (None), can be True (current dir), False (disabled), or custom path | quiet=True suppresses console but keeps session logging
   Performance: max_iterations=10 default (configurable per-input) | session state persists across turns for multi-turn conversations | ToolRegistry provides O(1) tool lookup via .get() or attribute access
   Errors: LLM errors bubble up | tool execution errors captured in trace and returned to LLM for retry | trust agent creation can fail if invalid trust parameter
 """
@@ -22,7 +22,7 @@ from .prompts import load_system_prompt
 from .decorators import (
     _is_replay_enabled  # Only need this for replay check
 )
-from .…
+from .logger import Logger
 from .tool_executor import execute_and_record_tools, execute_single_tool
 from .events import EventHandler

@@ -38,10 +38,11 @@ class Agent:
         tools: Optional[Union[List[Callable], Callable, Any]] = None,
         system_prompt: Union[str, Path, None] = None,
         api_key: Optional[str] = None,
-        model: str = "co/…
+        model: str = "co/gemini-2.5-pro",
         max_iterations: int = 10,
         trust: Optional[Union[str, Path, 'Agent']] = None,
         log: Optional[Union[bool, str, Path]] = None,
+        quiet: bool = False,
         plugins: Optional[List[List[EventHandler]]] = None,
         on_events: Optional[List[EventHandler]] = None
     ):
@@ -56,27 +57,13 @@ class Agent:
         self.total_cost: float = 0.0  # Cumulative cost in USD
         self.last_usage: Optional[TokenUsage] = None  # From most recent LLM call

-        # …
-        log_file = None
-        if log is None:
-            # NEW: Default to .co/logs/ for automatic audit trail
-            log_file = Path.cwd() / '.co' / 'logs' / f'{name}.log'
-        elif log is True:
-            # Explicit True: {name}.log in current directory
-            log_file = Path(f"{name}.log")
-        elif log is False:
-            # Explicit opt-out: no logging
-            log_file = None
-        elif log:
-            # Custom log file path
-            log_file = Path(log)
-
+        # Initialize logger (unified: terminal + file + YAML sessions)
         # Environment variable override (highest priority)
+        effective_log = log
         if os.getenv('CONNECTONION_LOG'):
-            …
+            effective_log = Path(os.getenv('CONNECTONION_LOG'))

-
-        self.console = Console(log_file=log_file)
+        self.logger = Logger(agent_name=name, quiet=quiet, log=effective_log)



@@ -93,12 +80,16 @@ class Agent:
         self.trust = create_trust_agent(trust, api_key=api_key, model=model)

         # Initialize event registry
+        # Note: before_each_tool/after_each_tool fire for EACH tool
+        # before_tools/after_tools fire ONCE per batch (safe for adding messages)
         self.events = {
             'after_user_input': [],
             'before_llm': [],
             'after_llm': [],
-            '…
-            '…
+            'before_each_tool': [],   # Fires before EACH tool
+            'before_tools': [],       # Fires ONCE before ALL tools in a batch
+            'after_each_tool': [],    # Fires after EACH tool (don't add messages here!)
+            'after_tools': [],        # Fires ONCE after ALL tools (safe for messages)
             'on_error': [],
             'on_complete': []
         }
@@ -200,7 +191,7 @@ class Agent:
             The agent's response after processing the input
         """
         start_time = time.time()
-        self.…
+        self.logger.print(f"[bold]INPUT:[/bold] {prompt[:100]}...")

         # Initialize session on first input, or continue existing conversation
         if self.current_session is None:
@@ -209,6 +200,8 @@ class Agent:
                 'trace': [],
                 'turn': 0  # Track conversation turns
             }
+            # Start YAML session logging
+            self.logger.start_session(self.system_prompt)

         # Add user message to conversation
         self.current_session['messages'].append({
@@ -238,11 +231,17 @@ class Agent:
             max_iterations or self.max_iterations
         )

-        # Calculate duration
+        # Calculate duration
         duration = time.time() - turn_start

-        self.…
+        self.current_session['result'] = result
+
+        self.logger.print(f"[green]✓ Complete[/green] ({duration:.1f}s)")
         self._invoke_events('on_complete')
+
+        # Log turn to YAML session (after on_complete so handlers can modify state)
+        self.logger.log_turn(prompt, result, duration * 1000, self.current_session, self.llm.model)
+
         return result

     def reset_conversation(self):
@@ -278,18 +277,21 @@ class Agent:
             tool_id=f"manual_{tool_name}_{time.time()}",
             tools=self.tools,
             agent=self,
-            …
+            logger=self.logger
         )

         # Note: trace_entry already added to session in execute_single_tool

         # Fire events (same as execute_and_record_tools)
-        # on_error fires first for errors/not_found
+        # on_error fires first for errors/not_found
         if trace_entry["status"] in ("error", "not_found"):
             self._invoke_events('on_error')

-        # …
-        self._invoke_events('…
+        # after_each_tool fires for this tool execution
+        self._invoke_events('after_each_tool')
+
+        # after_tools fires after all tools in batch (for single execution, fires once)
+        self._invoke_events('after_tools')

         # Return simplified result (omit internal fields)
         return {
@@ -313,7 +315,7 @@ class Agent:
         self.current_session['iteration'] += 1
         iteration = self.current_session['iteration']

-        self.…
+        self.logger.print(f"[dim]Iteration {iteration}/{max_iterations}[/dim]")

         # Get LLM response
         response = self._get_llm_decision()
@@ -339,7 +341,7 @@ class Agent:
         # Show request info
         msg_count = len(self.current_session['messages'])
         tool_count = len(self.tools) if self.tools else 0
-        self.…
+        self.logger.print(f"[yellow]→[/yellow] LLM Request ({self.llm.model}) • {msg_count} msgs • {tool_count} tools")

         # Invoke before_llm events
         self._invoke_events('before_llm')
@@ -367,7 +369,7 @@ class Agent:
         # Invoke after_llm events (after trace entry is added)
         self._invoke_events('after_llm')

-        self.…
+        self.logger.log_llm_response(duration, len(response.tool_calls), response.usage)

         return response

@@ -377,7 +379,7 @@ class Agent:
             tool_calls=tool_calls,
             tools=self.tools,
             agent=self,
-            …
+            logger=self.logger
         )

     def add_tool(self, tool: Callable):
@@ -467,10 +469,10 @@ class Agent:
         addr_data = address.load(co_dir)

         if addr_data is None:
-            self.…
+            self.logger.print("[yellow]No keys found, generating new identity...[/yellow]")
             addr_data = address.generate()
             address.save(addr_data, co_dir)
-            self.…
+            self.logger.print(f"[green]✓ Keys saved to {co_dir / 'keys'}[/green]")

         # Create ANNOUNCE message
         # Use system_prompt as summary (first 1000 chars)
@@ -481,9 +483,9 @@ class Agent:
             endpoints=[]  # MVP: No direct endpoints yet
         )

-        self.…
-        self.…
-        self.…
+        self.logger.print(f"\n[bold]Starting agent: {self.name}[/bold]")
+        self.logger.print(f"Address: {addr_data['address']}")
+        self.logger.print(f"Debug: https://oo.openonion.ai/agent/{addr_data['address']}\n")

         # Define async task handler
         async def task_handler(prompt: str) -> str:
connectonion/cli/commands/project_cmd_lib.py
CHANGED
@@ -534,7 +534,7 @@ def configure_env_for_provider(provider: str, api_key: str) -> str:
     configs = {
         'openai': {
             'var': 'OPENAI_API_KEY',
-            'model': '…
+            'model': 'gpt-4o-mini'
         },
         'anthropic': {
             'var': 'ANTHROPIC_API_KEY',
@@ -624,8 +624,8 @@ def generate_custom_template_with_name(description: str, api_key: str, model: st…
     try:
         from ...llm import create_llm

-        # Use the model specified or default to …
-        llm_model = model if model else "…
+        # Use the model specified or default to co/gemini-2.5-pro
+        llm_model = model if model else "co/gemini-2.5-pro"

         if loading_animation:
             loading_animation.update(f"Connecting to {llm_model}...")
@@ -710,7 +710,7 @@ def process_request(query: str) -> str:
     # Create agent
     agent = Agent(
         name="{suggested_name.replace('-', '_')}",
-        model="{'co/…
+        model="{'co/gemini-2.5-pro' if model and model.startswith('co/') else 'co/gemini-2.5-pro'}",
         system_prompt=\"\"\"You are an AI agent designed to: {description}

         Provide helpful, accurate, and concise responses.\"\"\",
connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md
CHANGED
@@ -1530,13 +1530,13 @@ agent.input("Search for Python and calculate 15 * 8")
 **A plugin is an event list** - just like `on_events`, but reusable across agents:

 ```python
-from connectonion import …
+from connectonion import after_tools, after_each_tool, after_llm

 # This is a plugin (one event list)
-reflection = […
+reflection = [after_tools(add_reflection)]  # after_tools for message injection

 # This is also a plugin (multiple events in one list)
-logger = [after_llm(log_llm), …
+logger = [after_llm(log_llm), after_each_tool(log_tool)]  # after_each_tool for per-tool logging

 # Use them (plugins takes a list of plugins)
 agent = Agent("assistant", tools=[search], plugins=[reflection, logger])
@@ -1560,8 +1560,8 @@ logger = [after_llm(log_llm)]
 agent = Agent(
     name="assistant",
     tools=[search],
-    plugins=[logger],
-    on_events=[after_llm(add_timestamp), …
+    plugins=[logger],  # List of event lists
+    on_events=[after_llm(add_timestamp), after_each_tool(log_tool)]  # One event list
 )
 ```

@@ -1734,7 +1734,7 @@ Reflect in 1-2 sentences on what we learned:"""

 ```python
 # Plugin is an event list
-reflection = […
+reflection = [after_tools(_add_reflection)]  # after_tools for message injection
 ```

 **That's it!** A plugin is just an event list.
@@ -1757,7 +1757,7 @@ def log_tool(agent):
     print(f"✓ {trace['tool_name']} completed in {trace['timing']}ms")

 # Plugin is an event list
-logger = […
+logger = [after_each_tool(log_tool)]  # after_each_tool for per-tool logging

 # Use it
 agent = Agent("assistant", tools=[search], plugins=[logger])
@@ -1769,8 +1769,8 @@ Use the same plugin across multiple agents:

 ```python
 # Define once
-reflection = […
-logger = [after_llm(log_llm), …
+reflection = [after_tools(add_reflection)]  # after_tools for message injection
+logger = [after_llm(log_llm), after_each_tool(log_tool)]  # after_each_tool for per-tool logging

 # Use in multiple agents
 researcher = Agent("researcher", tools=[search], plugins=[reflection, logger])
@@ -1784,16 +1784,20 @@ analyst = Agent("analyst", tools=[calculate], plugins=[logger])

 ```python
 # Define a plugin (an event list)
-my_plugin = [after_llm(handler1), …
+my_plugin = [after_llm(handler1), after_tools(handler2)]  # after_tools for message injection

 # Use it (plugins takes a list of event lists)
 agent = Agent("assistant", tools=[search], plugins=[my_plugin])
 ```

 **on_events vs plugins:**
-- `on_events=[after_llm(h1), …
+- `on_events=[after_llm(h1), after_each_tool(h2)]` → one event list
 - `plugins=[plugin1, plugin2]` → list of event lists

+**Event naming:**
+- `after_each_tool` → fires for EACH tool (per-tool logging/monitoring)
+- `after_tools` → fires ONCE after all tools (safe for message injection)
+
 ---

 ## Best Practices
connectonion/cli/templates/minimal/agent.py
CHANGED
@@ -3,7 +3,7 @@ Purpose: Minimal agent template demonstrating basic ConnectOnion usage with a calculator tool
 LLM-Note:
   Dependencies: imports from [connectonion.Agent] | template file copied by [cli/commands/init.py, cli/commands/create.py] | default template for 'co create' and 'co init'
   Data flow: user query → Agent.input() → calculator tool called if math expression → eval() computes result → returns answer
-  State/Effects: no persistent state | single Agent.input() call | uses co/…
+  State/Effects: no persistent state | single Agent.input() call | uses co/gemini-2.5-pro model (OpenOnion hosted)
   Integration: template for 'co create --template minimal' | demonstrates function-as-tool pattern | shows system_prompt and model configuration
   Performance: single LLM call | eval() is fast
   Errors: ⚠️ Security: uses eval() - for demo only, not production safe
@@ -32,7 +32,7 @@ agent = Agent(
     name="calculator-agent",
     system_prompt="pls use the calculator tool to answer math questions",  # you can also pass a markdown file like system_prompt="path/to/your_markdown_file.md"
     tools=[calculator],  # tools can be python classes or functions
-    model="co/…
+    model="co/gemini-2.5-pro"  # co/gemini-2.5-pro is hosted by OpenOnion, you can use your own API key by setting OPENAI_API_KEY in .env
 )

 # Run the agent
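The template above sticks to the defaults; the new constructor surface from the agent.py diff (the `quiet` flag, the `log` parameter, and the `CONNECTONION_LOG` override) can be layered on top. A minimal sketch, where only the parameter names come from the diff and the tool and log path are hypothetical:

```python
import os
from connectonion import Agent

def calculator(expression: str) -> str:
    """Evaluate a math expression (demo only, eval() is not production safe)."""
    return str(eval(expression))

# Highest priority: the env var overrides whatever is passed via `log`.
os.environ["CONNECTONION_LOG"] = "/tmp/demo-agent.log"  # hypothetical path

agent = Agent(
    name="demo-agent",
    tools=[calculator],
    model="co/gemini-2.5-pro",  # new default model per the diff
    quiet=True,                 # new in 0.5.1: suppress console output, keep session logging
    log=None,                   # None → default .co/logs/{name}.log
)
print(agent.input("What is 15 * 8?"))
```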
connectonion/console.py
CHANGED
@@ -1,10 +1,10 @@
 """
 Purpose: Handle agent terminal output with Rich formatting and optional file logging
 LLM-Note:
-  Dependencies: imports from [sys, datetime, pathlib, typing, rich.console, rich.panel, rich.text] | imported by […
-  Data flow: receives from …
+  Dependencies: imports from [sys, datetime, pathlib, typing, rich.console, rich.panel, rich.text] | imported by [logger.py, tool_executor.py] | tested by [tests/test_console.py]
+  Data flow: receives from Logger/tool_executor → .print(), .log_tool_call(), .log_tool_result() → formats with timestamp → prints to stderr via RichConsole → optionally appends to log_file as plain text
   State/Effects: writes to stderr (not stdout, to avoid mixing with agent results) | writes to log_file if provided (plain text with timestamps) | creates log file parent directories if needed | appends session separator on init
-  Integration: exposes Console(log_file), .print(message, style), .…
+  Integration: exposes Console(log_file), .print(message, style), .log_tool_call(name, args), .log_tool_result(result, timing), .log_llm_response(), .print_xray_table() | tool calls formatted as natural function-call style: greet(name='Alice')
   Performance: direct stderr writes (no buffering delays) | Rich formatting uses stderr (separate from stdout results) | regex-based markup removal for log files
   Errors: no error handling (let I/O errors bubble up) | assumes log_file parent can be created | assumes stderr is available
 """
@@ -159,6 +159,58 @@ class Console:
             f.write(f"  result: {result_str}\n")
             f.write(f"  Execution time: {timing/1000:.4f}s | Iteration: {iteration}/{max_iterations} | Breakpoint: @xray\n\n")

+    def log_tool_call(self, tool_name: str, tool_args: Dict[str, Any]) -> None:
+        """Log tool call - separate from result for clarity.
+
+        Short: → Tool: greet(name='Alice')
+        Long:  → Tool: write_file(path='test.py',
+                                  content='...'
+               )
+        """
+        formatted_args = self._format_tool_args_list(tool_args)
+        single_line = ", ".join(formatted_args)
+
+        if len(single_line) < 60 and len(formatted_args) <= 2:
+            self.print(f"[blue]→[/blue] Tool: {tool_name}({single_line})")
+        elif len(formatted_args) == 1:
+            # Single long arg: put on same line, will wrap naturally
+            self.print(f"[blue]→[/blue] Tool: {tool_name}({formatted_args[0]})")
+        else:
+            # Multi-line: first arg on same line as bracket, rest indented
+            base_indent = " " * (9 + len(tool_name) + 1)  # align with after "("
+            lines = [f"[blue]→[/blue] Tool: {tool_name}({formatted_args[0]},"]
+            for arg in formatted_args[1:-1]:
+                lines.append(f"{base_indent}{arg},")
+            lines.append(f"{base_indent}{formatted_args[-1]})")
+            self.print("\n".join(lines))
+
+    def log_tool_result(self, result: str, timing_ms: float) -> None:
+        """Log tool result - separate line for clarity."""
+        result_preview = result[:80] + "..." if len(result) > 80 else result
+        result_preview = result_preview.replace('\n', '\\n')
+        time_str = f"{timing_ms/1000:.4f}s" if timing_ms < 100 else f"{timing_ms/1000:.1f}s"
+        self.print(f"[green]←[/green] Tool Result ({time_str}): {result_preview}")
+
+    def _format_tool_args_list(self, args: Dict[str, Any]) -> list:
+        """Format each arg as key='value' with 150 char limit per value.
+
+        Escapes newlines so each arg stays on one line.
+        """
+        parts = []
+        for k, v in args.items():
+            if isinstance(v, str):
+                # Escape newlines for single-line display
+                v_str = v.replace('\n', '\\n').replace('\r', '\\r')
+                if len(v_str) > 150:
+                    v_str = v_str[:150] + "..."
+                parts.append(f"{k}='{v_str}'")
+            else:
+                v_str = str(v)
+                if len(v_str) > 150:
+                    v_str = v_str[:150] + "..."
+                parts.append(f"{k}={v_str}")
+        return parts
+
     def log_llm_response(self, duration_ms: float, tool_count: int, usage) -> None:
         """Log LLM response with token usage."""
         total_tokens = usage.input_tokens + usage.output_tokens
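A quick sketch of how the two new Console methods render a call and its result. The method signatures come from the hunk above; the module path, tool name, and arguments are assumptions for illustration:

```python
import time
from connectonion.console import Console  # module path assumed from the file path in this diff

console = Console(log_file=None)  # stderr only, no file logging

start = time.time()
console.log_tool_call("greet", {"name": "Alice"})              # → Tool: greet(name='Alice')
result = "Hello, Alice!"
console.log_tool_result(result, (time.time() - start) * 1000)  # ← Tool Result (...s): Hello, Alice!
```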
connectonion/events.py
CHANGED
@@ -4,7 +4,7 @@ LLM-Note:
   Dependencies: None (standalone module) | imported by [agent.py, __init__.py] | tested by [tests/test_events.py]
   Data flow: Wrapper functions tag event handlers with _event_type attribute → Agent organizes handlers by type → Agent invokes handlers at specific lifecycle points passing agent instance
   State/Effects: Event handlers receive agent instance and can modify agent.current_session (messages, trace, etc.)
-  Integration: exposes after_user_input(), before_llm(), after_llm(), …
+  Integration: exposes after_user_input(), before_llm(), after_llm(), before_each_tool(), before_tools(), after_each_tool(), after_tools(), on_error(), on_complete()
   Performance: Minimal overhead - just function attribute checking and iteration over handler lists
   Errors: Event handler exceptions propagate and stop agent execution (fail fast)
 """
@@ -79,11 +79,11 @@ def after_llm(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]:
     return funcs[0] if len(funcs) == 1 else list(funcs)


-def …
+def before_each_tool(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]:
     """
-    Mark function(s) as …
+    Mark function(s) as before_each_tool event handlers.

-    Fires before …
+    Fires before EACH individual tool execution.
     Use for: validating arguments, approval prompts, logging.

     Access pending tool via agent.current_session['pending_tool']:
@@ -94,36 +94,115 @@ def before_tool(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]
     Raise an exception to cancel the tool execution.

     Supports both decorator and wrapper syntax:
-        @…
+        @before_each_tool
         def approve_dangerous(agent):
             ...

         # Multiple handlers
-        on_events=[…
+        on_events=[before_each_tool(check_shell, check_email)]
     """
     for fn in funcs:
-        fn._event_type = '…
+        fn._event_type = 'before_each_tool'  # type: ignore
     return funcs[0] if len(funcs) == 1 else list(funcs)


-def …
+def before_tools(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]:
     """
-    Mark function(s) as …
+    Mark function(s) as before_tools event handlers.

-    Fires …
-    …
+    Fires ONCE before ALL tools in a batch execute.
+
+    What is a "tools batch"?
+    When the LLM responds, it can request multiple tools at once. For example:
+        LLM Response: tool_calls = [search("python"), read_file("docs.md"), calculate(2+2)]
+
+    This group of tools from ONE LLM response is called a "tools batch".
+    - before_tools fires ONCE before the batch starts
+    - after_tools fires ONCE after ALL tools in the batch complete
+
+    Use for: batch validation, user approval before execution, setup.

     Supports both decorator and wrapper syntax:
-        @…
-        def …
+        @before_tools
+        def log_batch_start(agent):
+            ...
+
+        on_events=[before_tools(handler)]
+    """
+    for fn in funcs:
+        fn._event_type = 'before_tools'  # type: ignore
+    return funcs[0] if len(funcs) == 1 else list(funcs)
+
+
+def after_each_tool(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]:
+    """
+    Mark function(s) as after_each_tool event handlers.
+
+    Fires after EACH individual tool execution (success, error, or not_found).
+    Use for: logging individual tool performance, debugging.
+
+    ⚠️ WARNING: Do NOT add messages to agent.current_session['messages'] here!
+    When LLM returns multiple tool_calls, this fires after EACH tool, which would
+    interleave messages between tool results. This breaks Anthropic Claude's API
+    which requires all tool_results to immediately follow the tool_use message.
+
+    If you need to add messages after tools complete, use `after_tools` instead.
+
+    Supports both decorator and wrapper syntax:
+        @after_each_tool
+        def log_tool(agent):
             trace = agent.current_session['trace'][-1]
-            if trace['type'] == 'tool_execution'
-                print(f"Tool…
+            if trace['type'] == 'tool_execution':
+                print(f"Tool: {trace['tool_name']} in {trace['timing']:.0f}ms")
+
+        on_events=[after_each_tool(handler1, handler2)]
+    """
+    for fn in funcs:
+        fn._event_type = 'after_each_tool'  # type: ignore
+    return funcs[0] if len(funcs) == 1 else list(funcs)
+
+
+def after_tools(*funcs: EventHandler) -> Union[EventHandler, List[EventHandler]]:
+    """
+    Mark function(s) as after_tools event handlers.
+
+    Fires ONCE after ALL tools in a batch complete.
+
+    What is a "tools batch"?
+    When the LLM responds, it can request multiple tools at once. For example:
+        LLM Response: tool_calls = [search("python"), read_file("docs.md"), calculate(2+2)]
+
+    This group of tools from ONE LLM response is called a "tools batch".
+    - before_tools fires ONCE before the batch starts
+    - after_tools fires ONCE after ALL tools in the batch complete
+
+    This is the SAFE place to add messages to agent.current_session['messages']
+    after tool execution, because all tool_results have been added and message
+    ordering is correct for all LLM providers (including Anthropic Claude).
+
+    Message ordering when this event fires:
+        - assistant (with tool_calls)
+        - tool result 1
+        - tool result 2
+        - tool result N
+        - [YOUR MESSAGE HERE - safe to add]
+
+    Use for: reflection/reasoning injection, ReAct pattern, batch cleanup.
+
+    Supports both decorator and wrapper syntax:
+        @after_tools
+        def add_reflection(agent):
+            trace = agent.current_session['trace']
+            recent = [t for t in trace if t['type'] == 'tool_execution'][-3:]
+            agent.current_session['messages'].append({
+                'role': 'assistant',
+                'content': f"Completed {len(recent)} tools"
+            })

-        on_events=[…
+        on_events=[after_tools(add_reflection)]
     """
     for fn in funcs:
-        fn._event_type = '…
+        fn._event_type = 'after_tools'  # type: ignore
     return funcs[0] if len(funcs) == 1 else list(funcs)


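Putting the renamed wrappers together, a small sketch of a plugin that uses the batch-level and per-tool events as the docstrings above describe; the `search` tool and the handler bodies are hypothetical:

```python
from connectonion import Agent, before_tools, after_each_tool, after_tools

def search(query: str) -> str:
    """Hypothetical tool."""
    return f"results for {query}"

def announce_batch(agent):
    # Fires ONCE before all tools requested by a single LLM response.
    print("starting a tool batch")

def log_tool(agent):
    # Fires after EACH tool; inspect only, never append messages here.
    trace = agent.current_session['trace'][-1]
    if trace['type'] == 'tool_execution':
        print(f"{trace['tool_name']} -> {trace['status']}")

def add_reflection(agent):
    # Fires ONCE after the whole batch; the safe place to append a message.
    agent.current_session['messages'].append(
        {'role': 'assistant', 'content': 'Noting what the last tool batch produced.'}
    )

observer = [before_tools(announce_batch), after_each_tool(log_tool), after_tools(add_reflection)]
agent = Agent("assistant", tools=[search], plugins=[observer])
```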