connectonion 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. connectonion/__init__.py +78 -0
  2. connectonion/address.py +320 -0
  3. connectonion/agent.py +450 -0
  4. connectonion/announce.py +84 -0
  5. connectonion/asgi.py +287 -0
  6. connectonion/auto_debug_exception.py +181 -0
  7. connectonion/cli/__init__.py +3 -0
  8. connectonion/cli/browser_agent/__init__.py +5 -0
  9. connectonion/cli/browser_agent/browser.py +243 -0
  10. connectonion/cli/browser_agent/prompt.md +107 -0
  11. connectonion/cli/commands/__init__.py +1 -0
  12. connectonion/cli/commands/auth_commands.py +527 -0
  13. connectonion/cli/commands/browser_commands.py +27 -0
  14. connectonion/cli/commands/create.py +511 -0
  15. connectonion/cli/commands/deploy_commands.py +220 -0
  16. connectonion/cli/commands/doctor_commands.py +173 -0
  17. connectonion/cli/commands/init.py +469 -0
  18. connectonion/cli/commands/project_cmd_lib.py +828 -0
  19. connectonion/cli/commands/reset_commands.py +149 -0
  20. connectonion/cli/commands/status_commands.py +168 -0
  21. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
  22. connectonion/cli/docs/connectonion.md +1256 -0
  23. connectonion/cli/docs.md +123 -0
  24. connectonion/cli/main.py +148 -0
  25. connectonion/cli/templates/meta-agent/README.md +287 -0
  26. connectonion/cli/templates/meta-agent/agent.py +196 -0
  27. connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
  28. connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
  29. connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
  30. connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
  31. connectonion/cli/templates/minimal/README.md +56 -0
  32. connectonion/cli/templates/minimal/agent.py +40 -0
  33. connectonion/cli/templates/playwright/README.md +118 -0
  34. connectonion/cli/templates/playwright/agent.py +336 -0
  35. connectonion/cli/templates/playwright/prompt.md +102 -0
  36. connectonion/cli/templates/playwright/requirements.txt +3 -0
  37. connectonion/cli/templates/web-research/agent.py +122 -0
  38. connectonion/connect.py +128 -0
  39. connectonion/console.py +539 -0
  40. connectonion/debug_agent/__init__.py +13 -0
  41. connectonion/debug_agent/agent.py +45 -0
  42. connectonion/debug_agent/prompts/debug_assistant.md +72 -0
  43. connectonion/debug_agent/runtime_inspector.py +406 -0
  44. connectonion/debug_explainer/__init__.py +10 -0
  45. connectonion/debug_explainer/explain_agent.py +114 -0
  46. connectonion/debug_explainer/explain_context.py +263 -0
  47. connectonion/debug_explainer/explainer_prompt.md +29 -0
  48. connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
  49. connectonion/debugger_ui.py +1039 -0
  50. connectonion/decorators.py +208 -0
  51. connectonion/events.py +248 -0
  52. connectonion/execution_analyzer/__init__.py +9 -0
  53. connectonion/execution_analyzer/execution_analysis.py +93 -0
  54. connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
  55. connectonion/host.py +579 -0
  56. connectonion/interactive_debugger.py +342 -0
  57. connectonion/llm.py +801 -0
  58. connectonion/llm_do.py +307 -0
  59. connectonion/logger.py +300 -0
  60. connectonion/prompt_files/__init__.py +1 -0
  61. connectonion/prompt_files/analyze_contact.md +62 -0
  62. connectonion/prompt_files/eval_expected.md +12 -0
  63. connectonion/prompt_files/react_evaluate.md +11 -0
  64. connectonion/prompt_files/react_plan.md +16 -0
  65. connectonion/prompt_files/reflect.md +22 -0
  66. connectonion/prompts.py +144 -0
  67. connectonion/relay.py +200 -0
  68. connectonion/static/docs.html +688 -0
  69. connectonion/tool_executor.py +279 -0
  70. connectonion/tool_factory.py +186 -0
  71. connectonion/tool_registry.py +105 -0
  72. connectonion/trust.py +166 -0
  73. connectonion/trust_agents.py +71 -0
  74. connectonion/trust_functions.py +88 -0
  75. connectonion/tui/__init__.py +57 -0
  76. connectonion/tui/divider.py +39 -0
  77. connectonion/tui/dropdown.py +251 -0
  78. connectonion/tui/footer.py +31 -0
  79. connectonion/tui/fuzzy.py +56 -0
  80. connectonion/tui/input.py +278 -0
  81. connectonion/tui/keys.py +35 -0
  82. connectonion/tui/pick.py +130 -0
  83. connectonion/tui/providers.py +155 -0
  84. connectonion/tui/status_bar.py +163 -0
  85. connectonion/usage.py +161 -0
  86. connectonion/useful_events_handlers/__init__.py +16 -0
  87. connectonion/useful_events_handlers/reflect.py +116 -0
  88. connectonion/useful_plugins/__init__.py +20 -0
  89. connectonion/useful_plugins/calendar_plugin.py +163 -0
  90. connectonion/useful_plugins/eval.py +139 -0
  91. connectonion/useful_plugins/gmail_plugin.py +162 -0
  92. connectonion/useful_plugins/image_result_formatter.py +127 -0
  93. connectonion/useful_plugins/re_act.py +78 -0
  94. connectonion/useful_plugins/shell_approval.py +159 -0
  95. connectonion/useful_tools/__init__.py +44 -0
  96. connectonion/useful_tools/diff_writer.py +192 -0
  97. connectonion/useful_tools/get_emails.py +183 -0
  98. connectonion/useful_tools/gmail.py +1596 -0
  99. connectonion/useful_tools/google_calendar.py +613 -0
  100. connectonion/useful_tools/memory.py +380 -0
  101. connectonion/useful_tools/microsoft_calendar.py +604 -0
  102. connectonion/useful_tools/outlook.py +488 -0
  103. connectonion/useful_tools/send_email.py +205 -0
  104. connectonion/useful_tools/shell.py +97 -0
  105. connectonion/useful_tools/slash_command.py +201 -0
  106. connectonion/useful_tools/terminal.py +285 -0
  107. connectonion/useful_tools/todo_list.py +241 -0
  108. connectonion/useful_tools/web_fetch.py +216 -0
  109. connectonion/xray.py +467 -0
  110. connectonion-0.5.8.dist-info/METADATA +741 -0
  111. connectonion-0.5.8.dist-info/RECORD +113 -0
  112. connectonion-0.5.8.dist-info/WHEEL +4 -0
  113. connectonion-0.5.8.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,2010 @@
1
+ # ConnectOnion Framework - Complete Reference for AI Assistants
2
+
3
+ ## Context for AI Assistants
4
+
5
+ You are helping a developer who wants to use ConnectOnion, a Python framework for creating AI agents with behavior tracking. This document contains everything you need to help them write effective ConnectOnion code.
6
+
7
+ **Key Principles:**
8
+ - Keep simple things simple, make hard things possible
9
+ - Function-based tools are preferred over classes
10
+ - **For class-based tools: Pass instances directly (not individual methods)**
11
+ - Activity logging to .co/logs/ (Python SDK only)
12
+ - Default settings work for most use cases
13
+
14
+ ---
15
+
16
+ ## What is ConnectOnion?
17
+
18
+ ConnectOnion is a simple Python framework for creating AI agents that can use tools and track their behavior. Think of it as a way to build ChatGPT-like agents with custom tools.
19
+
20
+ **Core Features:**
21
+ - Turn regular Python functions into agent tools automatically
22
+ - Control agent behavior with max_iterations parameter
23
+ - Automatic behavior tracking and history
24
+ - System prompts for agent personality
25
+ - Built-in OpenAI integration
26
+ - Interactive debugging with @xray and agent.auto_debug()
27
+
28
+ ---
29
+
30
+ ## Installation & Setup
31
+
32
+ ```bash
33
+ pip install connectonion
34
+ ```
35
+
36
+ **Environment Setup:**
37
+ ```bash
38
+ export OPENAI_API_KEY="your-api-key-here"
39
+ # Or use .env file
40
+ ```
41
+
42
+ ---
43
+
44
+ ## CLI Reference - Quick Project Setup
45
+
46
+ ConnectOnion includes a CLI for quickly scaffolding agent projects.
47
+
48
+ ### Installation
49
+ The CLI is automatically installed with ConnectOnion:
50
+ ```bash
51
+ pip install connectonion
52
+ # Provides two commands: 'co' and 'connectonion'
53
+ ```
54
+
55
+ ### Initialize a Project
56
+
57
+ ```bash
58
+ # Create meta-agent (default) - ConnectOnion development assistant
59
+ mkdir meta-agent
60
+ cd meta-agent
61
+ co init
62
+
63
+ # Create web automation agent
64
+ mkdir playwright-agent
65
+ cd playwright-agent
66
+ co init --template playwright
67
+ ```
68
+
69
+ ### CLI Options
70
+
71
+ - `co init` - Initialize a new agent project
72
+ - `--template, -t` - Choose template: `meta-agent` (default), `playwright`, `basic` (alias)
73
+ - `--with-examples` - Include additional example tools
74
+ - `--force` - Overwrite existing files
75
+
76
+ ### What Gets Created
77
+
78
+ ```
79
+ my-project/
80
+ ├── agent.py # Main agent implementation
81
+ ├── prompt.md # System prompt (markdown)
82
+ ├── .env.example # Environment variables template
83
+ ├── .co/ # ConnectOnion metadata
84
+ │ ├── config.toml # Project configuration
85
+ │ └── docs/
86
+ │ └── connectonion.md # Embedded framework documentation
87
+ └── .gitignore # Git ignore rules (if in git repo)
88
+ ```
89
+
90
+ ### Available Templates
91
+
92
+ **Meta-Agent (Default)** - ConnectOnion development assistant with built-in tools:
93
+ - `answer_connectonion_question()` - Expert answers from embedded docs
94
+ - `create_agent_from_template()` - Generate complete agent code
95
+ - `generate_tool_code()` - Create tool functions
96
+ - `create_test_for_agent()` - Generate pytest test suites
97
+ - `think()` - Self-reflection to analyze task completion
98
+ - `generate_todo_list()` - Create structured plans (uses GPT-4o-mini)
99
+ - `suggest_project_structure()` - Architecture recommendations
100
+
101
+ **Playwright Template** - Web automation with stateful browser control:
102
+ - `start_browser()` - Launch browser instance
103
+ - `navigate()` - Go to URLs
104
+ - `scrape_content()` - Extract page content
105
+ - `fill_form()` - Fill and submit forms
106
+ - `take_screenshot()` - Capture pages
107
+ - `extract_links()` - Get all links
108
+ - `execute_javascript()` - Run JS code
109
+ - `close_browser()` - Clean up resources
110
+
111
+ Note: Playwright template requires `pip install playwright && playwright install`
112
+
113
+ ### Interactive Features
114
+
115
+ The CLI will:
116
+ - Warn if you're in a special directory (home, root, system)
117
+ - Ask for confirmation if the directory is not empty
118
+ - Automatically detect git repositories and update `.gitignore`
119
+ - Provide clear next steps after initialization
120
+
121
+ ### Quick Start After Init
122
+
123
+ ```bash
124
+ # 1. Copy environment template
125
+ cp .env.example .env
126
+
127
+ # 2. Add your OpenAI API key to .env
128
+ echo "OPENAI_API_KEY=sk-your-key-here" > .env
129
+
130
+ # 3. Run your agent
131
+ python agent.py
132
+ ```
133
+
134
+ ---
135
+
136
+ ## Quick Start Template
137
+
138
+ ```python
139
+ from connectonion import Agent
140
+
141
+ # 1. Define tools as regular functions
142
+ def search(query: str) -> str:
143
+ """Search for information."""
144
+ return f"Found information about {query}"
145
+
146
+ def calculate(expression: str) -> float:
147
+ """Perform mathematical calculations."""
148
+ return eval(expression)  # Demo only; use a safe expression parser in production
149
+
150
+ # 2. Create agent
151
+ agent = Agent(
152
+ name="my_assistant",
153
+ system_prompt="You are a helpful assistant.",
154
+ tools=[search, calculate]
155
+ # max_iterations=10 (default)
156
+ )
157
+
158
+ # 3. Use agent
159
+ result = agent.input("What is 25 * 4?")
160
+ print(result)
161
+ ```
162
+
163
+ **Example output (will vary):**
164
+
165
+ ```
166
+ 100
167
+ ```
168
+
169
+ ---
170
+
171
+ ## How ConnectOnion Works - The Agent Loop
172
+
173
+ ### Input → Processing → Output Flow
174
+
175
+ ```python
176
+ # 1. User provides input
177
+ result = agent.input("Search for Python tutorials and summarize them")
178
+
179
+ # 2. Agent processes in iterations:
180
+ # Iteration 1: LLM decides → "I need to search first"
181
+ # → Calls search("Python tutorials")
182
+ # → Gets result: "Found 10 tutorials about Python"
183
+
184
+ # Iteration 2: LLM continues → "Now I need to summarize"
185
+ # → Calls summarize("Found 10 tutorials...")
186
+ # → Gets result: "Summary: Python tutorials cover..."
187
+
188
+ # Iteration 3: LLM concludes → "Task complete"
189
+ # → Returns final answer (no more tool calls)
190
+
191
+ # 3. User gets final result
192
+ print(result) # "Here's a summary of Python tutorials: ..."
193
+ ```
194
+
195
+ ### The Agent Execution Loop
196
+
197
+ Each `agent.input()` call follows this pattern:
198
+
199
+ 1. **Setup**: Agent receives user prompt + system prompt
200
+ 2. **Loop** (up to `max_iterations` times):
201
+ - Send current conversation to LLM
202
+ - If LLM returns tool calls → execute them → add results to conversation
203
+ - If LLM returns text only → task complete, exit loop
204
+ 3. **Return**: Final LLM response to user
205
+
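+ In code, the loop is roughly the following (a minimal sketch for intuition only; `complete`, `as_message`, and `tool_schemas` are hypothetical names, not ConnectOnion internals):
+
+ ```python
+ # Minimal sketch of the agent loop (illustrative, not the actual implementation)
+ def run(agent, prompt: str, max_iterations: int = 10) -> str:
+     messages = [
+         {"role": "system", "content": agent.system_prompt},
+         {"role": "user", "content": prompt},
+     ]
+     for _ in range(max_iterations):
+         reply = agent.llm.complete(messages, tools=agent.tool_schemas)  # hypothetical call
+         if not reply.tool_calls:               # plain text only: task complete
+             return reply.content
+         messages.append(reply.as_message())    # record the assistant's tool request
+         for call in reply.tool_calls:          # execute each requested tool
+             result = agent.tools[call.name](**call.arguments)
+             messages.append({"role": "tool", "content": str(result), "tool_call_id": call.id})
+     return f"Task incomplete: Maximum iterations ({max_iterations}) reached."
+ ```
+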
206
+ ### Message Flow Example
207
+
208
+ ```python
209
+ # Internal conversation that builds up:
210
+
211
+ # Initial messages
212
+ [
213
+ {"role": "system", "content": "You are a helpful assistant..."},
214
+ {"role": "user", "content": "Search for Python tutorials and summarize"}
215
+ ]
216
+
217
+ # After iteration 1 (LLM called search tool)
218
+ [
219
+ {"role": "system", "content": "You are a helpful assistant..."},
220
+ {"role": "user", "content": "Search for Python tutorials and summarize"},
221
+ {"role": "assistant", "tool_calls": [{"name": "search", "arguments": {"query": "Python tutorials"}}]},
222
+ {"role": "tool", "content": "Found 10 tutorials about Python basics...", "tool_call_id": "call_1"}
223
+ ]
224
+
225
+ # After iteration 2 (LLM called summarize tool)
226
+ [
227
+ # ... previous messages ...
228
+ {"role": "assistant", "tool_calls": [{"name": "summarize", "arguments": {"text": "Found 10 tutorials..."}}]},
229
+ {"role": "tool", "content": "Summary: Python tutorials cover variables, functions...", "tool_call_id": "call_2"}
230
+ ]
231
+
232
+ # Final iteration (LLM provides answer)
233
+ [
234
+ # ... previous messages ...
235
+ {"role": "assistant", "content": "Here's a summary of Python tutorials: They cover..."}
236
+ ]
237
+ ```
238
+
239
+ ### Input/Output Types
240
+
241
+ **Input to `agent.input()`:**
242
+ - `prompt` (str): User's request/question
243
+ - `max_iterations` (optional int): Override iteration limit for this request
244
+
245
+ **Output from `agent.input()`:**
246
+ - String: Final LLM response to user
247
+ - If max iterations reached: `"Task incomplete: Maximum iterations (N) reached."`
248
+
249
+ **Tool Function Signatures:**
250
+ ```python
251
+ # Tools always follow this pattern:
252
+ def tool_name(param1: type, param2: type = default) -> return_type:
253
+ """Description for LLM."""
254
+ # Your logic here
255
+ return result # Must match return_type
256
+ ```
257
+
258
+ ### Automatic Behavior Tracking
259
+
260
+ Every `agent.input()` call creates a record:
261
+
262
+ ```python
263
+ # Automatic tracking in ~/.connectonion/agents/{name}/behavior.json
264
+ {
265
+ "timestamp": "2024-01-15T10:30:00",
266
+ "user_prompt": "Search for Python tutorials and summarize",
267
+ "tool_calls": [
268
+ {
269
+ "name": "search",
270
+ "arguments": {"query": "Python tutorials"},
271
+ "result": "Found 10 tutorials...",
272
+ "status": "success",
273
+ "timing": 245.3 # milliseconds
274
+ },
275
+ {
276
+ "name": "summarize",
277
+ "arguments": {"text": "Found 10 tutorials..."},
278
+ "result": "Summary: Python tutorials...",
279
+ "status": "success",
280
+ "timing": 156.7
281
+ }
282
+ ],
283
+ "result": "Here's a summary of Python tutorials...",
284
+ "duration": 2.34 # total seconds
285
+ }
286
+
287
+ # Access history
288
+ print(agent.history.summary()) # Human-readable summary
289
+ print(len(agent.history.records)) # Number of tasks completed
290
+ ```
291
+
292
+ ---
293
+
294
+ ## Core API Reference
295
+
296
+ ### Agent Class
297
+
298
+ ```python
299
+ class Agent:
300
+ def __init__(
301
+ self,
302
+ name: str,
303
+ llm: Optional[LLM] = None,
304
+ tools: Optional[List[Callable]] = None,
305
+ system_prompt: Union[str, Path, None] = None,
306
+ api_key: Optional[str] = None,
307
+ model: str = "gpt-4-mini",
308
+ max_iterations: int = 10
309
+ )
310
+
311
+ def input(self, prompt: str, max_iterations: Optional[int] = None) -> str:
312
+ """Send input to agent and get response."""
313
+
314
+ def add_tool(self, tool: Callable):
315
+ """Add a new tool to the agent."""
316
+
317
+ def remove_tool(self, tool_name: str) -> bool:
318
+ """Remove a tool by name."""
319
+
320
+ def list_tools(self) -> List[str]:
321
+ """List all available tool names."""
322
+ ```
323
+
324
+ ### Key Parameters Explained
325
+
326
+ **max_iterations** (Default: 10):
327
+ - Controls how many tool calls the agent can make per task
328
+ - Simple tasks: 3-5 iterations
329
+ - Standard workflows: 10-15 iterations
330
+ - Complex analysis: 20-40 iterations
331
+ - Research projects: 30-50 iterations
332
+
333
+ **system_prompt** (Recommended: Use markdown files):
334
+ - **Path/str: Load from file (RECOMMENDED)** - Keep prompts separate from code
335
+ - String: Direct prompt text (only for very simple cases)
336
+ - None: Uses default helpful assistant prompt
337
+
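+ Putting the two together (a short example; `prompts/analyst.md` and the `search` tool are placeholders you would define yourself):
+
+ ```python
+ from pathlib import Path
+ from connectonion import Agent
+
+ agent = Agent(
+     name="analyst",
+     system_prompt=Path("prompts/analyst.md"),  # file-based prompt (recommended)
+     tools=[search],                            # placeholder tool defined elsewhere
+     max_iterations=15,                         # moderate multi-step workflow
+ )
+ ```
+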
338
+ ---
339
+
340
+ ## Function-Based Tools (Recommended Approach)
341
+
342
+ ### Basic Tool Creation
343
+
344
+ ```python
345
+ def my_tool(param: str, optional_param: int = 10) -> str:
346
+ """This docstring becomes the tool description."""
347
+ return f"Processed {param} with value {optional_param}"
348
+
349
+ # Automatic conversion - just pass the function!
350
+ agent = Agent("assistant", tools=[my_tool])
351
+ ```
352
+
353
+ ### Tool Guidelines
354
+
355
+ **Type Hints are Required:**
356
+ ```python
357
+ # Good - clear types
358
+ def search(query: str, limit: int = 10) -> str:
359
+ return f"Found {limit} results for {query}"
360
+
361
+ # Bad - no type hints
362
+ def search(query, limit=10):
363
+ return f"Found {limit} results for {query}"
364
+ ```
365
+
366
+ **Docstrings Become Descriptions:**
367
+ ```python
368
+ def analyze_data(data: str, method: str = "standard") -> str:
369
+ """Analyze data using specified method.
370
+
371
+ Methods: standard, detailed, quick
372
+ """
373
+ return f"Analysis complete using {method} method"
374
+ ```
375
+
376
+ ### Tool Descriptions and Schemas (What the LLM Sees)
377
+
378
+ The first line of a tool's docstring is used as the human‑readable description. ConnectOnion also builds a JSON schema from the function signature and type hints.
379
+
380
+ ```python
381
+ from typing import Literal, Annotated
382
+
383
+ Priority = Literal["low", "normal", "high"]
384
+
385
+ def create_ticket(
386
+ title: str,
387
+ description: str,
388
+ priority: Priority = "normal",
389
+ assignee: Annotated[str, "email"] | None = None,
390
+ ) -> str:
391
+ """Create a ticket and return its ID."""
392
+ return "T-1024"
393
+
394
+ # Internally, the agent exposes a schema like this to the LLM:
395
+ schema = {
396
+ "name": "create_ticket",
397
+ "description": "Create a ticket and return its ID.",
398
+ "parameters": {
399
+ "type": "object",
400
+ "properties": {
401
+ "title": {"type": "string"},
402
+ "description": {"type": "string"},
403
+ "priority": {"enum": ["low", "normal", "high"]},
404
+ "assignee": {"type": "string"}
405
+ },
406
+ "required": ["title", "description"]
407
+ }
408
+ }
409
+ ```
410
+
411
+ Best practices for descriptions:
412
+
413
+ - Start with a concise, imperative one‑liner: “Create…”, “Search…”, “Summarize…”.
414
+ - Mention key constraints and side effects (“Sends network request”, “Writes to disk”).
415
+ - Clarify required vs optional parameters and valid ranges/enums.
416
+ - Prefer deterministic behavior; if not, state what is non‑deterministic.
417
+ - Keep the first line under ~90 characters; put further details on the following lines.
418
+
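+ A quick before/after applying these guidelines (hypothetical tool; adapt the wording to your domain):
+
+ ```python
+ # Vague: the LLM gets almost nothing to work with
+ def send(x: str) -> str:
+     """Send."""
+
+ # Clear: imperative first line, side effect and constraint called out
+ def send_invoice(customer_email: str, amount_cents: int) -> str:
+     """Send an invoice email to a customer. Sends a network request; amount_cents must be positive."""
+ ```
+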
419
+ ---
420
+
421
+ ## Stateful Tools with Playwright (Shared Context via Classes)
422
+
423
+ **✅ RECOMMENDED: Pass the class instance directly to ConnectOnion!**
424
+
425
+ ConnectOnion automatically discovers all public methods with type hints when you pass a class instance. This is much cleaner than listing methods individually.
426
+
427
+ Use a class instance when tools need to share state (browser, cache, DB handles). You can also mix class methods with regular function tools.
428
+
429
+ Prerequisites:
430
+
431
+ ```bash
432
+ pip install playwright
433
+ playwright install
434
+ ```
435
+
436
+ ```python
437
+ from connectonion import Agent
438
+
439
+ try:
440
+ from playwright.sync_api import sync_playwright
441
+ except ImportError:
442
+ raise SystemExit("Install Playwright: pip install playwright && playwright install")
443
+
444
+
445
+ class BrowserAutomation:
446
+ """Real browser session with shared context across tool calls."""
447
+
448
+ def __init__(self):
449
+ self._p = None
450
+ self._browser = None
451
+ self._page = None
452
+ self._screenshots: list[str] = []
453
+
454
+ def start_browser(self, headless: bool = True) -> str:
455
+ """Start a Chromium browser session."""
456
+ self._p = sync_playwright().start()
457
+ self._browser = self._p.chromium.launch(headless=headless)
458
+ self._page = self._browser.new_page()
459
+ return f"Browser started (headless={headless})"
460
+
461
+ def goto(self, url: str) -> str:
462
+ """Navigate to a URL and return the page title."""
463
+ if not self._page:
464
+ return "Error: Browser not started"
465
+ self._page.goto(url)
466
+ return self._page.title()
467
+
468
+ def screenshot(self, filename: str = "page.png") -> str:
469
+ """Save a screenshot and return the filename."""
470
+ if not self._page:
471
+ return "Error: Browser not started"
472
+ self._page.screenshot(path=filename)
473
+ self._screenshots.append(filename)
474
+ return filename
475
+
476
+ def close(self) -> str:
477
+ """Close resources and end the session."""
478
+ try:
479
+ if self._page:
480
+ self._page.close()
481
+ if self._browser:
482
+ self._browser.close()
483
+ if self._p:
484
+ self._p.stop()
485
+ return "Browser closed"
486
+ finally:
487
+ self._page = None
488
+ self._browser = None
489
+ self._p = None
490
+
491
+
492
+ def format_title(title: str) -> str:
493
+ """Format a page title for logs or UIs."""
494
+ return f"[PAGE] {title}"
495
+
496
+
497
+ # ✅ BEST PRACTICE: Pass class instances directly!
498
+ # ConnectOnion automatically extracts all public methods as tools
499
+ browser = BrowserAutomation()
500
+ agent = Agent(
501
+ name="web_assistant",
502
+ tools=[browser, format_title], # Mix class instance + functions
503
+ system_prompt="You are a web automation assistant. Be explicit about each step."
504
+ )
505
+
506
+ # Manual session (no LLM) — call tools directly
507
+ print(agent.tools.start_browser.run(headless=True))
508
+ title = agent.tools.goto.run("https://example.com")
509
+ print(agent.tools.format_title.run(title=title))
510
+ print(agent.tools.screenshot.run(filename="example.png"))
511
+ print(agent.tools.close.run())
512
+ ```
513
+
514
+ **Example output:**
515
+
516
+ ```
517
+ Browser started (headless=True)
518
+ [PAGE] Example Domain
519
+ example.png
520
+ Browser closed
521
+ ```
522
+
523
+ Agent‑driven session (LLM decides which tools to call):
524
+
525
+ ```python
526
+ # Natural language instruction — the agent chooses and orders tool calls
527
+ result = agent.input(
528
+ """
529
+ Open https://example.com, return the page title, take a screenshot named
530
+ example.png, then close the browser.
531
+ """
532
+ )
533
+ print(result)
534
+ ```
535
+
536
+ **Example output (simplified):**
537
+
538
+ ```
539
+ Title: Example Domain
540
+ Screenshot saved: example.png
541
+ Browser session closed.
542
+ ```
543
+
544
+ Why this pattern works:
545
+
546
+ - Class instance keeps shared state (browser/page) across calls.
547
+ - Function tools are great for lightweight utilities (formatting, parsing, saving records).
548
+ - The agent exposes both as callable tools with proper schemas and docstring descriptions.
549
+
550
+ ---
551
+
552
+ ## max_iterations Control
553
+
554
+ ### Basic Usage
555
+
556
+ ```python
557
+ # Default: 10 iterations (good for most tasks)
558
+ agent = Agent("helper", tools=[...])
559
+
560
+ # Simple tasks - fewer iterations
561
+ calc_agent = Agent("calculator", tools=[calculate], max_iterations=5)
562
+
563
+ # Complex tasks - more iterations
564
+ research_agent = Agent("researcher", tools=[...], max_iterations=25)
565
+ ```
566
+
567
+ ### Per-Request Override
568
+
569
+ ```python
570
+ agent = Agent("flexible", tools=[...])
571
+
572
+ # Normal task
573
+ result = agent.input("Simple question")
574
+
575
+ # Complex task needs more iterations
576
+ result = agent.input(
577
+ "Analyze all data and generate comprehensive report",
578
+ max_iterations=30
579
+ )
580
+ ```
581
+
582
+ ### When You Hit the Limit
583
+
584
+ ```python
585
+ # Error message when limit reached:
586
+ "Task incomplete: Maximum iterations (10) reached."
587
+
588
+ # Solutions:
589
+ # 1. Increase agent's default
590
+ agent.max_iterations = 20
591
+
592
+ # 2. Override for specific task
593
+ result = agent.input("complex task", max_iterations=25)
594
+
595
+ # 3. Break task into smaller parts
596
+ result1 = agent.input("First analyze the data")
597
+ result2 = agent.input(f"Based on {result1}, create summary")
598
+ ```
599
+
600
+ ---
601
+
602
+ ## System Prompts & Personality
603
+
604
+ **Best Practice: Use Markdown Files for System Prompts**
605
+
606
+ Keep your prompts separate from code for better maintainability, version control, and collaboration.
607
+
608
+ ### Recommended: Load from Markdown File
609
+
610
+ ```python
611
+ # ✅ RECOMMENDED: Load from markdown file
612
+ agent = Agent(
613
+ name="support_agent",
614
+ system_prompt="prompts/customer_support.md", # Clean separation
615
+ tools=[...]
616
+ )
617
+
618
+ # Using Path object (also good)
619
+ from pathlib import Path
620
+ agent = Agent(
621
+ name="data_analyst",
622
+ system_prompt=Path("prompts") / "data_analyst.md",
623
+ tools=[...]
624
+ )
625
+
626
+ # Any extension works (.md, .txt, .prompt, etc.)
627
+ agent = Agent(
628
+ name="coder",
629
+ system_prompt="prompts/senior_developer.txt",
630
+ tools=[...]
631
+ )
632
+ ```
633
+
634
+ ### Example Prompt File (`prompts/customer_support.md`)
635
+
636
+ ```markdown
637
+ # Customer Support Agent
638
+
639
+ You are a senior customer support specialist with 10+ years of experience.
640
+
641
+ ## Your Expertise
642
+ - Empathetic communication with frustrated customers
643
+ - Root cause analysis for technical issues
644
+ - Clear, step-by-step problem solving
645
+ - Escalation management
646
+
647
+ ## Guidelines
648
+ 1. **Always acknowledge** the customer's concern first
649
+ 2. **Ask clarifying questions** to understand the real problem
650
+ 3. **Provide actionable solutions** with clear next steps
651
+ 4. **Follow up** to ensure satisfaction
652
+
653
+ ## Tone
654
+ - Professional but warm
655
+ - Patient and understanding
656
+ - Confident in your recommendations
657
+ - Never dismissive of concerns
658
+
659
+ ## Example Responses
660
+ When a customer is frustrated:
661
+ > "I completely understand your frustration with this issue. Let me help you resolve this right away. Can you tell me exactly what happened when you tried to [action]?"
662
+ ```
663
+
664
+ ### Why Markdown Files Are Better
665
+
666
+ **✅ Advantages:**
667
+ - **Version Control**: Track prompt changes over time
668
+ - **Collaboration**: Team members can easily review and edit prompts
669
+ - **Readability**: Markdown formatting makes prompts clear and professional
670
+ - **Reusability**: Share prompts across different agents
671
+ - **No Code Pollution**: Keep business logic separate from implementation
672
+ - **IDE Support**: Syntax highlighting and formatting in markdown files
673
+
674
+ **❌ Avoid Inline Strings:**
675
+ ```python
676
+ # ❌ DON'T DO THIS - Hard to maintain
677
+ agent = Agent(
678
+ name="support",
679
+ system_prompt="You are a customer support agent. Be helpful and friendly. Always ask follow-up questions. Use empathetic language. Provide step-by-step solutions...", # This gets messy!
680
+ tools=[...]
681
+ )
682
+ ```
683
+
684
+ ### Advanced Prompt Organization
685
+
686
+ ```python
687
+ # Organize prompts by role/domain
688
+ # prompts/
689
+ # ├── customer_support/
690
+ # │   ├── tier1_support.md
691
+ # │   ├── technical_support.md
692
+ # │   └── billing_support.md
693
+ # ├── data_analysis/
694
+ # │   ├── financial_analyst.md
695
+ # │   └── research_analyst.md
696
+ # └── development/
697
+ #     ├── code_reviewer.md
698
+ #     └── senior_developer.md
699
+
700
+ # Load specific prompts
701
+ support_agent = Agent(
702
+ name="tier1_support",
703
+ system_prompt="prompts/customer_support/tier1_support.md",
704
+ tools=[create_ticket, search_kb, escalate]
705
+ )
706
+
707
+ analyst_agent = Agent(
708
+ name="financial_analyst",
709
+ system_prompt="prompts/data_analysis/financial_analyst.md",
710
+ tools=[fetch_data, analyze_trends, generate_report]
711
+ )
712
+ ```
713
+
714
+ ### Simple Cases Only
715
+
716
+ For very simple, single-line prompts, inline strings are acceptable:
717
+
718
+ ```python
719
+ # ✅ OK for simple cases
720
+ calculator = Agent(
721
+ name="calc",
722
+ system_prompt="You are a helpful calculator. Always show your work step by step.",
723
+ tools=[calculate]
724
+ )
725
+ ```
726
+
727
+ ---
728
+
729
+ ## Debugging with @xray
730
+
731
+ Debug your agent's tool execution with real-time insights - see what your AI agent is thinking.
732
+
733
+ ### Quick Start
734
+
735
+ ```python
736
+ from connectonion.decorators import xray
737
+
738
+ @xray
739
+ def my_tool(text: str) -> str:
740
+ """Process some text."""
741
+
742
+ # Now you can see inside the agent's mind!
743
+ print(xray.agent.name) # "my_assistant"
744
+ print(xray.task) # "Process this document"
745
+ print(xray.iteration) # 1, 2, 3...
746
+
747
+ return f"Processed: {text}"
748
+ ```
749
+
750
+ That's it! Add `@xray` to any tool to unlock debugging superpowers.
751
+
752
+ ### What You Can Access
753
+
754
+ Inside any `@xray` decorated function:
755
+
756
+ ```python
757
+ xray.agent # The Agent instance calling this tool
758
+ xray.task # Original request from user
759
+ xray.messages # Full conversation history
760
+ xray.iteration # Which round of tool calls (1-10)
761
+ xray.previous_tools # Tools called before this one
762
+ ```
763
+
764
+ ### Real Example
765
+
766
+ ```python
767
+ @xray
768
+ def search_database(query: str) -> str:
769
+ """Search our database."""
770
+
771
+ # See what led to this search
772
+ print(f"User asked: {xray.task}")
773
+ print(f"This is iteration {xray.iteration}")
774
+
775
+ if xray.previous_tools:
776
+ print(f"Already tried: {xray.previous_tools}")
777
+
778
+ # Adjust behavior based on context
779
+ if xray.iteration > 2:
780
+ return "No results found, please refine your search"
781
+
782
+ return f"Found 5 results for '{query}'"
783
+ ```
784
+
785
+ ### Visual Execution Trace
786
+
787
+ See the complete flow of your agent's work from inside a tool:
788
+
789
+ ```python
790
+ @xray
791
+ def analyze_data(text: str) -> str:
792
+ """Analyze data and show execution trace."""
793
+
794
+ # Show what happened so far
795
+ xray.trace()
796
+
797
+ return "Analysis complete"
798
+ ```
799
+
800
+ **Output:**
801
+ ```
802
+ Task: "Find Python tutorials and summarize them"
803
+
804
+ [1] • 89ms search_database(query="Python tutorials")
805
+ IN → query: "Python tutorials"
806
+ OUT ← "Found 5 results for 'Python tutorials'"
807
+
808
+ [2] • 234ms summarize_text(text="Found 5 results...", max_words=50)
809
+ IN → text: "Found 5 results for 'Python tutorials'"
810
+ IN → max_words: 50
811
+ OUT ← "5 Python tutorials found covering basics to advanced topics"
812
+
813
+ Total: 323ms • 2 steps • 1 iteration
814
+ ```
815
+
816
+ ### Debug in Your IDE
817
+
818
+ Set a breakpoint and explore:
819
+
820
+ ```python
821
+ @xray
822
+ def analyze_sentiment(text: str) -> str:
823
+ # 🎯 Set breakpoint on next line
824
+ sentiment = "positive" # When stopped here in debugger:
825
+ # >>> xray
826
+ # <XrayContext active>
827
+ # agent: 'my_bot'
828
+ # task: 'How do people feel about Python?'
829
+ # >>> xray.messages
830
+ # [{'role': 'user', 'content': '...'}, ...]
831
+
832
+ return sentiment
833
+ ```
834
+
835
+ ### Practical Use Cases
836
+
837
+ **1. Understand Why a Tool Was Called**
838
+ ```python
839
+ @xray
840
+ def emergency_shutdown():
841
+ """Shutdown the system."""
842
+
843
+ # Check why this drastic action was requested
844
+ print(f"Shutdown requested because: {xray.task}")
845
+ print(f"After trying: {xray.previous_tools}")
846
+
847
+ # Maybe don't shutdown if it's the first try
848
+ if xray.iteration == 1:
849
+ return "Try restarting first"
850
+
851
+ return "System shutdown complete"
852
+ ```
853
+
854
+ **2. Adaptive Tool Behavior**
855
+ ```python
856
+ @xray
857
+ def fetch_data(source: str) -> str:
858
+ """Fetch data from a source."""
859
+
860
+ # Use cache on repeated calls
861
+ if "fetch_data" in xray.previous_tools:
862
+ return "Using cached data"
863
+
864
+ # Fresh fetch on first call
865
+ return f"Fresh data from {source}"
866
+ ```
867
+
868
+ **3. Debug Complex Flows**
869
+ ```python
870
+ @xray
871
+ def process_order(order_id: str) -> str:
872
+ """Process an order."""
873
+
874
+ # See the full context when debugging
875
+ if xray.agent:
876
+ print(f"Processing for agent: {xray.agent.name}")
877
+ print(f"Original request: {xray.task}")
878
+ print(f"Conversation length: {len(xray.messages)}")
879
+
880
+ return f"Order {order_id} processed"
881
+ ```
882
+
883
+ ### Tips
884
+
885
+ 1. **Development Only** - Remove @xray in production for best performance
886
+ 2. **Combine with IDE** - Set breakpoints for interactive debugging
887
+ 3. **Use trace()** - Call `xray.trace()` to see full execution flow
888
+ 4. **Check context** - Always verify `xray.agent` exists before using
889
+
890
+ ### Common Patterns
891
+
892
+ **Logging What Matters:**
893
+ ```python
894
+ @xray
895
+ def important_action(data: str) -> str:
896
+ # Log with context
897
+ if xray.agent:
898
+ logger.info(f"Agent {xray.agent.name} performing action")
899
+ logger.info(f"Original task: {xray.task}")
900
+ logger.info(f"Iteration: {xray.iteration}")
901
+
902
+ return "Action completed"
903
+ ```
904
+
905
+ **Conditional Logic:**
906
+ ```python
907
+ @xray
908
+ def smart_search(query: str) -> str:
909
+ # Different strategies based on context
910
+ if xray.iteration > 1:
911
+ # Broaden search on retry
912
+ query = f"{query} OR related"
913
+
914
+ if "analyze" in xray.previous_tools:
915
+ # We already analyzed, search differently
916
+ query = f"summary of {query}"
917
+
918
+ return f"Results for: {query}"
919
+ ```
920
+
921
+ ---
922
+
923
+ ## Interactive Debugging with agent.auto_debug()
924
+
925
+ Debug your agents interactively - pause at breakpoints, inspect variables, and modify behavior in real-time.
926
+
927
+ ### Quick Start
928
+
929
+ ```python
930
+ from connectonion import Agent
931
+ from connectonion.decorators import xray
932
+
933
+ @xray # Tools with @xray become breakpoints
934
+ def search(query: str):
935
+ return f"Results for {query}"
936
+
937
+ agent = Agent("assistant", tools=[search])
938
+ agent.auto_debug() # Enable interactive debugging
939
+
940
+ # Agent will pause at @xray tools
941
+ agent.input("Search for Python tutorials")
942
+ ```
943
+
944
+ ### What Happens at Breakpoints
945
+
946
+ When execution pauses, you'll see:
947
+ ```
948
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
949
+ @xray BREAKPOINT: search
950
+
951
+ Local Variables:
952
+ query = "Python tutorials"
953
+ result = "Results for Python tutorials"
954
+
955
+ What do you want to do?
956
+ → Continue execution 🚀 [c or Enter]
957
+ Edit values 🔍 [e]
958
+ Quit debugging 🚫 [q]
959
+ >
960
+ ```
961
+
962
+ ### Debug Menu Commands
963
+
964
+ - **Continue** (`c` or Enter): Resume execution
965
+ - **Edit** (`e`): Open Python REPL to modify variables
966
+ - **Quit** (`q`): Stop debugging
967
+
968
+ ### Edit Values in Python REPL
969
+
970
+ ```python
971
+ > e
972
+
973
+ >>> # See current values
974
+ >>> query
975
+ 'Python tutorials'
976
+
977
+ >>> # Modify them
978
+ >>> query = "Python advanced tutorials"
979
+ >>> result = search(query)
980
+
981
+ >>> # Continue with changes
982
+ >>> /continue
983
+ ```
984
+
985
+ ### Use Cases
986
+
987
+ **1. Test "What If" Scenarios:**
988
+ ```python
989
+ @xray
990
+ def calculate_price(quantity: int):
991
+ return quantity * 10
992
+
993
+ agent.auto_debug()
994
+ agent.input("Calculate price for 5 items")
995
+
996
+ # At breakpoint:
997
+ # > e
998
+ # >>> quantity = 100 # Test bulk pricing
999
+ # >>> /continue
1000
+ ```
1001
+
1002
+ **2. Debug Wrong Decisions:**
1003
+ ```python
1004
+ @xray
1005
+ def search_contacts(name: str):
1006
+ # Agent searched for "Jon" but database has "John"
1007
+ contacts = {"John": "john@email.com"}
1008
+ return contacts.get(name, "Not found")
1009
+
1010
+ agent.auto_debug()
1011
+ agent.input("Email Jon about meeting")
1012
+
1013
+ # At breakpoint:
1014
+ # > e
1015
+ # >>> name = "John" # Fix typo
1016
+ # >>> result = search_contacts(name)
1017
+ # >>> /continue
1018
+ ```
1019
+
1020
+ **3. Understand Agent Behavior:**
1021
+ ```python
1022
+ # Just press 'c' at each breakpoint to see:
1023
+ # - What tools are called
1024
+ # - What parameters are used
1025
+ # - What results are returned
1026
+ # Perfect for learning how your agent works!
1027
+ ```
1028
+
1029
+ ### Tips
1030
+
1031
+ - **Development only**: Use `auto_debug()` during development, not production
1032
+ - **Combine with @xray**: Mark critical tools with `@xray` for breakpoints
1033
+ - **Press 'c' to skip**: If you just want to observe, press 'c' at each pause
1034
+ - **Full Python access**: In edit mode, you have full Python REPL access
1035
+
1036
+ For more details, see [docs/auto_debug.md](https://github.com/openonion/connectonion/blob/main/docs/auto_debug.md)
1037
+
1038
+ ---
1039
+
1040
+ ## Common Patterns & Examples
1041
+
1042
+ ### Pattern 1: Simple Calculator Bot
1043
+
1044
+ ```python
1045
+ def calculate(expression: str) -> float:
1046
+ """Perform mathematical calculations."""
1047
+ try:
1048
+ # Safe eval for demo - use proper parsing in production
1049
+ allowed = "0123456789+-*/(). "
1050
+ if all(c in allowed for c in expression):
1051
+ return eval(expression)
1052
+ else:
1053
+ raise ValueError("Invalid characters")
1054
+ except Exception as e:
1055
+ raise ValueError(f"Calculation error: {e}")
1056
+
1057
+ calc_agent = Agent(
1058
+ name="calculator",
1059
+ system_prompt="You are a helpful calculator. Always show your work.",
1060
+ tools=[calculate],
1061
+ max_iterations=5 # Math rarely needs many iterations
1062
+ )
1063
+
1064
+ result = calc_agent.input("What is (25 + 15) * 3?")
1065
+ ```
1066
+
1067
+ ### Pattern 2: Research Assistant
1068
+
1069
+ ```python
1070
+ def web_search(query: str, num_results: int = 5) -> str:
1071
+ """Search the web for information."""
1072
+ # Your search implementation
1073
+ return f"Found {num_results} results for '{query}'"
1074
+
1075
+ def summarize(text: str, length: str = "medium") -> str:
1076
+ """Summarize text content."""
1077
+ # Your summarization implementation
1078
+ return f"Summary ({length}): {text[:100]}..."
1079
+
1080
+ def save_notes(content: str, filename: str = "research.txt") -> str:
1081
+ """Save content to a file."""
1082
+ # Your file saving implementation
1083
+ return f"Saved content to {filename}"
1084
+
1085
+ research_agent = Agent(
1086
+ name="researcher",
1087
+ system_prompt="You are a thorough researcher who provides well-sourced information.",
1088
+ tools=[web_search, summarize, save_notes],
1089
+ max_iterations=25 # Research involves many steps
1090
+ )
1091
+
1092
+ result = research_agent.input(
1093
+ "Research the latest developments in quantum computing and save a summary"
1094
+ )
1095
+ ```
1096
+
1097
+ ### Pattern 3: File Analyzer
1098
+
1099
+ ```python
1100
+ def read_file(filepath: str) -> str:
1101
+ """Read contents of a text file."""
1102
+ try:
1103
+ with open(filepath, 'r', encoding='utf-8') as f:
1104
+ return f.read()
1105
+ except FileNotFoundError:
1106
+ return f"Error: File {filepath} not found"
1107
+ except Exception as e:
1108
+ return f"Error reading file: {e}"
1109
+
1110
+ def analyze_text(text: str, analysis_type: str = "summary") -> str:
1111
+ """Analyze text content."""
1112
+ if analysis_type == "summary":
1113
+ return f"Text summary: {len(text)} characters, {len(text.split())} words"
1114
+ elif analysis_type == "sentiment":
1115
+ return "Sentiment analysis: Neutral tone detected"
1116
+ else:
1117
+ return f"Analysis type '{analysis_type}' not supported"
1118
+
1119
+ def generate_report(findings: str, format: str = "markdown") -> str:
1120
+ """Generate a formatted report."""
1121
+ if format == "markdown":
1122
+ return f"# Analysis Report\n\n{findings}\n\nGenerated by ConnectOnion"
1123
+ else:
1124
+ return findings
1125
+
1126
+ file_agent = Agent(
1127
+ name="file_analyzer",
1128
+ system_prompt="You are a document analyst who provides detailed insights.",
1129
+ tools=[read_file, analyze_text, generate_report],
1130
+ max_iterations=15
1131
+ )
1132
+ ```
1133
+
1134
+ ### Pattern 4: Multi-Agent Coordination
1135
+
1136
+ ```python
1137
+ # Specialized agents for different tasks
1138
+ calculator = Agent("calc", tools=[calculate], max_iterations=5)
1139
+ researcher = Agent("research", tools=[web_search, summarize], max_iterations=20)
1140
+ writer = Agent("writer", tools=[generate_report, save_notes], max_iterations=10)
1141
+
1142
+ def coordinate_agents(task: str) -> str:
1143
+ """Coordinate multiple agents for complex tasks."""
1144
+ if "calculate" in task.lower():
1145
+ return calculator.input(task)
1146
+ elif "research" in task.lower():
1147
+ return researcher.input(task)
1148
+ elif "write" in task.lower():
1149
+ return writer.input(task)
1150
+ else:
1151
+ # Default to research agent for general tasks
1152
+ return researcher.input(task)
1153
+ ```
1154
+
1155
+ ---
1156
+
1157
+ ## Advanced Patterns
1158
+
1159
+ ### Auto-Retry with Increasing Limits
1160
+
1161
+ ```python
1162
+ def smart_input(agent: Agent, prompt: str, max_retries: int = 3) -> str:
1163
+ """Automatically retry with higher iteration limits if needed."""
1164
+ limits = [10, 25, 50]
1165
+
1166
+ for i, limit in enumerate(limits):
1167
+ result = agent.input(prompt, max_iterations=limit)
1168
+ if "Maximum iterations" not in result:
1169
+ return result
1170
+ if i < max_retries - 1:
1171
+ print(f"Retrying with {limits[i+1]} iterations...")
1172
+
1173
+ return "Task too complex even with maximum iterations"
1174
+
1175
+ # Usage
1176
+ agent = Agent("adaptive", tools=[...])
1177
+ result = smart_input(agent, "Complex multi-step task")
1178
+ ```
1179
+
1180
+ ### Self-Adjusting Agent
1181
+
1182
+ ```python
1183
+ class SmartAgent:
1184
+ def __init__(self, name: str, tools: list):
1185
+ self.agent = Agent(name, tools=tools)
1186
+ self.task_patterns = {
1187
+ 'simple': (['what', 'when', 'calculate'], 5),
1188
+ 'moderate': (['analyze', 'compare', 'summarize'], 15),
1189
+ 'complex': (['research', 'comprehensive', 'detailed'], 30)
1190
+ }
1191
+
1192
+ def input(self, prompt: str) -> str:
1193
+ # Detect complexity from keywords
1194
+ max_iter = 10 # default
1195
+ prompt_lower = prompt.lower()
1196
+
1197
+ for pattern_name, (keywords, iterations) in self.task_patterns.items():
1198
+ if any(keyword in prompt_lower for keyword in keywords):
1199
+ max_iter = iterations
1200
+ break
1201
+
1202
+ return self.agent.input(prompt, max_iterations=max_iter)
1203
+
1204
+ # Usage
1205
+ smart = SmartAgent("adaptive", tools=[...])
1206
+ smart.input("What is 2+2?") # Uses 5 iterations
1207
+ smart.input("Research and analyze market trends") # Uses 30 iterations
1208
+ ```
1209
+
1210
+ ---
1211
+
1212
+ ## Send Email - Built-in Email Capability
1213
+
1214
+ ConnectOnion includes built-in email functionality that allows agents to send emails with a single line of code. No configuration, no complexity.
1215
+
1216
+ ### Quick Start
1217
+
1218
+ ```python
1219
+ from connectonion import send_email
1220
+
1221
+ # Send an email with one line
1222
+ send_email("alice@example.com", "Welcome!", "Thanks for joining us!")
1223
+ ```
1224
+
1225
+ **Result:**
1226
+ ```python
1227
+ {'success': True, 'message_id': 'msg_123', 'from': '0x1234abcd@mail.openonion.ai'}
1228
+ ```
1229
+
1230
+ ### Core Concept
1231
+
1232
+ The `send_email` function provides:
1233
+ - Simple three-parameter interface: `send_email(to, subject, message)`
1234
+ - No API keys to manage (already configured)
1235
+ - Automatic email address for every agent
1236
+ - Professional delivery with good reputation
1237
+
1238
+ ### Your Agent's Email Address
1239
+
1240
+ Every agent automatically gets an email address:
1241
+ ```
1242
+ 0x1234abcd@mail.openonion.ai
1243
+ ```
1244
+
1245
+ - Based on your public key (first 10 characters)
1246
+ - Generated during `co init` or `co create`
1247
+ - Activated with `co auth`
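+ For illustration, the email is just the first 10 characters of the agent address (including the `0x` prefix) at the `mail.openonion.ai` domain:
+
+ ```python
+ address = "0x04e1c4ae3c57d716383153479dae869e51e86d43d88db8dfa22fba7533f3968d"
+ email = f"{address[:10]}@mail.openonion.ai"  # "0x04e1c4ae@mail.openonion.ai"
+ ```
+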
1248
+
1249
+ ### Email Configuration
1250
+
1251
+ Your email is stored in `.co/config.toml`:
1252
+ ```toml
1253
+ [agent]
1254
+ address = "0x04e1c4ae3c57d716383153479dae869e51e86d43d88db8dfa22fba7533f3968d"
1255
+ short_address = "0x04e1c4ae"
1256
+ email = "0x04e1c4ae@mail.openonion.ai"
1257
+ email_active = false # Becomes true after 'co auth'
1258
+ ```
1259
+
1260
+ ### Using with an Agent
1261
+
1262
+ Give your agent email capability:
1263
+
1264
+ ```python
1265
+ from connectonion import Agent, send_email
1266
+
1267
+ # Create an agent with email capability
1268
+ agent = Agent(
1269
+ "customer_support",
1270
+ tools=[send_email],
1271
+ instructions="You help users and send them email confirmations"
1272
+ )
1273
+
1274
+ # The agent can now send emails autonomously
1275
+ response = agent.input("Send a welcome email to alice@example.com")
1276
+ # Agent sends: send_email("alice@example.com", "Welcome!", "Thanks for joining...")
1277
+ ```
1278
+
1279
+ ### Real-World Monitoring Example
1280
+
1281
+ ```python
1282
+ from connectonion import Agent, send_email
1283
+
1284
+ def check_system_status() -> dict:
1285
+ """Check if the system is running properly."""
1286
+ cpu_usage = 95 # Simulated high CPU
1287
+ return {"status": "warning", "cpu": cpu_usage}
1288
+
1289
+ # Create monitoring agent
1290
+ monitor = Agent(
1291
+ "system_monitor",
1292
+ tools=[check_system_status, send_email],
1293
+ instructions="Monitor system health and alert admin@example.com if issues"
1294
+ )
1295
+
1296
+ # Agent checks system and sends alerts
1297
+ monitor("Check the system and alert if there are problems")
1298
+ # Agent will:
1299
+ # 1. Call check_system_status()
1300
+ # 2. See high CPU (95%)
1301
+ # 3. Call send_email("admin@example.com", "Alert: High CPU", "CPU at 95%...")
1302
+ ```
1303
+
1304
+ ### Return Values
1305
+
1306
+ **Success:**
1307
+ ```python
1308
+ {
1309
+ 'success': True,
1310
+ 'message_id': 'msg_123',
1311
+ 'from': '0x1234abcd@mail.openonion.ai'
1312
+ }
1313
+ ```
1314
+
1315
+ **Failure:**
1316
+ ```python
1317
+ {
1318
+ 'success': False,
1319
+ 'error': 'Rate limit exceeded'
1320
+ }
1321
+ ```
1322
+
1323
+ Common errors:
1324
+ - `"Rate limit exceeded"` - Hit your quota
1325
+ - `"Invalid email address"` - Check the recipient
1326
+ - `"Authentication failed"` - Token issue
1327
+
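+ A caller can branch on `success`, for example:
+
+ ```python
+ result = send_email("alice@example.com", "Welcome!", "Thanks for joining us!")
+ if result["success"]:
+     print(f"Sent from {result['from']} (id: {result['message_id']})")
+ else:
+     print(f"Send failed: {result['error']}")  # e.g. "Rate limit exceeded"
+ ```
+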
1328
+ ### Content Types
1329
+
1330
+ - **Plain text**: Just send a string
1331
+ - **HTML**: Include HTML tags, automatically detected
1332
+ - **Mixed**: HTML with plain text fallback
1333
+
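+ For example, including HTML tags in the message is enough to switch to HTML delivery:
+
+ ```python
+ send_email(
+     "bob@example.com",
+     "Monthly report",
+     "<h1>Monthly Report</h1><p>All systems <b>nominal</b>.</p>",  # detected as HTML
+ )
+ ```
+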
1334
+ ### Quotas & Limits
1335
+
1336
+ - **Free tier**: 100 emails/month
1337
+ - **Plus tier**: 1,000 emails/month
1338
+ - **Pro tier**: 10,000 emails/month
1339
+ - Automatic rate limiting with monthly reset
1340
+
1341
+ ---
1342
+
1343
+ ## llm_do - One-shot LLM Calls & When to Use AI vs Code
1344
+
1345
+ ### Core Principle: Use LLMs for Language, Code for Logic
1346
+
1347
+ **Fundamental rule**: If a task involves understanding, generating, or transforming natural language, use an LLM. If it's deterministic computation, use code.
1348
+
1349
+ ### Quick Start with llm_do
1350
+
1351
+ ```python
1352
+ from connectonion import llm_do
1353
+ from pydantic import BaseModel
1354
+
1355
+ # Simple one-shot call
1356
+ answer = llm_do("Summarize this in one sentence: The weather today is sunny with...")
1357
+ print(answer) # "The weather is sunny today."
1358
+
1359
+ # Structured output
1360
+ class EmailDraft(BaseModel):
1361
+ subject: str
1362
+ body: str
1363
+ tone: str
1364
+
1365
+ draft = llm_do(
1366
+ "Write an email thanking the team for their hard work",
1367
+ output=EmailDraft
1368
+ )
1369
+ print(draft.subject) # "Thank You Team"
1370
+ print(draft.tone) # "appreciative"
1371
+ ```
1372
+
1373
+ ### When to Use llm_do (LLM) vs Code
1374
+
1375
+ #### ✅ Use llm_do for:
1376
+
1377
+ 1. **Natural Language Generation** - Writing emails, messages, documents
1378
+ 2. **Content Understanding & Extraction** - Parse intent, extract structured data from text
1379
+ 3. **Translation & Transformation** - Language translation, tone conversion
1380
+ 4. **Summarization & Analysis** - Summaries, sentiment analysis, insights
1381
+ 5. **Creative Tasks** - Generating names, taglines, creative content
1382
+
1383
+ #### ❌ DON'T Use llm_do for:
1384
+
1385
+ 1. **Deterministic Calculations** - Math, date arithmetic, counters
1386
+ 2. **Data Lookups** - Database queries, file searches
1387
+ 3. **Simple Formatting** - Date formats, string manipulation
1388
+ 4. **Rule-Based Logic** - Validation, regex matching, conditionals
1389
+
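+ A small contrast, assuming the date math is the deterministic part and only the wording needs an LLM:
+
+ ```python
+ from datetime import date, timedelta
+ from connectonion import llm_do
+
+ # Deterministic computation: plain code
+ due = date.today() + timedelta(days=7)
+
+ # Natural language generation: llm_do
+ reminder = llm_do(f"Write a friendly one-sentence reminder that the report is due on {due}")
+ ```
+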
1390
+ ### Real-World Example: Email Manager
1391
+
1392
+ ```python
1393
+ class EmailManager:
1394
+ def draft_email(self, to: str, subject: str, context: str) -> str:
1395
+ """LLM composes, code formats."""
1396
+ class EmailDraft(BaseModel):
1397
+ subject: str
1398
+ body: str
1399
+ tone: str
1400
+
1401
+ # LLM: Natural language generation
1402
+ draft = llm_do(
1403
+ f"Write email to {to} about: {context}",
1404
+ output=EmailDraft,
1405
+ temperature=0.7
1406
+ )
1407
+
1408
+ # Code: Formatting and structure
1409
+ return f"To: {to}\nSubject: {draft.subject}\n\n{draft.body}"
1410
+
1411
+ def search_emails(self, query: str) -> List[Email]:
1412
+ """Code searches, LLM understands if needed."""
1413
+ # Code: Actual database/API call
1414
+ emails = get_emails(last=100)
1415
+
1416
+ # LLM: Only for natural language understanding
1417
+ if needs_parsing(query):
1418
+ params = llm_do(f"Parse: {query}", output=SearchParams)
1419
+ query = build_query(params)
1420
+
1421
+ # Code: Filtering logic
1422
+ return [e for e in emails if matches(e, query)]
1423
+ ```
1424
+
1425
+ ### Cost & Performance Principles
1426
+
1427
+ 1. **One-shot is cheaper than iterations** - Use llm_do for single tasks, Agent for multi-step workflows
1428
+ 2. **Always use structured output** - Pass Pydantic models to avoid parsing errors
1429
+ 3. **Cache prompts in files** - Reuse prompt files for consistency and maintainability
1430
+
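+ In practice that usually looks like this (sketch; `agent` is an Agent built elsewhere with the tools it needs):
+
+ ```python
+ # One-shot: a single LLM call, no tool loop
+ summary = llm_do("Summarize this changelog in two sentences: ...")
+
+ # Multi-step: the agent may iterate through several tool calls
+ report = agent.input("Research recent changes, then write and save a summary")
+ ```
+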
1431
+ ### Prompt Management Principle
1432
+
1433
+ **If a prompt is more than 3 lines, use a separate file:**
1434
+
1435
+ ```python
1436
+ # ❌ BAD: Long inline prompts clutter code
1437
+ draft = llm_do(
1438
+ """You are a professional email writer.
1439
+ Please write a formal business email that:
1440
+ - Uses appropriate business language
1441
+ - Includes a clear subject line
1442
+ - Has proper greeting and closing
1443
+ - Is concise but thorough
1444
+ Write about: {context}""",
1445
+ output=EmailDraft
1446
+ )
1447
+
1448
+ # ✅ GOOD: Clean separation of concerns
1449
+ draft = llm_do(
1450
+ context,
1451
+ system_prompt="prompts/email_writer.md", # Loads from file
1452
+ output=EmailDraft
1453
+ )
1454
+ ```
1455
+
1456
+ ### Guidelines for Tool Design
1457
+
1458
+ When creating tools for agents, follow this pattern:
1459
+
1460
+ ```python
1461
+ def my_tool(natural_input: str) -> str:
1462
+ """Tool that combines LLM understanding with code execution."""
1463
+
1464
+ # Step 1: Use LLM to understand intent (if needed)
1465
+ if needs_understanding(natural_input):
1466
+ intent = llm_do(
1467
+ f"What does user want: {natural_input}",
1468
+ output=IntentModel
1469
+ )
1470
+
1471
+ # Step 2: Use code for the actual work
1472
+ result = perform_action(intent)
1473
+
1474
+ # Step 3: Use LLM to format response (if needed)
1475
+ if needs_natural_response(result):
1476
+ response = llm_do(
1477
+ f"Explain this result conversationally: {result}",
1478
+ temperature=0.3
1479
+ )
1480
+ return response
1481
+
1482
+ return str(result)
1483
+ ```
1484
+
1485
+ ### Summary: The Right Tool for the Right Job
1486
+
1487
+ | Task | Use LLM | Use Code | Note |
1488
+ |------|---------|----------|------|
1489
+ | Writing emails | ✅ | ❌ | Natural language generation |
1490
+ | Extracting structured data | ✅ | ❌ | **Always use llm_do with Pydantic models** |
1491
+ | Parsing JSON from text | ✅ | ❌ | **Use llm_do with output=dict or custom model** |
1492
+ | Understanding intent | ✅ | ❌ | Natural language understanding |
1493
+ | Summarizing content | ✅ | ❌ | Language comprehension |
1494
+ | Translating text | ✅ | ❌ | Language transformation |
1495
+ | Database queries | ❌ | ✅ | Structured data access |
1496
+ | Math calculations | ❌ | ✅ | Deterministic computation |
1497
+ | Format validation | ❌ | ✅ | Rule-based patterns |
1498
+ | Date filtering | ❌ | ✅ | Simple comparisons |
1499
+
1500
+ **Remember**: LLMs are powerful but expensive. Use them for what they're best at - understanding and generating natural language. Use code for everything else.
1501
+
1502
+ ---
1503
+
1504
+ ## Plugin System - Reusable Event Bundles
1505
+
1506
+ Plugins are reusable event lists that package capabilities like reflection and reasoning for use across multiple agents.
1507
+
1508
+ ### Quick Start (60 seconds)
1509
+
1510
+ ```python
1511
+ from connectonion import Agent
1512
+ from connectonion.useful_plugins import reflection, react
1513
+
1514
+ # Add built-in plugins to any agent
1515
+ agent = Agent(
1516
+ name="assistant",
1517
+ tools=[search, calculate],
1518
+ plugins=[reflection, react] # One line adds both!
1519
+ )
1520
+
1521
+ agent.input("Search for Python and calculate 15 * 8")
1522
+
1523
+ # After each tool execution:
1524
+ # 💭 We learned that Python is a popular programming language...
1525
+ # 🤔 We should next calculate 15 * 8 to complete the request.
1526
+ ```
1527
+
1528
+ ### What is a Plugin?
1529
+
1530
+ **A plugin is an event list** - just like `on_events`, but reusable across agents:
1531
+
1532
+ ```python
1533
+ from connectonion import after_tools, after_each_tool, after_llm
1534
+
1535
+ # This is a plugin (one event list)
1536
+ reflection = [after_tools(add_reflection)] # after_tools for message injection
1537
+
1538
+ # This is also a plugin (multiple events in one list)
1539
+ logger = [after_llm(log_llm), after_each_tool(log_tool)] # after_each_tool for per-tool logging
1540
+
1541
+ # Use them (plugins takes a list of plugins)
1542
+ agent = Agent("assistant", tools=[search], plugins=[reflection, logger])
1543
+ ```
1544
+
1545
+ **Just like tools:**
1546
+ - Tools: `Agent(tools=[search, calculate])`
1547
+ - Plugins: `Agent(plugins=[reflection, logger])`
1548
+
1549
+ ### Plugin vs on_events
1550
+
1551
+ The difference:
1552
+ - **on_events**: Takes one event list (custom for this agent)
1553
+ - **plugins**: Takes a list of event lists (reusable across agents)
1554
+
1555
+ ```python
1556
+ # Reusable plugin (an event list)
1557
+ logger = [after_llm(log_llm)]
1558
+
1559
+ # Use both together
1560
+ agent = Agent(
1561
+ name="assistant",
1562
+ tools=[search],
1563
+ plugins=[logger], # List of event lists
1564
+ on_events=[after_llm(add_timestamp), after_each_tool(log_tool)] # One event list
1565
+ )
1566
+ ```
1567
+
1568
+ ### Built-in Plugins (useful_plugins)
1569
+
1570
+ ConnectOnion provides ready-to-use plugins:
1571
+
1572
+ **Reflection Plugin** - Generates insights after each tool execution:
1573
+
1574
+ ```python
1575
+ from connectonion import Agent
1576
+ from connectonion.useful_plugins import reflection
1577
+
1578
+ agent = Agent("assistant", tools=[search], plugins=[reflection])
1579
+
1580
+ agent.input("Search for Python")
1581
+ # 💭 We learned that Python is a popular high-level programming language known for simplicity
1582
+ ```
1583
+
1584
+ **ReAct Plugin** - Uses ReAct-style reasoning to plan next steps:
1585
+
1586
+ ```python
1587
+ from connectonion import Agent
1588
+ from connectonion.useful_plugins import react
1589
+
1590
+ agent = Agent("assistant", tools=[search], plugins=[react])
1591
+
1592
+ agent.input("Search for Python and explain it")
1593
+ # 🤔 We learned Python is widely used. We should next explain its key features and use cases.
1594
+ ```
1595
+
1596
+ **Image Result Formatter Plugin** - Converts base64 image results to proper image messages for vision models:
1597
+
1598
+ ```python
1599
+ from connectonion import Agent
1600
+ from connectonion.useful_plugins import image_result_formatter
1601
+
1602
+ agent = Agent("assistant", tools=[take_screenshot], plugins=[image_result_formatter])
1603
+
1604
+ agent.input("Take a screenshot of the homepage and describe what you see")
1605
+ # 🖼️ Formatted tool result as image (image/png)
1606
+ # Agent can now see and analyze the actual image, not just base64 text!
1607
+ ```
1608
+
1609
+ **When to use:** Tools that return screenshots, generated images, or any visual data as base64.
1610
+ **Supported formats:** PNG, JPEG, WebP, GIF
1611
+ **What it does:** Detects base64 images in tool results and converts them to OpenAI vision API format, allowing multimodal LLMs to see images visually instead of as text.
1612
+
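+ Roughly, the conversion turns a base64 payload into an OpenAI-style image content part (illustrative sketch only; the plugin's exact message shape may differ):
+
+ ```python
+ base64_png = "<base64-encoded PNG bytes>"  # e.g. a screenshot returned by a tool
+
+ vision_message = {
+     "role": "user",
+     "content": [
+         {"type": "text", "text": "Screenshot returned by the tool:"},
+         {"type": "image_url",
+          "image_url": {"url": f"data:image/png;base64,{base64_png}"}},
+     ],
+ }
+ ```
+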
1613
+ **Using Multiple Plugins Together:**
1614
+
1615
+ ```python
1616
+ from connectonion import Agent
1617
+ from connectonion.useful_plugins import reflection, react, image_result_formatter
1618
+
1619
+ # Combine plugins for powerful agents
1620
+ agent = Agent(
1621
+ name="visual_researcher",
1622
+ tools=[take_screenshot, search, analyze],
1623
+ plugins=[image_result_formatter, reflection, react]
1624
+ )
1625
+
1626
+ # Now you get:
1627
+ # 🖼️ Image formatting for screenshots
1628
+ # 💭 Reflection: What we learned
1629
+ # 🤔 ReAct: What to do next
1630
+ ```
1631
+
1632
+ ### Writing Custom Plugins
1633
+
1634
+ Learn by example - here's how the reflection plugin works:
1635
+
1636
+ **Step 1: Message Compression Helper**
1637
+
1638
+ ```python
1639
+ from typing import List, Dict
1640
+
1641
+ def _compress_messages(messages: List[Dict], tool_result_limit: int = 150) -> str:
1642
+ """
1643
+ Compress conversation messages with structure:
1644
+ - USER messages → Keep FULL
1645
+ - ASSISTANT tool_calls → Keep parameters FULL
1646
+ - ASSISTANT text → Keep FULL
1647
+ - TOOL results → Truncate to tool_result_limit chars
1648
+ """
1649
+ lines = []
1650
+
1651
+ for msg in messages:
1652
+ role = msg['role']
1653
+
1654
+ if role == 'user':
1655
+ lines.append(f"USER: {msg['content']}")
1656
+
1657
+ elif role == 'assistant':
1658
+ if 'tool_calls' in msg:
1659
+ tools = [f"{tc['function']['name']}({tc['function']['arguments']})"
1660
+ for tc in msg['tool_calls']]
1661
+ lines.append(f"ASSISTANT: {', '.join(tools)}")
1662
+ else:
1663
+ lines.append(f"ASSISTANT: {msg['content']}")
1664
+
1665
+ elif role == 'tool':
1666
+ result = msg['content']
1667
+ if len(result) > tool_result_limit:
1668
+ result = result[:tool_result_limit] + '...'
1669
+ lines.append(f"TOOL: {result}")
1670
+
1671
+ return "\n".join(lines)
1672
+ ```
1673
+
1674
+ **Why this works:**
1675
+ - Keep user messages FULL (need to know what they asked)
1676
+ - Keep tool parameters FULL (exactly what actions were taken)
1677
+ - Keep assistant text FULL (reasoning/responses)
1678
+ - Truncate tool results (save tokens while maintaining overview)
1679
+
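+ A quick example of the compression on a hypothetical conversation:
+
+ ```python
+ messages = [
+     {'role': 'user', 'content': 'Search for Python'},
+     {'role': 'assistant', 'tool_calls': [
+         {'function': {'name': 'search', 'arguments': '{"query": "Python"}'}}]},
+     {'role': 'tool', 'content': 'Python is a high-level, general-purpose programming language.'},
+ ]
+
+ print(_compress_messages(messages, tool_result_limit=40))
+ # USER: Search for Python
+ # ASSISTANT: search({"query": "Python"})
+ # TOOL: Python is a high-level, general-purpose ...
+ ```
+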
1680
+ **Step 2: Event Handler Function**
1681
+
1682
+ ```python
1683
+ from connectonion.events import after_tools
1684
+ from connectonion.llm_do import llm_do
1685
+
1686
+ def _add_reflection(agent) -> None:
1687
+ """Reflect on tool execution result"""
1688
+ trace = agent.current_session['trace'][-1]
1689
+
1690
+ if trace['type'] == 'tool_execution' and trace['status'] == 'success':
1691
+ # Extract current tool execution
1692
+ user_prompt = agent.current_session.get('user_prompt', '')
1693
+ tool_name = trace['tool_name']
1694
+ tool_args = trace['arguments']
1695
+ tool_result = trace['result']
1696
+
1697
+ # Compress conversation messages
1698
+ conversation = _compress_messages(agent.current_session['messages'])
1699
+
1700
+ # Build prompt with conversation context + current execution
1701
+ prompt = f"""CONVERSATION:
1702
+ {conversation}
1703
+
1704
+ CURRENT EXECUTION:
1705
+ User asked: {user_prompt}
1706
+ Tool: {tool_name}({tool_args})
1707
+ Result: {tool_result}
1708
+
1709
+ Reflect in 1-2 sentences on what we learned:"""
1710
+
1711
+ reflection_text = llm_do(
1712
+ prompt,
1713
+ model="co/gpt-4o",
1714
+ temperature=0.3,
1715
+ system_prompt="You reflect on tool execution results to generate insights."
1716
+ )
1717
+
1718
+ # Add reflection as assistant message
1719
+ agent.current_session['messages'].append({
1720
+ 'role': 'assistant',
1721
+ 'content': f"💭 {reflection_text}"
1722
+ })
1723
+
1724
+ agent.console.print(f"[dim]💭 {reflection_text}[/dim]")
1725
+ ```
1726
+
1727
+ **Key insights:**
1728
+ - Access agent state via `agent.current_session`
1729
+ - Use `llm_do()` for AI-powered analysis
1730
+ - Add results back to conversation messages
1731
+ - Print to console for user feedback
1732
+
1733
+ **Step 3: Create Plugin (Event List)**
1734
+
1735
+ ```python
1736
+ # Plugin is an event list
1737
+ reflection = [after_tools(_add_reflection)] # after_tools for message injection
1738
+ ```
1739
+
1740
+ **That's it!** A plugin is just an event list.
1741
+
1742
+ **Step 4: Use Your Plugin**
1743
+
1744
+ ```python
1745
+ agent = Agent("assistant", tools=[search], plugins=[reflection])
1746
+ ```
1747
+
1748
+ ### Quick Custom Plugin Example
1749
+
1750
+ Build a simple logging plugin in just a few lines:
1751
+
1752
+ ```python
1753
+ from connectonion import Agent, after_each_tool
1754
+
1755
+ def log_tool(agent):
1756
+ trace = agent.current_session['trace'][-1]
1757
+ print(f"✓ {trace['tool_name']} completed in {trace['timing']}ms")
1758
+
1759
+ # Plugin is an event list
1760
+ logger = [after_each_tool(log_tool)] # after_each_tool for per-tool logging
1761
+
1762
+ # Use it
1763
+ agent = Agent("assistant", tools=[search], plugins=[logger])
1764
+ ```
1765
+
1766
+ ### Reusing Plugins
1767
+
1768
+ Use the same plugin across multiple agents:
1769
+
1770
+ ```python
1771
+ # Define once
1772
+ reflection = [after_tools(add_reflection)] # after_tools for message injection
1773
+ logger = [after_llm(log_llm), after_each_tool(log_tool)] # after_each_tool for per-tool logging
1774
+
1775
+ # Use in multiple agents
1776
+ researcher = Agent("researcher", tools=[search], plugins=[reflection, logger])
1777
+ writer = Agent("writer", tools=[generate], plugins=[reflection])
1778
+ analyst = Agent("analyst", tools=[calculate], plugins=[logger])
1779
+ ```
1780
+
1781
+ ### Summary
1782
+
1783
+ **A plugin is an event list:**
1784
+
1785
+ ```python
1786
+ # Define a plugin (an event list)
1787
+ my_plugin = [after_llm(handler1), after_tools(handler2)] # after_tools for message injection
1788
+
1789
+ # Use it (plugins takes a list of event lists)
1790
+ agent = Agent("assistant", tools=[search], plugins=[my_plugin])
1791
+ ```
1792
+
1793
+ **on_events vs plugins:**
1794
+ - `on_events=[after_llm(h1), after_each_tool(h2)]` → one event list
1795
+ - `plugins=[plugin1, plugin2]` → list of event lists
1796
+
1797
+ **Event naming:**
1798
+ - `after_each_tool` → fires for EACH tool (per-tool logging/monitoring)
1799
+ - `after_tools` → fires ONCE after all tools (safe for message injection)
1800
+
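+ A small sketch of the difference, assuming an agent that makes two tool calls in one turn (`search` and `calculate` are placeholder tools):
+
+ ```python
+ def per_tool(agent):
+     print("ran after one tool")    # fires twice - once per tool call
+
+ def all_done(agent):
+     print("ran after all tools")   # fires once, after both tools finish
+
+ agent = Agent(
+     "assistant",
+     tools=[search, calculate],
+     on_events=[after_each_tool(per_tool), after_tools(all_done)],
+ )
+ ```
+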
1801
+ ---
1802
+
1803
+ ## Best Practices
1804
+
1805
+ ### Principles: Avoid over‑engineering with agents
1806
+
1807
+ - **Delegate interpretation to the agent**: Don't hard‑code parsing rules or use regex to extract parameters; let the agent interpret requests and decide tool arguments.
1808
+ - **Natural language output, not regex parsing**: Let the agent format its own responses naturally. Don't use regex to parse agent output - simply pass through the agent's natural language response. The AI knows how to communicate effectively with users.
1809
+ - **Prompt‑driven clarification**: Put concise follow‑up behavior in the system prompt so the agent asks for missing details (URL, viewport, full‑page, save path) before acting.
1810
+ - **Thin integration layer**: Keep wrappers like `execute_*` minimal—construct the agent, call `agent.input(...)`, and return the natural language response directly (see the sketch after this list).
1811
+ - **No heuristic fallbacks**: If AI is unavailable (e.g., missing API key), return a clear error instead of attempting clever fallback logic.
1812
+ - **Fail fast and clearly**: Only catch exceptions when you can improve user feedback; otherwise surface the error with a short, actionable message.
1813
+ - **Sane defaults, minimal knobs**: Tools should have sensible defaults; the agent overrides them via tool arguments as needed.
1814
+ - **Single source of truth in prompts**: Centralize behavior (clarification rules, parameter choices) in markdown prompts, not scattered in code.
1815
+ - **Test at the seam**: Mock `Agent.input` in tests and validate outcomes; avoid baking tests around internal parsing/branches that shouldn’t exist.
1816
+ - **Extract helpers sparingly**: Factor out helpers only when reused across multiple places; otherwise inline to reduce cognitive load.
1817
+ - **Prefer clarity over cleverness**: Favor descriptive, actionable errors over complex branches trying to “guess” behavior.
1818
+ - **Give the agent interaction budget**: Set `max_iterations` high enough to allow clarification turns rather than coding preemptive guesswork.
1819
+ - **Keep demos separate**: Place advanced flows in examples; keep the core CLI path straightforward and predictable.
1820
+
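+ A minimal sketch of a thin `execute_*` wrapper and a seam-level test (every name here is illustrative, not part of the library):
+
+ ```python
+ from unittest.mock import patch
+ from connectonion import Agent
+
+ def take_screenshot(url: str, full_page: bool = True) -> str:
+     """Capture a screenshot of a URL (stub for illustration)."""
+     return f"Saved screenshot of {url}"
+
+ def execute_screenshot(request: str) -> str:
+     """Thin wrapper: build the agent, let it interpret the request, return its reply."""
+     agent = Agent(
+         name="screenshot",
+         system_prompt="prompts/screenshot.md",  # clarification rules live in the prompt
+         tools=[take_screenshot],
+         max_iterations=15,  # leave room for clarification turns
+     )
+     return agent.input(request)
+
+ def test_execute_screenshot():
+     """Test at the seam: mock Agent.input and validate the outcome."""
+     with patch.object(Agent, "input", return_value="Saved screenshot of https://example.com"):
+         assert "Saved screenshot" in execute_screenshot("Screenshot example.com, full page")
+ ```
+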
1821
+ ### Tool Design
1822
+
1823
+ ✅ **Good:**
1824
+ ```python
1825
+ def search_papers(query: str, max_results: int = 10, field: str = "all") -> str:
1826
+ """Search academic papers with specific parameters."""
1827
+ return f"Found {max_results} papers about '{query}' in {field}"
1828
+ ```
1829
+
1830
+ ❌ **Avoid:**
1831
+ ```python
1832
+ def search(q, n=10): # No type hints
1833
+ return "some results" # Vague return
1834
+ ```
1835
+
1836
+ ### Error Handling
1837
+
1838
+ ✅ **Good:**
1839
+ ```python
1840
+ def read_file(filepath: str) -> str:
1841
+ """Read file with proper error handling."""
1842
+ try:
1843
+ with open(filepath, 'r') as f:
1844
+ return f.read()
1845
+ except FileNotFoundError:
1846
+ return f"Error: File '{filepath}' not found"
1847
+ except PermissionError:
1848
+ return f"Error: Permission denied for '{filepath}'"
1849
+ except Exception as e:
1850
+ return f"Error reading file: {e}"
1851
+ ```
1852
+
1853
+ ### Agent Configuration
1854
+
1855
+ ✅ **Good:**
1856
+ ```python
1857
+ # Clear purpose, markdown prompts, appropriate limits
1858
+ data_analyst = Agent(
1859
+ name="data_analyst",
1860
+ system_prompt="prompts/data_scientist.md", # Use markdown files!
1861
+ tools=[load_data, analyze_stats, create_visualization],
1862
+ max_iterations=20 # Data analysis can be multi-step
1863
+ )
1864
+ ```
1865
+
1866
+ ❌ **Avoid:**
1867
+ ```python
1868
+ # Vague purpose, inline prompts, arbitrary limits
1869
+ agent = Agent(
1870
+ name="agent",
1871
+ system_prompt="You are an agent that does stuff. Be helpful and do things when asked. Always be polite and provide good answers...", # Too long inline!
1872
+ tools=[lots_of_random_tools],
1873
+ max_iterations=100 # Way too high
1874
+ )
1875
+ ```
1876
+
1877
+ ### System Prompt Best Practices
1878
+
1879
+ ✅ **Use Markdown Files:**
1880
+ ```python
1881
+ # Recommended approach
1882
+ agent = Agent(
1883
+ name="support_specialist",
1884
+ system_prompt="prompts/customer_support.md",
1885
+ tools=[create_ticket, search_kb]
1886
+ )
1887
+ ```
1888
+
1889
+ ❌ **Avoid Inline Strings:**
1890
+ ```python
1891
+ # Hard to maintain and review
1892
+ agent = Agent(
1893
+ name="support_specialist",
1894
+ system_prompt="You are a customer support specialist. Always be empathetic. Ask clarifying questions. Provide step-by-step solutions. Use professional language...",
1895
+ tools=[create_ticket, search_kb]
1896
+ )
1897
+ ```
1898
+
1899
+ ---
1900
+
1901
+ ## Troubleshooting Guide
1902
+
1903
+ ### Common Issues & Solutions
1904
+
1905
+ **Issue: "Maximum iterations reached"**
1906
+ ```python
1907
+ # Check what happened
1908
+ if "Maximum iterations" in result:
1909
+ # Look at the last record to see what went wrong
1910
+ last_record = agent.history.records[-1]
1911
+ for tool_call in last_record.tool_calls:
1912
+ if tool_call['status'] == 'error':
1913
+ print(f"Tool {tool_call['name']} failed: {tool_call['result']}")
1914
+
1915
+ # Solutions:
1916
+ # 1. Increase iterations
1917
+ result = agent.input(prompt, max_iterations=30)
1918
+
1919
+ # 2. Break down the task
1920
+ step1 = agent.input("First, analyze the data")
1921
+ step2 = agent.input(f"Based on {step1}, create summary")
1922
+ ```
1923
+
1924
+ **Issue: Tools not working**
1925
+ ```python
1926
+ # Check tool registration
1927
+ print(agent.list_tools()) # See what tools are available
1928
+
1929
+ # Check tool schemas
1930
+ for tool in agent.tools:
1931
+ print(tool.to_function_schema())
1932
+ ```
1933
+
1934
+ **Issue: Unexpected behavior**
1935
+ ```python
1936
+ # Use @xray for debugging
1937
+ @xray
1938
+ def debug_tool(input: str) -> str:
1939
+ context = get_xray_context()
1940
+ print(f"Iteration: {context.iteration}")
1941
+ print(f"Previous tools: {context.previous_tools}")
1942
+ return f"Processed: {input}"
1943
+ ```
1944
+
1945
+ ---
1946
+
1947
+ ## When to Use ConnectOnion
1948
+
1949
+ ### Good Use Cases
1950
+ - Building custom AI assistants with specific tools
1951
+ - Automating workflows that need multiple steps
1952
+ - Creating domain-specific chatbots (customer support, data analysis, etc.)
1953
+ - Prototyping agent behaviors with automatic tracking
1954
+ - Educational projects to understand agent architectures
1955
+
1956
+ ### Not Ideal For
1957
+ - Simple single-function calls (just call the function directly)
1958
+ - Real-time applications requiring <100ms response times
1959
+ - Production systems without proper error handling and security
1960
+ - Tasks that don't benefit from LLM reasoning
1961
+
1962
+ ---
1963
+
1964
+ ## Links & Resources
1965
+
1966
+ - **GitHub**: https://github.com/openonion/connectonion
1967
+ - **PyPI**: https://pypi.org/project/connectonion/
1968
+ - **Latest Version**: 0.5.8
1969
+
1970
+ ---
1971
+
1972
+ ## AI Assistant Instructions
1973
+
1974
+ When helping users with ConnectOnion:
1975
+
1976
+ 1. **Start Simple**: Use the basic patterns first, add complexity only when needed
1977
+ 2. **Type Hints**: Always include proper type hints in tool functions
1978
+ 3. **Error Handling**: Add try/except blocks for robust tools
1979
+ 4. **Iteration Limits**: Help users choose appropriate max_iterations based on task complexity
1980
+ 5. **Debugging**: Suggest @xray decorator when users have issues
1981
+ 6. **Best Practices**: Guide users toward function-based tools over complex classes
1982
+ 7. **Class Instance Tools**: Always recommend passing class instances directly rather than individual methods
1983
+
1984
+ ## Class Instance vs Individual Methods - Key Teaching Point
1985
+
1986
+ **✅ ALWAYS RECOMMEND THIS (Clean & Automatic):**
1987
+ ```python
1988
+ browser = BrowserAutomation()
1989
+ agent = Agent("browser_agent", tools=[browser]) # Auto-discovers all methods!
1990
+ ```
1991
+
1992
+ **❌ AVOID RECOMMENDING THIS (Verbose & Error-prone):**
1993
+ ```python
1994
+ browser = BrowserAutomation()
1995
+ agent = Agent("browser_agent", tools=[
1996
+ browser.start_browser,
1997
+ browser.navigate,
1998
+ browser.take_screenshot,
1999
+ # ... listing every method manually
2000
+ ])
2001
+ ```
2002
+
2003
+ **Why Class Instances Are Better:**
2004
+ - Much cleaner code - one line instead of many
2005
+ - Automatic method discovery - no manual listing required
2006
+ - Less maintenance - add methods to class, they're auto-available
2007
+ - No forgotten methods - everything gets included automatically
2008
+ - This is how ConnectOnion was designed to be used
2009
+
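+ For reference, a hypothetical `BrowserAutomation` class in this style - each public method becomes a tool when the instance is passed to `Agent(tools=[...])`:
+
+ ```python
+ class BrowserAutomation:
+     """Stub for illustration; real methods would drive an actual browser."""
+
+     def start_browser(self, headless: bool = True) -> str:
+         """Start a browser session."""
+         return f"Browser started (headless={headless})"
+
+     def navigate(self, url: str) -> str:
+         """Navigate to a URL."""
+         return f"Navigated to {url}"
+
+     def take_screenshot(self, path: str = "screenshot.png") -> str:
+         """Save a screenshot of the current page."""
+         return f"Screenshot saved to {path}"
+ ```
+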
2010
+ Remember: ConnectOnion is designed to make simple things simple and hard things possible. Start with the basics and build up complexity gradually.