connectonion-0.5.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. connectonion/__init__.py +78 -0
  2. connectonion/address.py +320 -0
  3. connectonion/agent.py +450 -0
  4. connectonion/announce.py +84 -0
  5. connectonion/asgi.py +287 -0
  6. connectonion/auto_debug_exception.py +181 -0
  7. connectonion/cli/__init__.py +3 -0
  8. connectonion/cli/browser_agent/__init__.py +5 -0
  9. connectonion/cli/browser_agent/browser.py +243 -0
  10. connectonion/cli/browser_agent/prompt.md +107 -0
  11. connectonion/cli/commands/__init__.py +1 -0
  12. connectonion/cli/commands/auth_commands.py +527 -0
  13. connectonion/cli/commands/browser_commands.py +27 -0
  14. connectonion/cli/commands/create.py +511 -0
  15. connectonion/cli/commands/deploy_commands.py +220 -0
  16. connectonion/cli/commands/doctor_commands.py +173 -0
  17. connectonion/cli/commands/init.py +469 -0
  18. connectonion/cli/commands/project_cmd_lib.py +828 -0
  19. connectonion/cli/commands/reset_commands.py +149 -0
  20. connectonion/cli/commands/status_commands.py +168 -0
  21. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
  22. connectonion/cli/docs/connectonion.md +1256 -0
  23. connectonion/cli/docs.md +123 -0
  24. connectonion/cli/main.py +148 -0
  25. connectonion/cli/templates/meta-agent/README.md +287 -0
  26. connectonion/cli/templates/meta-agent/agent.py +196 -0
  27. connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
  28. connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
  29. connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
  30. connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
  31. connectonion/cli/templates/minimal/README.md +56 -0
  32. connectonion/cli/templates/minimal/agent.py +40 -0
  33. connectonion/cli/templates/playwright/README.md +118 -0
  34. connectonion/cli/templates/playwright/agent.py +336 -0
  35. connectonion/cli/templates/playwright/prompt.md +102 -0
  36. connectonion/cli/templates/playwright/requirements.txt +3 -0
  37. connectonion/cli/templates/web-research/agent.py +122 -0
  38. connectonion/connect.py +128 -0
  39. connectonion/console.py +539 -0
  40. connectonion/debug_agent/__init__.py +13 -0
  41. connectonion/debug_agent/agent.py +45 -0
  42. connectonion/debug_agent/prompts/debug_assistant.md +72 -0
  43. connectonion/debug_agent/runtime_inspector.py +406 -0
  44. connectonion/debug_explainer/__init__.py +10 -0
  45. connectonion/debug_explainer/explain_agent.py +114 -0
  46. connectonion/debug_explainer/explain_context.py +263 -0
  47. connectonion/debug_explainer/explainer_prompt.md +29 -0
  48. connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
  49. connectonion/debugger_ui.py +1039 -0
  50. connectonion/decorators.py +208 -0
  51. connectonion/events.py +248 -0
  52. connectonion/execution_analyzer/__init__.py +9 -0
  53. connectonion/execution_analyzer/execution_analysis.py +93 -0
  54. connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
  55. connectonion/host.py +579 -0
  56. connectonion/interactive_debugger.py +342 -0
  57. connectonion/llm.py +801 -0
  58. connectonion/llm_do.py +307 -0
  59. connectonion/logger.py +300 -0
  60. connectonion/prompt_files/__init__.py +1 -0
  61. connectonion/prompt_files/analyze_contact.md +62 -0
  62. connectonion/prompt_files/eval_expected.md +12 -0
  63. connectonion/prompt_files/react_evaluate.md +11 -0
  64. connectonion/prompt_files/react_plan.md +16 -0
  65. connectonion/prompt_files/reflect.md +22 -0
  66. connectonion/prompts.py +144 -0
  67. connectonion/relay.py +200 -0
  68. connectonion/static/docs.html +688 -0
  69. connectonion/tool_executor.py +279 -0
  70. connectonion/tool_factory.py +186 -0
  71. connectonion/tool_registry.py +105 -0
  72. connectonion/trust.py +166 -0
  73. connectonion/trust_agents.py +71 -0
  74. connectonion/trust_functions.py +88 -0
  75. connectonion/tui/__init__.py +57 -0
  76. connectonion/tui/divider.py +39 -0
  77. connectonion/tui/dropdown.py +251 -0
  78. connectonion/tui/footer.py +31 -0
  79. connectonion/tui/fuzzy.py +56 -0
  80. connectonion/tui/input.py +278 -0
  81. connectonion/tui/keys.py +35 -0
  82. connectonion/tui/pick.py +130 -0
  83. connectonion/tui/providers.py +155 -0
  84. connectonion/tui/status_bar.py +163 -0
  85. connectonion/usage.py +161 -0
  86. connectonion/useful_events_handlers/__init__.py +16 -0
  87. connectonion/useful_events_handlers/reflect.py +116 -0
  88. connectonion/useful_plugins/__init__.py +20 -0
  89. connectonion/useful_plugins/calendar_plugin.py +163 -0
  90. connectonion/useful_plugins/eval.py +139 -0
  91. connectonion/useful_plugins/gmail_plugin.py +162 -0
  92. connectonion/useful_plugins/image_result_formatter.py +127 -0
  93. connectonion/useful_plugins/re_act.py +78 -0
  94. connectonion/useful_plugins/shell_approval.py +159 -0
  95. connectonion/useful_tools/__init__.py +44 -0
  96. connectonion/useful_tools/diff_writer.py +192 -0
  97. connectonion/useful_tools/get_emails.py +183 -0
  98. connectonion/useful_tools/gmail.py +1596 -0
  99. connectonion/useful_tools/google_calendar.py +613 -0
  100. connectonion/useful_tools/memory.py +380 -0
  101. connectonion/useful_tools/microsoft_calendar.py +604 -0
  102. connectonion/useful_tools/outlook.py +488 -0
  103. connectonion/useful_tools/send_email.py +205 -0
  104. connectonion/useful_tools/shell.py +97 -0
  105. connectonion/useful_tools/slash_command.py +201 -0
  106. connectonion/useful_tools/terminal.py +285 -0
  107. connectonion/useful_tools/todo_list.py +241 -0
  108. connectonion/useful_tools/web_fetch.py +216 -0
  109. connectonion/xray.py +467 -0
  110. connectonion-0.5.8.dist-info/METADATA +741 -0
  111. connectonion-0.5.8.dist-info/RECORD +113 -0
  112. connectonion-0.5.8.dist-info/WHEEL +4 -0
  113. connectonion-0.5.8.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,336 @@
+ """
+ Purpose: Browser automation agent template using Playwright for web scraping and interaction
+ LLM-Note:
+ Dependencies: imports from [playwright.sync_api, connectonion.Agent, connectonion.xray, json] | requires playwright package | template file copied by [cli/commands/init.py, cli/commands/create.py]
+ Data flow: user command → Agent.input() → BrowserAutomation methods (navigate, click, fill_form, scrape_content, take_screenshot) → Playwright browser actions → returns results
+ State/Effects: stateful browser session (playwright, browser, page) | tracks visited_urls, screenshots | modifies filesystem with screenshots | headless browser process
+ Integration: template for 'co create --template playwright' | BrowserAutomation class passed as tool | uses prompt.md for system prompt | @xray decorator on all methods
+ Performance: browser launch overhead 1-3s | operations vary by page complexity | max_iterations=20 for complex automation
+ Errors: graceful fallback if Playwright not installed | method-level error handling returns error strings | cleanup on exit
+
+ Playwright Web Automation Agent - Browser control and web scraping
+
+ Based on the ConnectOnion Playwright example with stateful browser tools.
+ """
+
+ try:
+     from playwright.sync_api import sync_playwright
+     PLAYWRIGHT_AVAILABLE = True
+ except ImportError:
+     PLAYWRIGHT_AVAILABLE = False
+     print("⚠️ Playwright not installed. Run: pip install playwright && playwright install")
+
+ from connectonion import Agent, xray
+ from typing import Optional, List, Dict
+ import json
+
+
+ class BrowserAutomation:
+     """Stateful browser automation tools with shared browser instance."""
+
+     def __init__(self):
+         self.playwright = None
+         self.browser = None
+         self.page = None
+         self.screenshots = []
+         self.visited_urls = []
+         self.downloads = []
+
+     @xray
+     def start_browser(self, headless: bool = True) -> str:
+         """Start a browser instance.
+
+         Args:
+             headless: Run browser in headless mode (no UI)
+         """
+         if not PLAYWRIGHT_AVAILABLE:
+             return "Error: Playwright not installed. Run: pip install playwright && playwright install"
+
+         if self.browser:
+             return "Browser already running"
+
+         self.playwright = sync_playwright().start()
+         self.browser = self.playwright.chromium.launch(headless=headless)
+         self.page = self.browser.new_page()
+         return f"✅ Browser started (headless={headless})"
+
+     @xray
+     def navigate(self, url: str, wait_until: str = "load") -> str:
+         """Navigate to a URL.
+
+         Args:
+             url: The URL to navigate to
+             wait_until: When to consider navigation done ('load', 'domcontentloaded', 'networkidle')
+         """
+         if not self.page:
+             return "❌ Browser not started. Call start_browser() first."
+
+         try:
+             self.page.goto(url, wait_until=wait_until)
+             self.visited_urls.append(url)
+             title = self.page.title()
+             return f"✅ Navigated to {url}\nPage title: {title}"
+         except Exception as e:
+             return f"❌ Navigation failed: {e}"
+
+     @xray
+     def take_screenshot(self, filename: Optional[str] = None, full_page: bool = False) -> str:
+         """Take a screenshot of the current page.
+
+         Args:
+             filename: Name for the screenshot file
+             full_page: Capture full scrollable page
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         if not filename:
+             filename = f"screenshot_{len(self.screenshots) + 1}.png"
+
+         try:
+             self.page.screenshot(path=filename, full_page=full_page)
+             self.screenshots.append(filename)
+             return f"📸 Screenshot saved as {filename}"
+         except Exception as e:
+             return f"❌ Screenshot failed: {e}"
+
+     @xray
+     def scrape_content(self, selector: str = "body") -> str:
+         """Extract text content from the page.
+
+         Args:
+             selector: CSS selector for the element to scrape
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             element = self.page.query_selector(selector)
+             if element:
+                 text = element.inner_text()
+                 return f"📄 Content from {selector}:\n{text[:500]}..." if len(text) > 500 else f"📄 Content from {selector}:\n{text}"
+             else:
+                 return f"❌ No element found matching selector: {selector}"
+         except Exception as e:
+             return f"❌ Scraping failed: {e}"
+
+     @xray
+     def fill_form(self, form_data: str) -> str:
+         """Fill form fields on the page.
+
+         Args:
+             form_data: JSON string with selector-value pairs, e.g., '{"#name": "John", "#email": "john@example.com"}'
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             data = json.loads(form_data)
+             filled = []
+
+             for selector, value in data.items():
+                 self.page.fill(selector, str(value))
+                 filled.append(f"{selector} = {value}")
+
+             return f"✅ Form filled:\n" + "\n".join(filled)
+         except json.JSONDecodeError:
+             return "❌ Invalid JSON format for form_data"
+         except Exception as e:
+             return f"❌ Form filling failed: {e}"
+
+     @xray
+     def click(self, selector: str) -> str:
+         """Click an element on the page.
+
+         Args:
+             selector: CSS selector for the element to click
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             self.page.click(selector)
+             # Wait a bit for any navigation
+             self.page.wait_for_load_state("networkidle", timeout=5000)
+             return f"✅ Clicked element: {selector}\nCurrent URL: {self.page.url}"
+         except Exception as e:
+             return f"❌ Click failed on {selector}: {e}"
+
+     @xray
+     def extract_links(self, filter_pattern: str = "") -> str:
+         """Extract all links from the current page.
+
+         Args:
+             filter_pattern: Optional pattern to filter links
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             links = self.page.eval_on_selector_all(
+                 'a[href]',
+                 'elements => elements.map(e => ({text: e.innerText, href: e.href}))'
+             )
+
+             if filter_pattern:
+                 links = [link for link in links if filter_pattern in link['href']]
+
+             if not links:
+                 return "No links found" + (f" matching '{filter_pattern}'" if filter_pattern else "")
+
+             result = f"🔗 Found {len(links)} links:\n"
+             for link in links[:10]:  # Show first 10
+                 result += f" - {link['text'][:30]}: {link['href']}\n"
+
+             if len(links) > 10:
+                 result += f" ... and {len(links) - 10} more"
+
+             return result
+         except Exception as e:
+             return f"❌ Link extraction failed: {e}"
+
+     @xray
+     def wait_for_element(self, selector: str, timeout: int = 5000) -> str:
+         """Wait for an element to appear on the page.
+
+         Args:
+             selector: CSS selector to wait for
+             timeout: Maximum wait time in milliseconds
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             self.page.wait_for_selector(selector, timeout=timeout)
+             return f"✅ Element {selector} appeared"
+         except Exception:
+             return f"❌ Element {selector} did not appear within {timeout}ms"
+
+     @xray
+     def execute_javascript(self, script: str) -> str:
+         """Execute JavaScript code on the page.
+
+         Args:
+             script: JavaScript code to execute
+         """
+         if not self.page:
+             return "❌ No page loaded"
+
+         try:
+             result = self.page.evaluate(script)
+             return f"✅ JavaScript executed. Result: {result}"
+         except Exception as e:
+             return f"❌ JavaScript execution failed: {e}"
+
+     @xray
+     def get_page_info(self) -> str:
+         """Get information about the current page."""
+         if not self.page:
+             return "❌ No page loaded"
+
+         info = {
+             "url": self.page.url,
+             "title": self.page.title(),
+             "viewport": self.page.viewport_size,
+         }
+
+         return f"📊 Page info:\n" + json.dumps(info, indent=2)
+
+     @xray
+     def get_session_info(self) -> str:
+         """Get information about the browser session."""
+         info = {
+             "browser_running": self.browser is not None,
+             "current_url": self.page.url if self.page else None,
+             "visited_urls": self.visited_urls,
+             "screenshots_taken": len(self.screenshots),
+             "screenshot_files": self.screenshots,
+         }
+
+         return f"📊 Session info:\n" + json.dumps(info, indent=2)
+
+     @xray
+     def close_browser(self) -> str:
+         """Close the browser and clean up resources."""
+         if self.page:
+             self.page.close()
+             self.page = None
+         if self.browser:
+             self.browser.close()
+             self.browser = None
+         if self.playwright:
+             self.playwright.stop()
+             self.playwright = None
+
+         return "✅ Browser closed and resources cleaned up"
+
+
+ # Create browser automation instance
+ browser = BrowserAutomation()
+
+ # Create the Playwright automation agent with stateful tools
+ agent = Agent(
+     name="playwright_agent",
+     system_prompt="prompt.md",
+     tools=browser,  # Pass the entire class instance - all methods become tools
+     max_iterations=20  # More iterations for complex web automation
+ )
+
+
+ def main():
+     """Run the Playwright agent in interactive mode."""
+     print("🌐 Playwright Web Automation Agent")
+     print("Stateful browser automation with persistent session")
+     print("Type 'quit' to exit\n")
+
+     # Interactive conversation loop
+     while True:
+         user_input = input("You: ").strip()
+
+         if user_input.lower() in ['quit', 'exit', 'q']:
+             # Clean up browser resources before exit
+             try:
+                 browser.close_browser()
+             except Exception:
+                 pass
+             print("👋 Goodbye!")
+             break
+
+         if not user_input:
+             continue
+
+         # Get response from agent
+         response = agent.input(user_input)
+         print(f"Agent: {response}\n")
+
+
+ if __name__ == "__main__":
+     if not PLAYWRIGHT_AVAILABLE:
+         print("⚠️ Playwright is not installed!")
+         print("To use this agent, run:")
+         print(" pip install -r requirements.txt")
+         print(" playwright install chromium\n")
+     else:
+         print("🌐 Playwright Web Automation Agent initialized!")
+         print("Stateful browser automation with persistent session\n")
+
+     print("Available browser tools:")
+     for tool_name in agent.list_tools():
+         print(f" 🔧 {tool_name}")
+
+     print("\n📚 Example commands:")
+     print(' "Start the browser and go to example.com"')
+     print(' "Take a screenshot of the page"')
+     print(' "Extract all links from the page"')
+     print(' "Get the page title and URL"')
+
+     print("\n💡 Tips:")
+     print(" - Browser state persists across commands")
+     print(" - Always start_browser() before other operations")
+     print(" - Use close_browser() when done (or type 'quit')")
+     print(" - Ask for session_info() to see browser state\n")
+
+     print("=" * 50 + "\n")
+
+     # Start interactive mode
+     main()
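
The `tools=browser` argument above is the template's central pattern: Agent accepts a whole class instance, every public method becomes a callable tool, and the instance holds shared state (the live browser and page) across tool calls. A minimal sketch of the same pattern with a hypothetical class, to show it is not Playwright-specific (the `Notes` class is illustrative, not part of the package):

    from connectonion import Agent

    class Notes:
        """Each public method becomes a tool; the instance keeps shared state."""
        def __init__(self):
            self.items = []

        def add_note(self, text: str) -> str:
            self.items.append(text)
            return f"Saved note #{len(self.items)}"

        def list_notes(self) -> str:
            return "\n".join(self.items) or "No notes yet."

    agent = Agent(name="notes_agent", tools=Notes())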
@@ -0,0 +1,102 @@
+ # Playwright Web Automation Agent
+
+ You are a web automation specialist using Playwright for browser automation, web scraping, and testing. You can interact with web pages programmatically to extract data, fill forms, take screenshots, and perform complex web interactions.
+
+ ## Core Capabilities
+
+ ### 🌐 Navigation & Loading
+ - **navigate_to_url**: Browse to any URL with configurable wait conditions
+ - Handle different page load states (load, domcontentloaded, networkidle)
+
+ ### 📊 Data Extraction
+ - **scrape_page_content**: Extract content using CSS selectors
+ - **extract_links**: Gather all links with optional filtering
+ - Parse and structure web data efficiently
+
+ ### 📝 Form Interaction
+ - **fill_form**: Complete and submit web forms
+ - Handle various input types and validation
+
+ ### 📸 Visual Capture
+ - **take_screenshot**: Capture page screenshots (full or viewport)
+ - Document visual states and layouts
+
+ ### 🖱️ User Interactions
+ - **wait_and_click**: Click elements with smart waiting
+ - **execute_javascript**: Run custom JavaScript on pages
+ - Simulate complex user behaviors
+
+ ### 💾 File Operations
+ - **download_file**: Download files from web sources
+ - Handle various file types and download scenarios
+
+ ## Best Practices
+
+ 1. **Wait Strategies**: Always use appropriate wait conditions for dynamic content
+ 2. **Error Handling**: Gracefully handle timeouts and missing elements
+ 3. **Selector Strategy**: Use stable, specific CSS selectors
+ 4. **Performance**: Minimize unnecessary page loads and interactions
+ 5. **Data Validation**: Verify scraped data before processing
+
+ ## Common Use Cases
+
+ ### Web Scraping
+ - Extract product information from e-commerce sites
+ - Gather news articles or blog posts
+ - Collect structured data from tables
+
+ ### Automation
+ - Fill and submit contact forms
+ - Automate repetitive web tasks
+ - Navigate multi-step processes
+
+ ### Testing
+ - Capture screenshots for visual regression
+ - Verify page elements and content
+ - Test form submissions and interactions
+
+ ### Data Collection
+ - Download reports and documents
+ - Extract API responses from network traffic
+ - Gather links for crawling
+
+ ## Interaction Guidelines
+
+ When users request web automation:
+ 1. Understand the target website and goal
+ 2. Plan the automation sequence
+ 3. Use appropriate tools for each step
+ 4. Handle potential errors gracefully
+ 5. Return structured, useful data
+
+ ## Example Workflows
+
+ ### Scraping Workflow
+ 1. Navigate to target URL
+ 2. Wait for content to load
+ 3. Extract data using selectors
+ 4. Process and structure the data
+ 5. Return formatted results
+
+ ### Form Automation
+ 1. Navigate to form page
+ 2. Fill in form fields
+ 3. Handle any validation
+ 4. Submit the form
+ 5. Verify submission success
+
+ ### Screenshot Documentation
+ 1. Navigate to pages
+ 2. Wait for full render
+ 3. Capture screenshots
+ 4. Save with descriptive names
+ 5. Report completion
+
+ ## Important Notes
+
+ - **Installation Required**: Users need to install Playwright (`pip install playwright`) and browsers (`playwright install chromium`)
+ - **Headless Mode**: Default operations run headless for efficiency
+ - **Rate Limiting**: Respect website rate limits and robots.txt
+ - **Legal Compliance**: Ensure web scraping complies with website terms of service
+
+ You are here to make web automation simple, reliable, and efficient for users.
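
The workflows in this prompt map onto the template's stateful tools, though some names here are descriptive (navigate_to_url, scrape_page_content, wait_and_click, download_file) while the tools actually registered come from BrowserAutomation's method names in agent.py (navigate, scrape_content, click; no download method exists). A sketch of the scraping workflow driven directly through that instance, with illustrative URL and selector:

    browser = BrowserAutomation()
    browser.start_browser(headless=True)
    browser.navigate("https://example.com", wait_until="networkidle")
    browser.wait_for_element("h1", timeout=5000)
    print(browser.scrape_content("h1"))
    browser.take_screenshot("example_home.png", full_page=True)
    browser.close_browser()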
@@ -0,0 +1,3 @@
+ connectonion
+ playwright
+ python-dotenv
@@ -0,0 +1,122 @@
+ """
+ Purpose: Web research agent template for searching, extracting, and analyzing web data
+ LLM-Note:
+ Dependencies: imports from [os, json, requests, connectonion.Agent, connectonion.llm_do] | template file copied by [cli/commands/init.py, cli/commands/create.py]
+ Data flow: user query → Agent.input() → search_web (placeholder) | extract_data fetches URL → returns content preview | analyze_data uses llm_do | save_research writes JSON file
+ State/Effects: HTTP requests to external URLs | writes research JSON files | uses MODEL env var or co/gemini-2.5-pro
+ Integration: template for 'co create --template web-research' | tools: search_web, extract_data, analyze_data, save_research | extensible for real search APIs
+ Performance: HTTP timeout 10s | analysis truncates to 1000 chars for llm_do
+ Errors: request exceptions caught and returned | ⚠️ TODO: search_web is placeholder, needs real API integration
+
+ Web research agent with data extraction capabilities.
+ """
+
+ import os
+ import json
+ import requests
+ from typing import Any, Dict, List, Optional
+ from connectonion import Agent, llm_do
+
+
+ def search_web(query: str) -> str:
+     """Search the web for information.
+
+     Args:
+         query: Search query
+
+     Returns:
+         Search results summary
+     """
+     # This is a placeholder - you would integrate with a real search API
+     return f"Searching for: {query}\n\nResults:\n1. Example result about {query}\n2. Another relevant finding"
+
+
+ def extract_data(url: str, data_type: str = "text") -> Dict[str, Any]:
+     """Extract data from a webpage.
+
+     Args:
+         url: URL to extract data from
+         data_type: Type of data to extract (text, links, images)
+
+     Returns:
+         Extracted data dictionary
+     """
+     try:
+         response = requests.get(url, timeout=10)
+         response.raise_for_status()
+
+         # Simple extraction logic - expand as needed
+         if data_type == "text":
+             # In production, use BeautifulSoup or similar
+             return {
+                 "url": url,
+                 "status": response.status_code,
+                 "content_length": len(response.text),
+                 "preview": response.text[:500]
+             }
+         else:
+             return {"url": url, "data_type": data_type, "note": "Extraction not implemented"}
+
+     except Exception as e:
+         return {"error": str(e), "url": url}
+
+
+ def analyze_data(data: str, analysis_type: str = "summary") -> str:
+     """Analyze extracted data.
+
+     Args:
+         data: Data to analyze
+         analysis_type: Type of analysis (summary, sentiment, keywords)
+
+     Returns:
+         Analysis results
+     """
+     # Use LLM for analysis
+     prompt = f"Perform {analysis_type} analysis on this data: {data[:1000]}"
+     return llm_do(prompt)
+
+
+ def save_research(topic: str, findings: List[str], filename: Optional[str] = None) -> str:
+     """Save research findings to a file.
+
+     Args:
+         topic: Research topic
+         findings: List of findings
+         filename: Output filename (optional)
+
+     Returns:
+         Confirmation message
+     """
+     if not filename:
+         filename = f"research_{topic.replace(' ', '_')}.json"
+
+     research_data = {
+         "topic": topic,
+         "findings": findings,
+         "timestamp": __import__('datetime').datetime.now().isoformat()
+     }
+
+     with open(filename, 'w') as f:
+         json.dump(research_data, f, indent=2)
+
+     return f"Research saved to {filename}"
+
+
+ def main():
+     """Run the web research agent."""
+     # Create agent with web research tools
+     agent = Agent(
+         name="web-research-agent",
+         tools=[search_web, extract_data, analyze_data, save_research],
+         model=os.getenv("MODEL", "co/gemini-2.5-pro")
+     )
+
+     # Example research task
+     response = agent.input(
+         "Research the latest trends in AI and summarize the key findings"
+     )
+     print(response)
+
+
+ if __name__ == "__main__":
+     main()
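
extract_data above returns only a raw-text preview, and its inline comment points at BeautifulSoup for real parsing. A hedged sketch of what that upgrade could look like, assuming `beautifulsoup4` is installed (not part of the template's requirements):

    from typing import Any, Dict

    import requests
    from bs4 import BeautifulSoup

    def extract_data(url: str, data_type: str = "text") -> Dict[str, Any]:
        """Fetch a page and parse the HTML instead of returning a raw preview."""
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")
        if data_type == "text":
            return {"url": url, "text": soup.get_text(separator="\n", strip=True)[:1000]}
        if data_type == "links":
            return {"url": url, "links": [a["href"] for a in soup.find_all("a", href=True)]}
        return {"url": url, "data_type": data_type, "note": "Extraction not implemented"}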
@@ -0,0 +1,128 @@
+ """
+ Purpose: Client interface for connecting to remote agents via relay network using INPUT/OUTPUT protocol
+ LLM-Note:
+ Dependencies: imports from [asyncio, json, uuid, websockets] | imported by [__init__.py, tests/test_connect.py, examples/] | tested by [tests/test_connect.py, tests/integration/manual/network_connect_manual.py]
+ Data flow: user calls connect(address, relay_url) → creates RemoteAgent instance → user calls .input(prompt) → _send_task() creates WebSocket to relay /ws/input → sends INPUT message with {type, input_id, to, prompt} → waits for OUTPUT response from relay → returns result string OR raises ConnectionError
+ State/Effects: establishes temporary WebSocket connection per task (no persistent connection) | sends INPUT messages to relay | receives OUTPUT/ERROR messages | no file I/O or global state | asyncio.run() blocks on .input(), await on .input_async()
+ Integration: exposes connect(address, relay_url), RemoteAgent class with .input(prompt, timeout), .input_async(prompt, timeout) | default relay_url="wss://oo.openonion.ai/ws/announce" | address format: 0x + 64 hex chars (Ed25519 public key) | complements host() with relay_url which listens for INPUT on relay | Protocol: INPUT type with to/prompt fields → OUTPUT type with input_id/result fields
+ Performance: creates new WebSocket connection per input() call (no connection pooling) | default timeout=30s | async under the hood (asyncio.run wraps for sync API) | no caching or retry logic
+ Errors: raises ImportError if websockets not installed | raises ConnectionError for ERROR responses from relay | raises ConnectionError for unexpected response types | asyncio.TimeoutError if no response within timeout | WebSocket connection errors bubble up
+
+ Connect to remote agents on the network.
+
+ Simple function-based API for using remote agents.
+ """
+
+ import asyncio
+ import json
+ import uuid
+
+
+ class RemoteAgent:
+     """
+     Interface to a remote agent.
+
+     Minimal MVP: Just input() method.
+     """
+
+     def __init__(self, address: str, relay_url: str):
+         self.address = address
+         self._relay_url = relay_url
+
+     def input(self, prompt: str, timeout: float = 30.0) -> str:
+         """
+         Send task to remote agent and get response (sync version).
+
+         Use this in normal synchronous code.
+         For async code, use input_async() instead.
+
+         Args:
+             prompt: Task/prompt to send
+             timeout: Seconds to wait for response (default 30)
+
+         Returns:
+             Agent's response string
+
+         Example:
+             >>> translator = connect("0x3d40...")
+             >>> result = translator.input("Translate 'hello' to Spanish")
+         """
+         return asyncio.run(self._send_task(prompt, timeout))
+
+     async def input_async(self, prompt: str, timeout: float = 30.0) -> str:
+         """
+         Send task to remote agent and get response (async version).
+
+         Use this when calling from async code.
+
+         Args:
+             prompt: Task/prompt to send
+             timeout: Seconds to wait for response (default 30)
+
+         Returns:
+             Agent's response string
+
+         Example:
+             >>> remote = connect("0x3d40...")
+             >>> result = await remote.input_async("Translate 'hello' to Spanish")
+         """
+         return await self._send_task(prompt, timeout)
+
+     async def _send_task(self, prompt: str, timeout: float) -> str:
+         """
+         Send input via relay and wait for output.
+
+         MVP: Uses relay to route INPUT/OUTPUT messages between agents.
+         """
+         import websockets
+
+         input_id = str(uuid.uuid4())
+
+         # Connect to relay input endpoint
+         relay_input_url = self._relay_url.replace("/ws/announce", "/ws/input")
+
+         async with websockets.connect(relay_input_url) as ws:
+             # Send INPUT message
+             input_message = {
+                 "type": "INPUT",
+                 "input_id": input_id,
+                 "to": self.address,
+                 "prompt": prompt
+             }
+
+             await ws.send(json.dumps(input_message))
+
+             # Wait for OUTPUT
+             response_data = await asyncio.wait_for(ws.recv(), timeout=timeout)
+             response = json.loads(response_data)
+
+             # Return result
+             if response.get("type") == "OUTPUT" and response.get("input_id") == input_id:
+                 return response.get("result", "")
+             elif response.get("type") == "ERROR":
+                 raise ConnectionError(f"Agent error: {response.get('error')}")
+             else:
+                 raise ConnectionError(f"Unexpected response: {response}")
+
+     def __repr__(self):
+         short = self.address[:12] + "..." if len(self.address) > 12 else self.address
+         return f"RemoteAgent({short})"
+
+
+ def connect(address: str, relay_url: str = "wss://oo.openonion.ai/ws/announce") -> RemoteAgent:
+     """
+     Connect to a remote agent.
+
+     Args:
+         address: Agent's public key address (0x...)
+         relay_url: Relay server URL (default: production)
+
+     Returns:
+         RemoteAgent interface
+
+     Example:
+         >>> from connectonion import connect
+         >>> translator = connect("0x3d4017c3...")
+         >>> result = translator.input("Translate 'hello' to Spanish")
+     """
+     return RemoteAgent(address, relay_url)
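
The entire relay round trip in `_send_task()` reduces to three JSON message shapes, built and parsed as plain dicts; the field names below come straight from the code above, while the concrete values are illustrative:

    # Sent by connect() to the relay's /ws/input endpoint:
    input_message = {"type": "INPUT", "input_id": "<uuid4>", "to": "0x<64 hex chars>", "prompt": "Translate 'hello' to Spanish"}

    # Success reply; input_id must echo the request:
    output_message = {"type": "OUTPUT", "input_id": "<uuid4>", "result": "hola"}

    # Failure reply; raises ConnectionError on the client:
    error_message = {"type": "ERROR", "error": "<message>"}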