connectonion 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. connectonion/__init__.py +78 -0
  2. connectonion/address.py +320 -0
  3. connectonion/agent.py +450 -0
  4. connectonion/announce.py +84 -0
  5. connectonion/asgi.py +287 -0
  6. connectonion/auto_debug_exception.py +181 -0
  7. connectonion/cli/__init__.py +3 -0
  8. connectonion/cli/browser_agent/__init__.py +5 -0
  9. connectonion/cli/browser_agent/browser.py +243 -0
  10. connectonion/cli/browser_agent/prompt.md +107 -0
  11. connectonion/cli/commands/__init__.py +1 -0
  12. connectonion/cli/commands/auth_commands.py +527 -0
  13. connectonion/cli/commands/browser_commands.py +27 -0
  14. connectonion/cli/commands/create.py +511 -0
  15. connectonion/cli/commands/deploy_commands.py +220 -0
  16. connectonion/cli/commands/doctor_commands.py +173 -0
  17. connectonion/cli/commands/init.py +469 -0
  18. connectonion/cli/commands/project_cmd_lib.py +828 -0
  19. connectonion/cli/commands/reset_commands.py +149 -0
  20. connectonion/cli/commands/status_commands.py +168 -0
  21. connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
  22. connectonion/cli/docs/connectonion.md +1256 -0
  23. connectonion/cli/docs.md +123 -0
  24. connectonion/cli/main.py +148 -0
  25. connectonion/cli/templates/meta-agent/README.md +287 -0
  26. connectonion/cli/templates/meta-agent/agent.py +196 -0
  27. connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
  28. connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
  29. connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
  30. connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
  31. connectonion/cli/templates/minimal/README.md +56 -0
  32. connectonion/cli/templates/minimal/agent.py +40 -0
  33. connectonion/cli/templates/playwright/README.md +118 -0
  34. connectonion/cli/templates/playwright/agent.py +336 -0
  35. connectonion/cli/templates/playwright/prompt.md +102 -0
  36. connectonion/cli/templates/playwright/requirements.txt +3 -0
  37. connectonion/cli/templates/web-research/agent.py +122 -0
  38. connectonion/connect.py +128 -0
  39. connectonion/console.py +539 -0
  40. connectonion/debug_agent/__init__.py +13 -0
  41. connectonion/debug_agent/agent.py +45 -0
  42. connectonion/debug_agent/prompts/debug_assistant.md +72 -0
  43. connectonion/debug_agent/runtime_inspector.py +406 -0
  44. connectonion/debug_explainer/__init__.py +10 -0
  45. connectonion/debug_explainer/explain_agent.py +114 -0
  46. connectonion/debug_explainer/explain_context.py +263 -0
  47. connectonion/debug_explainer/explainer_prompt.md +29 -0
  48. connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
  49. connectonion/debugger_ui.py +1039 -0
  50. connectonion/decorators.py +208 -0
  51. connectonion/events.py +248 -0
  52. connectonion/execution_analyzer/__init__.py +9 -0
  53. connectonion/execution_analyzer/execution_analysis.py +93 -0
  54. connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
  55. connectonion/host.py +579 -0
  56. connectonion/interactive_debugger.py +342 -0
  57. connectonion/llm.py +801 -0
  58. connectonion/llm_do.py +307 -0
  59. connectonion/logger.py +300 -0
  60. connectonion/prompt_files/__init__.py +1 -0
  61. connectonion/prompt_files/analyze_contact.md +62 -0
  62. connectonion/prompt_files/eval_expected.md +12 -0
  63. connectonion/prompt_files/react_evaluate.md +11 -0
  64. connectonion/prompt_files/react_plan.md +16 -0
  65. connectonion/prompt_files/reflect.md +22 -0
  66. connectonion/prompts.py +144 -0
  67. connectonion/relay.py +200 -0
  68. connectonion/static/docs.html +688 -0
  69. connectonion/tool_executor.py +279 -0
  70. connectonion/tool_factory.py +186 -0
  71. connectonion/tool_registry.py +105 -0
  72. connectonion/trust.py +166 -0
  73. connectonion/trust_agents.py +71 -0
  74. connectonion/trust_functions.py +88 -0
  75. connectonion/tui/__init__.py +57 -0
  76. connectonion/tui/divider.py +39 -0
  77. connectonion/tui/dropdown.py +251 -0
  78. connectonion/tui/footer.py +31 -0
  79. connectonion/tui/fuzzy.py +56 -0
  80. connectonion/tui/input.py +278 -0
  81. connectonion/tui/keys.py +35 -0
  82. connectonion/tui/pick.py +130 -0
  83. connectonion/tui/providers.py +155 -0
  84. connectonion/tui/status_bar.py +163 -0
  85. connectonion/usage.py +161 -0
  86. connectonion/useful_events_handlers/__init__.py +16 -0
  87. connectonion/useful_events_handlers/reflect.py +116 -0
  88. connectonion/useful_plugins/__init__.py +20 -0
  89. connectonion/useful_plugins/calendar_plugin.py +163 -0
  90. connectonion/useful_plugins/eval.py +139 -0
  91. connectonion/useful_plugins/gmail_plugin.py +162 -0
  92. connectonion/useful_plugins/image_result_formatter.py +127 -0
  93. connectonion/useful_plugins/re_act.py +78 -0
  94. connectonion/useful_plugins/shell_approval.py +159 -0
  95. connectonion/useful_tools/__init__.py +44 -0
  96. connectonion/useful_tools/diff_writer.py +192 -0
  97. connectonion/useful_tools/get_emails.py +183 -0
  98. connectonion/useful_tools/gmail.py +1596 -0
  99. connectonion/useful_tools/google_calendar.py +613 -0
  100. connectonion/useful_tools/memory.py +380 -0
  101. connectonion/useful_tools/microsoft_calendar.py +604 -0
  102. connectonion/useful_tools/outlook.py +488 -0
  103. connectonion/useful_tools/send_email.py +205 -0
  104. connectonion/useful_tools/shell.py +97 -0
  105. connectonion/useful_tools/slash_command.py +201 -0
  106. connectonion/useful_tools/terminal.py +285 -0
  107. connectonion/useful_tools/todo_list.py +241 -0
  108. connectonion/useful_tools/web_fetch.py +216 -0
  109. connectonion/xray.py +467 -0
  110. connectonion-0.5.8.dist-info/METADATA +741 -0
  111. connectonion-0.5.8.dist-info/RECORD +113 -0
  112. connectonion-0.5.8.dist-info/WHEEL +4 -0
  113. connectonion-0.5.8.dist-info/entry_points.txt +3 -0
connectonion/agent.py ADDED
@@ -0,0 +1,450 @@
1
+ """
2
+ Purpose: Orchestrate AI agent execution with LLM calls, tool execution, and automatic logging
3
+ LLM-Note:
4
+ Dependencies: imports from [llm.py, tool_factory.py, prompts.py, decorators.py, logger.py, tool_executor.py, tool_registry.py] | imported by [__init__.py, debug_agent/__init__.py] | tested by [tests/test_agent.py, tests/test_agent_prompts.py, tests/test_agent_workflows.py]
5
+ Data flow: receives user prompt: str from Agent.input() → creates/extends current_session with messages → calls llm.complete() with tool schemas → receives LLMResponse with tool_calls → executes tools via tool_executor.execute_and_record_tools() → appends tool results to messages → repeats loop until no tool_calls or max_iterations → logger logs to .co/logs/{name}.log and .co/sessions/{name}_{timestamp}.yaml → returns final response: str
6
+ State/Effects: modifies self.current_session['messages', 'trace', 'turn', 'iteration'] | writes to .co/logs/{name}.log and .co/sessions/ via logger.py
7
+ Integration: exposes Agent(name, tools, system_prompt, model, log, quiet), .input(prompt), .execute_tool(name, args), .add_tool(func), .remove_tool(name), .list_tools(), .reset_conversation() | tools stored in ToolRegistry with attribute access (agent.tools.tool_name) and instance storage (agent.tools.gmail) | tool execution delegates to tool_executor module | log defaults to .co/logs/ (None), can be True (current dir), False (disabled), or custom path | quiet=True suppresses console but keeps session logging | trust enforcement moved to host() for network access control
8
+ Performance: max_iterations=10 default (configurable per-input) | session state persists across turns for multi-turn conversations | ToolRegistry provides O(1) tool lookup via .get() or attribute access
9
+ Errors: LLM errors bubble up | tool execution errors captured in trace and returned to LLM for retry
10
+ """
11
+
12
+ import os
13
+ import sys
14
+ import time
15
+ from typing import List, Optional, Dict, Any, Callable, Union
16
+ from pathlib import Path
17
+ from .llm import LLM, create_llm, TokenUsage
18
+ from .usage import get_context_limit
19
+ from .tool_factory import create_tool_from_function, extract_methods_from_instance, is_class_instance
20
+ from .tool_registry import ToolRegistry
21
+ from .prompts import load_system_prompt
22
+ from .decorators import (
23
+ _is_replay_enabled # Only need this for replay check
24
+ )
25
+ from .logger import Logger
26
+ from .tool_executor import execute_and_record_tools, execute_single_tool
27
+ from .events import EventHandler
28
+
29
+
30
class Agent:
    """Agent that can use tools to complete tasks.

    Runs an LLM/tool iteration loop per input: the LLM decides the next
    action, requested tools are executed, their results are appended to the
    conversation, and the loop repeats until the LLM responds without tool
    calls or ``max_iterations`` is reached.  Conversation state persists in
    ``self.current_session`` across turns until ``reset_conversation()``.
    """

    def __init__(
        self,
        name: str,
        llm: Optional[LLM] = None,
        tools: Optional[Union[List[Callable], Callable, Any]] = None,
        system_prompt: Union[str, Path, None] = None,
        api_key: Optional[str] = None,
        model: str = "co/gemini-2.5-pro",
        max_iterations: int = 10,
        log: Optional[Union[bool, str, Path]] = None,
        quiet: bool = False,
        plugins: Optional[List[List[EventHandler]]] = None,
        on_events: Optional[List[EventHandler]] = None
    ):
        """Initialize the agent: logger, event registry, tools, and LLM.

        Args:
            name: Agent name; used for log file and session file naming.
            llm: Pre-built LLM instance. When None, one is created from
                ``model``/``api_key`` via ``create_llm()``.
            tools: A callable, a list of callables, and/or class instances.
                Instances are stored for attribute access and their methods
                are extracted as individual tools.
            system_prompt: Prompt text or a path, resolved by
                ``load_system_prompt()``.
            api_key: Explicit API key; when None each provider reads its own
                environment variable.
            model: Model identifier used when ``llm`` is not given.
            max_iterations: Default cap on LLM/tool iterations per input.
            log: None → default .co/logs/, True → current dir, False →
                disabled, or a custom path. The CONNECTONION_LOG environment
                variable overrides this when set.
            quiet: Suppress console output; session logging still happens.
            plugins: Lists of wrapped event handlers, registered in order.
            on_events: Extra handlers; each item may be a single wrapped
                handler or a list of them.

        Raises:
            TypeError: If an event handler is not callable.
            ValueError: If an event handler is missing ``_event_type`` or
                declares an unknown event type.
        """
        self.name = name
        self.system_prompt = load_system_prompt(system_prompt)
        self.max_iterations = max_iterations

        # Current session context (runtime only)
        self.current_session = None

        # Token usage tracking
        self.total_cost: float = 0.0  # Cumulative cost in USD
        self.last_usage: Optional[TokenUsage] = None  # From most recent LLM call

        # Initialize logger (unified: terminal + file + YAML sessions).
        # Environment variable override has the highest priority; read it once
        # instead of calling os.getenv() twice.
        env_log = os.getenv('CONNECTONION_LOG')
        effective_log = Path(env_log) if env_log else log

        self.logger = Logger(agent_name=name, quiet=quiet, log=effective_log)

        # Initialize event registry.
        # Note: before_each_tool/after_each_tool fire for EACH tool;
        # before_tools/after_tools fire ONCE per batch (safe for adding messages).
        self.events = {
            'after_user_input': [],
            'before_llm': [],
            'after_llm': [],
            'before_each_tool': [],  # Fires before EACH tool
            'before_tools': [],      # Fires ONCE before ALL tools in a batch
            'after_each_tool': [],   # Fires after EACH tool (don't add messages here!)
            'after_tools': [],       # Fires ONCE after ALL tools (safe for messages)
            'on_error': [],
            'on_complete': []
        }

        # Register plugin events (flatten list of lists)
        if plugins:
            for event_list in plugins:
                for event_func in event_list:
                    self._register_event(event_func)

        # Register custom event handlers (supports both single functions and lists)
        if on_events:
            for item in on_events:
                if isinstance(item, list):
                    # Multiple handlers: before_tool(fn1, fn2) returns [fn1, fn2]
                    for fn in item:
                        self._register_event(fn)
                else:
                    # Single handler: @before_tool or before_tool(fn)
                    self._register_event(item)

        # Process tools: convert raw functions and class instances to tool
        # schemas automatically.
        self.tools = ToolRegistry()

        if tools is not None:
            tools_list = tools if isinstance(tools, list) else [tools]

            for tool in tools_list:
                if is_class_instance(tool):
                    # Store instance (agent.tools.gmail.my_id)
                    class_name = tool.__class__.__name__.lower()
                    self.tools.add_instance(class_name, tool)

                    # Extract methods as tools (agent.tools.send())
                    for method_tool in extract_methods_from_instance(tool):
                        self.tools.add(method_tool)
                elif callable(tool):
                    if not hasattr(tool, 'to_function_schema'):
                        processed = create_tool_from_function(tool)
                    else:
                        processed = tool
                    self.tools.add(processed)

        # Initialize LLM
        if llm:
            self.llm = llm
        else:
            # Use factory function to create appropriate LLM based on model.
            # Each LLM provider checks its own env var if api_key is None:
            #   - OpenAI models check OPENAI_API_KEY
            #   - Anthropic models check ANTHROPIC_API_KEY
            #   - Google models check GOOGLE_API_KEY
            #   - co/ models check OPENONION_API_KEY
            self.llm = create_llm(model=model, api_key=api_key)

        # Print banner (if console enabled)
        if self.logger.console:
            # Determine log_dir if logging is enabled
            log_dir = ".co/" if self.logger.enable_sessions else None
            self.logger.console.print_banner(
                agent_name=self.name,
                model=self.llm.model,
                tools=len(self.tools),
                log_dir=log_dir,
                llm=self.llm
            )

    def _invoke_events(self, event_type: str):
        """Invoke all event handlers for given type. Exceptions propagate (fail fast)."""
        for handler in self.events.get(event_type, []):
            handler(self)

    def _register_event(self, event_func: EventHandler):
        """
        Register a single event handler to appropriate event type.

        Args:
            event_func: Event handler wrapped with after_llm(), after_tool(), etc.

        Raises:
            TypeError: If event handler is not callable
            ValueError: If event handler missing _event_type or invalid event type
        """
        # First check if it's callable (type validation)
        if not callable(event_func):
            raise TypeError(f"Event must be callable, got {type(event_func).__name__}")

        # Then check if it has _event_type attribute (wrapper validation)
        event_type = getattr(event_func, '_event_type', None)
        if not event_type:
            func_name = getattr(event_func, '__name__', str(event_func))
            raise ValueError(
                f"Event handler '{func_name}' missing _event_type. "
                f"Did you forget to wrap it? Use after_llm({func_name}), etc."
            )

        # Finally check if it's a valid event type (value validation)
        if event_type not in self.events:
            raise ValueError(f"Invalid event type: {event_type}")

        self.events[event_type].append(event_func)

    def input(self, prompt: str, max_iterations: Optional[int] = None,
              session: Optional[Dict] = None) -> str:
        """Provide input to the agent and get response.

        Args:
            prompt: The input prompt or data to process
            max_iterations: Override agent's max_iterations for this request
            session: Optional session to continue a conversation. Pass the session
                from a previous response to maintain context. Contains:
                - session_id: Conversation identifier
                - messages: Conversation history
                - trace: Execution trace for debugging
                - turn: Turn counter

        Returns:
            The agent's response after processing the input
        """
        start_time = time.time()
        if self.logger.console:
            self.logger.console.print_task(prompt)

        # Session restoration: if session passed, restore it (stateless API continuation)
        if session is not None:
            self.current_session = {
                'session_id': session.get('session_id'),
                'messages': list(session.get('messages', [])),
                'trace': list(session.get('trace', [])),
                'turn': session.get('turn', 0)
            }
            # Start YAML session logging with session_id for thread safety
            self.logger.start_session(self.system_prompt, session_id=session.get('session_id'))
        elif self.current_session is None:
            # Initialize new session
            self.current_session = {
                'messages': [{"role": "system", "content": self.system_prompt}],
                'trace': [],
                'turn': 0  # Track conversation turns
            }
            # Start YAML session logging
            self.logger.start_session(self.system_prompt)

        # Add user message to conversation
        self.current_session['messages'].append({
            "role": "user",
            "content": prompt
        })

        # Track this turn
        self.current_session['turn'] += 1
        self.current_session['user_prompt'] = prompt  # Store user prompt for xray/debugging
        turn_start = time.time()

        # Add trace entry for this input
        self.current_session['trace'].append({
            'type': 'user_input',
            'turn': self.current_session['turn'],
            'prompt': prompt,  # Keep 'prompt' in trace for backward compatibility
            'timestamp': turn_start
        })

        # Invoke after_user_input events
        self._invoke_events('after_user_input')

        # Process
        self.current_session['iteration'] = 0  # Reset iteration for this turn
        result = self._run_iteration_loop(
            max_iterations or self.max_iterations
        )

        # Calculate duration
        duration = time.time() - turn_start

        self.current_session['result'] = result

        # Print completion summary
        if self.logger.console:
            session_path = f".co/sessions/{self.name}.yaml" if self.logger.enable_sessions else None
            self.logger.console.print_completion(duration, self.current_session, session_path)

        self._invoke_events('on_complete')

        # Log turn to YAML session (after on_complete so handlers can modify state)
        self.logger.log_turn(prompt, result, duration * 1000, self.current_session, self.llm.model)

        return result

    def reset_conversation(self):
        """Reset the conversation session. Start fresh."""
        self.current_session = None

    def execute_tool(self, tool_name: str, arguments: Optional[Dict] = None) -> Dict[str, Any]:
        """Execute a single tool by name. Useful for testing and debugging.

        Args:
            tool_name: Name of the tool to execute
            arguments: Tool arguments (default: {})

        Returns:
            Dict with: result, status, timing, name, arguments
        """
        arguments = arguments or {}

        # Create temporary session if needed
        if self.current_session is None:
            self.current_session = {
                'messages': [{"role": "system", "content": self.system_prompt}],
                'trace': [],
                'turn': 0,
                'iteration': 1,
                'user_prompt': 'Manual tool execution'
            }

        # Execute using the tool_executor
        trace_entry = execute_single_tool(
            tool_name=tool_name,
            tool_args=arguments,
            tool_id=f"manual_{tool_name}_{time.time()}",
            tools=self.tools,
            agent=self,
            logger=self.logger
        )

        # Note: trace_entry already added to session in execute_single_tool

        # Fire events (same as execute_and_record_tools)
        # on_error fires first for errors/not_found
        if trace_entry["status"] in ("error", "not_found"):
            self._invoke_events('on_error')

        # after_each_tool fires for this tool execution
        self._invoke_events('after_each_tool')

        # after_tools fires after all tools in batch (for single execution, fires once)
        self._invoke_events('after_tools')

        # Return simplified result (omit internal fields)
        return {
            "name": trace_entry["tool_name"],
            "arguments": trace_entry["arguments"],
            "result": trace_entry["result"],
            "status": trace_entry["status"],
            "timing": trace_entry["timing"]
        }

    def _create_initial_messages(self, prompt: str) -> List[Dict[str, Any]]:
        """Create initial conversation messages."""
        return [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": prompt}
        ]

    def _run_iteration_loop(self, max_iterations: int) -> str:
        """Run the main LLM/tool iteration loop until complete or max iterations."""
        while self.current_session['iteration'] < max_iterations:
            self.current_session['iteration'] += 1

            # Get LLM response
            response = self._get_llm_decision()

            # If no tool calls, we're done
            if not response.tool_calls:
                return response.content if response.content else "Task completed."

            # Process tool calls
            self._execute_and_record_tools(response.tool_calls)

            # After executing tools, continue the loop to let LLM decide next action.
            # The LLM will see the tool results and decide if task is complete.

        # Hit max iterations
        return f"Task incomplete: Maximum iterations ({max_iterations}) reached."

    def _get_llm_decision(self):
        """Get the next action/decision from the LLM.

        Returns:
            The LLM response object (content, tool_calls, usage).
        """
        # Get tool schemas
        tool_schemas = [tool.to_function_schema() for tool in self.tools] if self.tools else None

        # Show request info
        if self.logger.console:
            self.logger.console.print_llm_request(self.llm.model, self.current_session, self.max_iterations)

        # Invoke before_llm events
        self._invoke_events('before_llm')

        start = time.time()
        response = self.llm.complete(self.current_session['messages'], tools=tool_schemas)
        duration = (time.time() - start) * 1000  # milliseconds

        # Track token usage
        if response.usage:
            self.last_usage = response.usage
            self.total_cost += response.usage.cost

        # Compute once and guard against tool_calls being None/empty:
        # previously log_llm_response() called len(response.tool_calls)
        # unguarded, which raised TypeError when tool_calls was None.
        tool_calls_count = len(response.tool_calls) if response.tool_calls else 0

        # Add to trace
        self.current_session['trace'].append({
            'type': 'llm_call',
            'model': self.llm.model,
            'timestamp': start,
            'duration_ms': duration,
            'tool_calls_count': tool_calls_count,
            'iteration': self.current_session['iteration'],
            'usage': response.usage,
        })

        # Invoke after_llm events (after trace entry is added)
        self._invoke_events('after_llm')

        self.logger.log_llm_response(self.llm.model, duration, tool_calls_count, response.usage)

        return response

    def _execute_and_record_tools(self, tool_calls):
        """Execute requested tools and update conversation messages."""
        execute_and_record_tools(
            tool_calls=tool_calls,
            tools=self.tools,
            agent=self,
            logger=self.logger
        )

    def add_tool(self, tool: Callable):
        """Add a new tool to the agent."""
        if not hasattr(tool, 'to_function_schema'):
            processed_tool = create_tool_from_function(tool)
        else:
            processed_tool = tool
        self.tools.add(processed_tool)

    def remove_tool(self, tool_name: str) -> bool:
        """Remove a tool by name."""
        return self.tools.remove(tool_name)

    def list_tools(self) -> List[str]:
        """List all available tool names."""
        return self.tools.names()

    @property
    def context_percent(self) -> float:
        """Get current context window usage as percentage (0-100).

        Returns the percentage of context window used based on input_tokens
        from the last LLM call. Returns 0 if no LLM calls have been made yet.
        """
        if not self.last_usage:
            return 0.0
        limit = get_context_limit(self.llm.model)
        return (self.last_usage.input_tokens / limit) * 100

    def auto_debug(self, prompt: Optional[str] = None):
        """Start a debugging session for the agent.

        Args:
            prompt: Optional prompt to debug. If provided, runs single debug session.
                If None, starts interactive debug mode.

        This MVP version provides:
            - Breakpoints at @xray decorated tools
            - Display of tool execution context
            - Interactive menu to continue or edit values

        Examples:
            # Interactive mode
            agent = Agent("my_agent", tools=[search, analyze])
            agent.auto_debug()

            # Single prompt mode
            agent.auto_debug("Find information about Python")
        """
        from .interactive_debugger import InteractiveDebugger
        debugger = InteractiveDebugger(self)
        debugger.start_debug_session(prompt)
@@ -0,0 +1,84 @@
1
+ """
2
+ Purpose: Build and sign ANNOUNCE messages for agent relay network registration
3
+ LLM-Note:
4
+ Dependencies: imports from [json, time, typing, address.py] | imported by [host.py] | tested by [tests/test_announce.py]
5
+ Data flow: receives from host() → create_announce_message(address_data, summary, endpoints) → builds message dict without signature → serializes to deterministic JSON (sort_keys=True) → calls address.sign() to create Ed25519 signature → returns signed message ready for relay
6
+ State/Effects: no side effects | pure function | deterministic JSON serialization (matches server verification) | signature is hex string without 0x prefix
7
+ Integration: exposes create_announce_message(address_data, summary, endpoints) | used by host() to announce agent presence to relay network | relay server verifies signature using address (public key) | heartbeat re-sends with updated timestamp
8
+ Performance: Ed25519 signing is fast (sub-millisecond) | JSON serialization minimal overhead | no I/O or network calls
9
+ Errors: raises KeyError if address_data missing required keys | address.sign() errors bubble up | no validation of summary length or endpoint format
10
+
11
+ Build ANNOUNCE messages for relay registration.
12
+
13
+ Simple function-based approach - no classes needed for MVP.
14
+ """
15
+
16
import json
import time
from typing import Any, Dict, List, Optional
19
+
20
+
21
def create_announce_message(
    address_data: Dict[str, Any],
    summary: str,
    endpoints: Optional[List[str]] = None
) -> Dict[str, Any]:
    """
    Build and sign an ANNOUNCE message for relay registration.

    Args:
        address_data: Dictionary from address.load() or address.generate()
            containing 'address' and 'signing_key'
        summary: Description of agent's capabilities (max 1000 chars;
            not validated here)
        endpoints: List of connection endpoints (optional, default=[])
            Format: ["tcp://host:port"] or ["ws://host:port"]

    Returns:
        Dictionary ready to send to relay's /ws/announce endpoint:
        {
            "type": "ANNOUNCE",
            "address": "0x...",
            "timestamp": 1234567890,
            "summary": "...",
            "endpoints": [],
            "signature": "abc123..."
        }

    Raises:
        KeyError: If address_data is missing the 'address' key; errors from
            address.sign() bubble up.

    Example:
        >>> import address
        >>> addr = address.load()
        >>> msg = create_announce_message(
        ...     addr,
        ...     "Translator agent with 50+ languages",
        ...     ["tcp://127.0.0.1:8080"]
        ... )
        >>> # Now send msg through WebSocket to relay
    """
    if endpoints is None:
        endpoints = []

    # Build message WITHOUT signature first
    message = {
        "type": "ANNOUNCE",
        "address": address_data["address"],
        "timestamp": int(time.time()),
        "summary": summary,
        "endpoints": endpoints
    }

    # Create deterministic JSON for signing.
    # MUST match server's verification: json.dumps(message, sort_keys=True)
    message_json = json.dumps(message, sort_keys=True)
    message_bytes = message_json.encode('utf-8')

    # Sign with Ed25519 (imported locally; presumably avoids an import
    # cycle with the package __init__ — confirm before hoisting)
    from . import address
    signature_bytes = address.sign(address_data, message_bytes)

    # Convert to hex string (NO 0x prefix - matches auth system convention)
    signature_hex = signature_bytes.hex()

    # Add signature to message
    message["signature"] = signature_hex

    return message