connectonion 0.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- connectonion/__init__.py +78 -0
- connectonion/address.py +320 -0
- connectonion/agent.py +450 -0
- connectonion/announce.py +84 -0
- connectonion/asgi.py +287 -0
- connectonion/auto_debug_exception.py +181 -0
- connectonion/cli/__init__.py +3 -0
- connectonion/cli/browser_agent/__init__.py +5 -0
- connectonion/cli/browser_agent/browser.py +243 -0
- connectonion/cli/browser_agent/prompt.md +107 -0
- connectonion/cli/commands/__init__.py +1 -0
- connectonion/cli/commands/auth_commands.py +527 -0
- connectonion/cli/commands/browser_commands.py +27 -0
- connectonion/cli/commands/create.py +511 -0
- connectonion/cli/commands/deploy_commands.py +220 -0
- connectonion/cli/commands/doctor_commands.py +173 -0
- connectonion/cli/commands/init.py +469 -0
- connectonion/cli/commands/project_cmd_lib.py +828 -0
- connectonion/cli/commands/reset_commands.py +149 -0
- connectonion/cli/commands/status_commands.py +168 -0
- connectonion/cli/docs/co-vibecoding-principles-docs-contexts-all-in-one.md +2010 -0
- connectonion/cli/docs/connectonion.md +1256 -0
- connectonion/cli/docs.md +123 -0
- connectonion/cli/main.py +148 -0
- connectonion/cli/templates/meta-agent/README.md +287 -0
- connectonion/cli/templates/meta-agent/agent.py +196 -0
- connectonion/cli/templates/meta-agent/prompts/answer_prompt.md +9 -0
- connectonion/cli/templates/meta-agent/prompts/docs_retrieve_prompt.md +15 -0
- connectonion/cli/templates/meta-agent/prompts/metagent.md +71 -0
- connectonion/cli/templates/meta-agent/prompts/think_prompt.md +18 -0
- connectonion/cli/templates/minimal/README.md +56 -0
- connectonion/cli/templates/minimal/agent.py +40 -0
- connectonion/cli/templates/playwright/README.md +118 -0
- connectonion/cli/templates/playwright/agent.py +336 -0
- connectonion/cli/templates/playwright/prompt.md +102 -0
- connectonion/cli/templates/playwright/requirements.txt +3 -0
- connectonion/cli/templates/web-research/agent.py +122 -0
- connectonion/connect.py +128 -0
- connectonion/console.py +539 -0
- connectonion/debug_agent/__init__.py +13 -0
- connectonion/debug_agent/agent.py +45 -0
- connectonion/debug_agent/prompts/debug_assistant.md +72 -0
- connectonion/debug_agent/runtime_inspector.py +406 -0
- connectonion/debug_explainer/__init__.py +10 -0
- connectonion/debug_explainer/explain_agent.py +114 -0
- connectonion/debug_explainer/explain_context.py +263 -0
- connectonion/debug_explainer/explainer_prompt.md +29 -0
- connectonion/debug_explainer/root_cause_analysis_prompt.md +43 -0
- connectonion/debugger_ui.py +1039 -0
- connectonion/decorators.py +208 -0
- connectonion/events.py +248 -0
- connectonion/execution_analyzer/__init__.py +9 -0
- connectonion/execution_analyzer/execution_analysis.py +93 -0
- connectonion/execution_analyzer/execution_analysis_prompt.md +47 -0
- connectonion/host.py +579 -0
- connectonion/interactive_debugger.py +342 -0
- connectonion/llm.py +801 -0
- connectonion/llm_do.py +307 -0
- connectonion/logger.py +300 -0
- connectonion/prompt_files/__init__.py +1 -0
- connectonion/prompt_files/analyze_contact.md +62 -0
- connectonion/prompt_files/eval_expected.md +12 -0
- connectonion/prompt_files/react_evaluate.md +11 -0
- connectonion/prompt_files/react_plan.md +16 -0
- connectonion/prompt_files/reflect.md +22 -0
- connectonion/prompts.py +144 -0
- connectonion/relay.py +200 -0
- connectonion/static/docs.html +688 -0
- connectonion/tool_executor.py +279 -0
- connectonion/tool_factory.py +186 -0
- connectonion/tool_registry.py +105 -0
- connectonion/trust.py +166 -0
- connectonion/trust_agents.py +71 -0
- connectonion/trust_functions.py +88 -0
- connectonion/tui/__init__.py +57 -0
- connectonion/tui/divider.py +39 -0
- connectonion/tui/dropdown.py +251 -0
- connectonion/tui/footer.py +31 -0
- connectonion/tui/fuzzy.py +56 -0
- connectonion/tui/input.py +278 -0
- connectonion/tui/keys.py +35 -0
- connectonion/tui/pick.py +130 -0
- connectonion/tui/providers.py +155 -0
- connectonion/tui/status_bar.py +163 -0
- connectonion/usage.py +161 -0
- connectonion/useful_events_handlers/__init__.py +16 -0
- connectonion/useful_events_handlers/reflect.py +116 -0
- connectonion/useful_plugins/__init__.py +20 -0
- connectonion/useful_plugins/calendar_plugin.py +163 -0
- connectonion/useful_plugins/eval.py +139 -0
- connectonion/useful_plugins/gmail_plugin.py +162 -0
- connectonion/useful_plugins/image_result_formatter.py +127 -0
- connectonion/useful_plugins/re_act.py +78 -0
- connectonion/useful_plugins/shell_approval.py +159 -0
- connectonion/useful_tools/__init__.py +44 -0
- connectonion/useful_tools/diff_writer.py +192 -0
- connectonion/useful_tools/get_emails.py +183 -0
- connectonion/useful_tools/gmail.py +1596 -0
- connectonion/useful_tools/google_calendar.py +613 -0
- connectonion/useful_tools/memory.py +380 -0
- connectonion/useful_tools/microsoft_calendar.py +604 -0
- connectonion/useful_tools/outlook.py +488 -0
- connectonion/useful_tools/send_email.py +205 -0
- connectonion/useful_tools/shell.py +97 -0
- connectonion/useful_tools/slash_command.py +201 -0
- connectonion/useful_tools/terminal.py +285 -0
- connectonion/useful_tools/todo_list.py +241 -0
- connectonion/useful_tools/web_fetch.py +216 -0
- connectonion/xray.py +467 -0
- connectonion-0.5.8.dist-info/METADATA +741 -0
- connectonion-0.5.8.dist-info/RECORD +113 -0
- connectonion-0.5.8.dist-info/WHEEL +4 -0
- connectonion-0.5.8.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""StatusBar - Powerline-style info bar with segments.
|
|
2
|
+
|
|
3
|
+
Creates a professional terminal prompt bar like powerlevel10k.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from rich.text import Text
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Powerline characters
ARROW_RIGHT = ""  # U+E0B0 - requires powerline font
ARROW_RIGHT_THIN = ""  # U+E0B1
ARROW_LEFT = ""  # U+E0B2
ARROW_LEFT_THIN = ""  # U+E0B3

# Fallback for terminals without powerline fonts
ARROW_FALLBACK = "▶"

# Progress bar characters
PROGRESS_FILLED = "█"
PROGRESS_EMPTY = "░"


@dataclass
class ProgressSegment:
    """A progress bar segment for StatusBar.

    Usage:
        from connectonion.tui import StatusBar, ProgressSegment

        status = StatusBar([
            ("🤖", "co/gemini-2.5-pro", "magenta"),
            ProgressSegment(percent=78, bg_color="green"),
        ])
    """
    percent: float  # 0-100, how much is USED
    bg_color: str = "green"
    width: int = 10
    show_percent: bool = True

    def render(self) -> str:
        """Return the bar as plain text, e.g. '███████░░░ 78%'.

        The percentage is clamped to [0, 100] before drawing so callers
        can pass raw ratios without pre-validating them.
        """
        clamped = min(100, max(0, self.percent))
        filled_cells = int(self.width * clamped / 100)
        bar = PROGRESS_FILLED * filled_cells + PROGRESS_EMPTY * (self.width - filled_cells)
        if not self.show_percent:
            return bar
        return f"{bar} {int(clamped)}%"
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class StatusBar:
    """Powerline-style status bar with colored segments.

    Supports both text segments and progress bar segments.

    Usage:
        from connectonion.tui import StatusBar, ProgressSegment

        # Text segments only
        bar = StatusBar([
            ("🤖", "co/gemini-2.5-pro", "magenta"),
            ("", "main", "blue"),
        ])

        # With progress bar for context window
        bar = StatusBar([
            ("🤖", "co/gemini-2.5-pro", "magenta"),
            ProgressSegment(percent=78, bg_color="green"),
        ])
        console.print(bar.render())

    Output (with powerline font):
        🤖 co/gemini-2.5-pro ███████░░░ 78%
    """

    def __init__(
        self,
        segments: list,
        use_powerline: bool = True,
    ):
        """
        Args:
            segments: List of (icon, text, bg_color) tuples or ProgressSegment
            use_powerline: Use powerline arrow chars (requires font)
        """
        self.segments = segments
        self.use_powerline = use_powerline

    def _get_bg_color(self, segment) -> str:
        """Background color of a segment (ProgressSegment attr or tuple slot 2)."""
        if isinstance(segment, ProgressSegment):
            return segment.bg_color
        return segment[2]

    def _render_segment_content(self, segment) -> str:
        """Plain-text content of one segment, padded with a space on each side."""
        if isinstance(segment, ProgressSegment):
            return f" {segment.render()} "
        icon, label, _ = segment
        if icon:
            return f" {icon} {label} "
        return f" {label} "

    def render(self) -> Text:
        """Build the whole bar as a rich Text with arrow separators."""
        # One arrow glyph for the whole bar; fallback avoids requiring a
        # powerline-patched font.
        arrow = ARROW_RIGHT if self.use_powerline else ARROW_FALLBACK
        bar = Text()
        last_index = len(self.segments) - 1

        for index, segment in enumerate(self.segments):
            bg = self._get_bg_color(segment)
            bar.append(self._render_segment_content(segment), style=f"bold white on {bg}")

            if index < last_index:
                # Separator blends this segment's bg into the next one's.
                following_bg = self._get_bg_color(self.segments[index + 1])
                bar.append(arrow, style=f"{bg} on {following_bg}")
            else:
                # Final arrow fades into the terminal background.
                bar.append(arrow, style=f"{bg}")

        return bar
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class SimpleStatusBar:
    """Simpler status bar without powerline fonts.

    Uses brackets and pipes instead of arrows.

    Usage:
        bar = SimpleStatusBar([
            ("🤖", "co/gemini-2.5-pro", "magenta"),
            ("📊", "50%", "green"),
        ])
        console.print(bar.render())

    Output:
        [🤖 co/gemini-2.5-pro] [📊 50%]
    """

    def __init__(self, segments: list[tuple[str, str, str]]):
        # Each entry is (icon, label, color); a falsy icon is omitted.
        self.segments = segments

    def render(self) -> Text:
        """Render '[icon label]' groups separated by single spaces."""
        out = Text()
        first = True

        for icon, label, color in self.segments:
            if not first:
                out.append(" ")
            first = False

            out.append("[", style="dim")
            if icon:
                out.append(f"{icon} ", style=color)
            out.append(label, style=f"bold {color}")
            out.append("]", style="dim")

        return out
|
connectonion/usage.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Purpose: Token usage tracking and cost calculation for LLM calls
|
|
3
|
+
LLM-Note:
|
|
4
|
+
Dependencies: pydantic | imported by [llm.py, agent.py]
|
|
5
|
+
Data flow: receives model name + token counts → returns cost in USD
|
|
6
|
+
Integration: exposes TokenUsage, MODEL_PRICING, MODEL_CONTEXT_LIMITS, calculate_cost(), get_context_limit()
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from pydantic import BaseModel
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class TokenUsage(BaseModel):
    """Token usage from a single LLM call.

    Uses Pydantic BaseModel for:
    - Native JSON serialization via .model_dump()
    - Type validation at runtime
    - Future-proof API response compatibility
    """
    input_tokens: int = 0  # Total prompt tokens sent (includes cached_tokens)
    output_tokens: int = 0  # Completion tokens generated by the model
    cached_tokens: int = 0  # Tokens read from cache (subset of input_tokens)
    cache_write_tokens: int = 0  # Tokens written to cache (Anthropic only)
    cost: float = 0.0  # USD cost for this call
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# Pricing per 1M tokens (USD)
# Format: {"input": $, "output": $, "cached": $, "cache_write": $}
MODEL_PRICING = {
    # OpenAI models - cached = 50% of input
    "gpt-4o": {"input": 2.50, "output": 10.00, "cached": 1.25},
    "gpt-4o-mini": {"input": 0.15, "output": 0.60, "cached": 0.075},
    "gpt-4-turbo": {"input": 10.00, "output": 30.00, "cached": 5.00},
    "o1": {"input": 15.00, "output": 60.00, "cached": 7.50},
    "o1-mini": {"input": 3.00, "output": 12.00, "cached": 1.50},
    "o1-preview": {"input": 15.00, "output": 60.00, "cached": 7.50},
    "o3-mini": {"input": 1.10, "output": 4.40, "cached": 0.55},
    "o4-mini": {"input": 1.10, "output": 4.40, "cached": 0.55},

    # Anthropic Claude models - cached = 10% of input, cache_write = 125% of input
    "claude-3-5-sonnet-20241022": {"input": 3.00, "output": 15.00, "cached": 0.30, "cache_write": 3.75},
    "claude-3-5-sonnet-latest": {"input": 3.00, "output": 15.00, "cached": 0.30, "cache_write": 3.75},
    "claude-3-5-haiku-20241022": {"input": 0.80, "output": 4.00, "cached": 0.08, "cache_write": 1.00},
    "claude-3-5-haiku-latest": {"input": 0.80, "output": 4.00, "cached": 0.08, "cache_write": 1.00},
    "claude-3-opus-20240229": {"input": 15.00, "output": 75.00, "cached": 1.50, "cache_write": 18.75},
    "claude-3-sonnet-20240229": {"input": 3.00, "output": 15.00, "cached": 0.30, "cache_write": 3.75},
    "claude-3-haiku-20240307": {"input": 0.25, "output": 1.25, "cached": 0.025, "cache_write": 0.3125},

    # Claude 4 models
    "claude-sonnet-4-20250514": {"input": 3.00, "output": 15.00, "cached": 0.30, "cache_write": 3.75},
    "claude-opus-4-20250514": {"input": 15.00, "output": 75.00, "cached": 1.50, "cache_write": 18.75},

    # Google Gemini models - cached = 25% of input (75% discount)
    "gemini-3-pro-preview": {"input": 2.00, "output": 12.00, "cached": 0.50},
    # NOTE(review): output rate below looks like a per-image price, not a
    # per-1M-token price — confirm against the current Gemini pricing page.
    "gemini-3-pro-image-preview": {"input": 2.00, "output": 0.134},
    "gemini-2.5-pro": {"input": 1.25, "output": 10.00, "cached": 0.3125},
    "gemini-2.5-flash": {"input": 0.15, "output": 0.60, "cached": 0.0375},
    "gemini-2.0-flash": {"input": 0.10, "output": 0.40, "cached": 0.025},
    "gemini-1.5-pro": {"input": 1.25, "output": 5.00, "cached": 0.3125},
    "gemini-1.5-flash": {"input": 0.075, "output": 0.30, "cached": 0.01875},
}

# Context window limits (tokens)
MODEL_CONTEXT_LIMITS = {
    # OpenAI
    "gpt-4o": 128000,
    "gpt-4o-mini": 128000,
    "gpt-4-turbo": 128000,
    "o1": 200000,
    "o1-mini": 128000,
    "o1-preview": 128000,
    "o3-mini": 200000,
    "o4-mini": 200000,

    # Anthropic
    "claude-3-5-sonnet-20241022": 200000,
    "claude-3-5-sonnet-latest": 200000,
    "claude-3-5-haiku-20241022": 200000,
    "claude-3-5-haiku-latest": 200000,
    "claude-3-opus-20240229": 200000,
    "claude-3-sonnet-20240229": 200000,
    "claude-3-haiku-20240307": 200000,
    "claude-sonnet-4-20250514": 200000,
    "claude-opus-4-20250514": 200000,

    # Gemini
    "gemini-3-pro-preview": 1000000,
    "gemini-3-pro-image-preview": 65000,
    "gemini-2.5-pro": 1000000,
    "gemini-2.5-flash": 1000000,
    "gemini-2.0-flash": 1000000,
    "gemini-1.5-pro": 2000000,
    "gemini-1.5-flash": 1000000,
}

# Default values for unknown models
DEFAULT_PRICING = {"input": 1.00, "output": 3.00, "cached": 0.50}
DEFAULT_CONTEXT_LIMIT = 128000


def _longest_prefix_value(model: str, table: dict):
    """Look up `model` in `table`: exact match first, then LONGEST prefix.

    Longest-prefix matters because model IDs nest: a dated release such as
    "gpt-4o-mini-2024-07-18" starts with both "gpt-4o" and "gpt-4o-mini".
    First-match-in-insertion-order (the previous behavior) resolved it to
    "gpt-4o" and returned the wrong entry.

    Returns:
        The matching table value, or None when nothing matches.
    """
    if model in table:
        return table[model]

    matches = [known for known in table if model.startswith(known)]
    if matches:
        return table[max(matches, key=len)]

    return None


def get_pricing(model: str) -> dict:
    """Get pricing for a model, with fallback to default.

    Args:
        model: Model name (exact ID or a dated variant of a known model)

    Returns:
        Pricing dict with per-1M-token USD rates.
    """
    pricing = _longest_prefix_value(model, MODEL_PRICING)
    return DEFAULT_PRICING if pricing is None else pricing


def get_context_limit(model: str) -> int:
    """Get context limit for a model, with fallback to default.

    Args:
        model: Model name (exact ID or a dated variant of a known model)

    Returns:
        Context window size in tokens.
    """
    limit = _longest_prefix_value(model, MODEL_CONTEXT_LIMITS)
    return DEFAULT_CONTEXT_LIMIT if limit is None else limit


def calculate_cost(
    model: str,
    input_tokens: int,
    output_tokens: int,
    cached_tokens: int = 0,
    cache_write_tokens: int = 0,
) -> float:
    """Calculate USD cost for token usage.

    Args:
        model: Model name
        input_tokens: Total input tokens (includes cached)
        output_tokens: Output/completion tokens
        cached_tokens: Tokens read from cache (subset of input_tokens)
        cache_write_tokens: Tokens written to cache (Anthropic)

    Returns:
        Cost in USD
    """
    pricing = get_pricing(model)

    # Non-cached input tokens = total input - cached
    non_cached_input = max(0, input_tokens - cached_tokens)

    # Calculate costs (pricing is per 1M tokens)
    input_cost = (non_cached_input / 1_000_000) * pricing["input"]
    output_cost = (output_tokens / 1_000_000) * pricing["output"]
    # Models without an explicit cached rate fall back to 50% of input price.
    cached_cost = (cached_tokens / 1_000_000) * pricing.get("cached", pricing["input"] * 0.5)

    # Cache write cost (Anthropic only)
    cache_write_cost = 0.0
    if cache_write_tokens > 0 and "cache_write" in pricing:
        cache_write_cost = (cache_write_tokens / 1_000_000) * pricing["cache_write"]

    return input_cost + output_cost + cached_cost + cache_write_cost
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Useful event handlers for ConnectOnion agents.
|
|
3
|
+
|
|
4
|
+
Event handlers fire at specific points in the agent lifecycle.
|
|
5
|
+
Use on_events parameter to register them with your agent.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from connectonion import Agent
|
|
9
|
+
from connectonion.useful_events_handlers import reflect
|
|
10
|
+
|
|
11
|
+
agent = Agent("assistant", on_events=[reflect])
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from .reflect import reflect
|
|
15
|
+
|
|
16
|
+
__all__ = ['reflect']
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Reflect event handler - Adds reflection after tool execution.
|
|
3
|
+
|
|
4
|
+
Fires ONCE after ALL tools in a batch complete (when LLM returns multiple tool_calls).
|
|
5
|
+
Generates reasoning about what we learned and what to do next.
|
|
6
|
+
|
|
7
|
+
This uses `after_tools` (not `after_each_tool`) intentionally because:
|
|
8
|
+
1. Adding messages after EACH tool breaks Anthropic Claude's message ordering
|
|
9
|
+
2. Reflecting once after all tools provides better context for next steps
|
|
10
|
+
3. Fewer LLM calls = faster execution
|
|
11
|
+
|
|
12
|
+
Usage:
|
|
13
|
+
from connectonion import Agent
|
|
14
|
+
from connectonion.useful_events_handlers import reflect
|
|
15
|
+
|
|
16
|
+
agent = Agent("assistant", tools=[search], on_events=[reflect])
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import TYPE_CHECKING, List, Dict
|
|
21
|
+
from ..events import after_tools
|
|
22
|
+
from ..llm_do import llm_do
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING:
|
|
25
|
+
from ..agent import Agent
|
|
26
|
+
|
|
27
|
+
# Path to reflect prompt (inside connectonion package for proper packaging).
# Resolved relative to this file so the markdown ships with the wheel.
REFLECT_PROMPT = Path(__file__).parent.parent / "prompt_files" / "reflect.md"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _compress_messages(messages: List[Dict], tool_result_limit: int = 150) -> str:
|
|
32
|
+
"""
|
|
33
|
+
Compress conversation messages with structure:
|
|
34
|
+
- USER messages → Keep FULL
|
|
35
|
+
- ASSISTANT tool_calls → Keep parameters FULL
|
|
36
|
+
- ASSISTANT text → Keep FULL
|
|
37
|
+
- TOOL results → Truncate to tool_result_limit chars
|
|
38
|
+
"""
|
|
39
|
+
lines = []
|
|
40
|
+
|
|
41
|
+
for msg in messages:
|
|
42
|
+
role = msg['role']
|
|
43
|
+
|
|
44
|
+
if role == 'user':
|
|
45
|
+
lines.append(f"USER: {msg['content']}")
|
|
46
|
+
|
|
47
|
+
elif role == 'assistant':
|
|
48
|
+
if 'tool_calls' in msg:
|
|
49
|
+
tools = [f"{tc['function']['name']}({tc['function']['arguments']})"
|
|
50
|
+
for tc in msg['tool_calls']]
|
|
51
|
+
lines.append(f"ASSISTANT: {', '.join(tools)}")
|
|
52
|
+
else:
|
|
53
|
+
lines.append(f"ASSISTANT: {msg['content']}")
|
|
54
|
+
|
|
55
|
+
elif role == 'tool':
|
|
56
|
+
result = msg['content']
|
|
57
|
+
if len(result) > tool_result_limit:
|
|
58
|
+
result = result[:tool_result_limit] + '...'
|
|
59
|
+
lines.append(f"TOOL: {result}")
|
|
60
|
+
|
|
61
|
+
return "\n".join(lines)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@after_tools
def reflect(agent: 'Agent') -> None:
    """
    Reflection after tool execution.

    Fires ONCE after ALL tools in a batch complete. Generates reasoning about:
    - What we learned from the most recent action
    - What we should do next
    """
    # Only the most recent trace entry is inspected; earlier tools in the
    # batch are covered by the compressed conversation below.
    trace = agent.current_session['trace'][-1]

    # Ignore non-tool entries (e.g. plain LLM turns) — nothing to reflect on.
    if trace['type'] != 'tool_execution':
        return

    user_prompt = agent.current_session.get('user_prompt', '')
    tool_name = trace['tool_name']
    tool_args = trace['arguments']
    status = trace['status']

    # Compress history: user/assistant text kept full, tool results truncated.
    conversation = _compress_messages(agent.current_session['messages'])

    # Build the reflection prompt; success and failure get different tails.
    if status == 'success':
        tool_result = trace['result']
        # Result is capped at 300 chars so the reflection call stays cheap.
        prompt = f"""Context:
{conversation}

Current:
User asked: {user_prompt}
Action: {tool_name}({tool_args})
Result: {str(tool_result)[:300]}"""
    else:
        error = trace.get('error', 'Unknown error')
        prompt = f"""Context:
{conversation}

Current:
User asked: {user_prompt}
Action: {tool_name}({tool_args})
Error: {error}"""

    # One extra (cheap, low-temperature) LLM call per tool batch.
    reasoning = llm_do(
        prompt,
        model="co/gemini-2.5-flash",
        temperature=0.2,
        system_prompt=REFLECT_PROMPT
    )

    # NOTE(review): this logs after llm_do has already returned, so the
    # "/reflecting..." line appears once reflection is done — confirm intent.
    agent.logger.print("[dim]/reflecting...[/dim]")

    # Inject the reasoning as an assistant message so the next LLM turn
    # sees it as part of the conversation.
    agent.current_session['messages'].append({
        'role': 'assistant',
        'content': f"🤔 {reasoning}"
    })
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Purpose: Export pre-built plugins that extend agent behavior via event hooks
|
|
3
|
+
LLM-Note:
|
|
4
|
+
Dependencies: imports from [re_act, image_result_formatter, shell_approval, gmail_plugin, calendar_plugin] | imported by [__init__.py main package] | re-exports plugins for agent consumption
|
|
5
|
+
Data flow: agent imports plugin → passes to Agent(plugins=[plugin]) → plugin event handlers fire on agent lifecycle events
|
|
6
|
+
State/Effects: no state | pure re-exports | plugins modify agent behavior at runtime
|
|
7
|
+
Integration: exposes re_act (ReAct prompting), image_result_formatter (base64 image handling), shell_approval (user confirmation for shell commands), gmail_plugin (Gmail OAuth flow), calendar_plugin (Google Calendar integration) | plugins are lists of event handlers
|
|
8
|
+
Errors: ImportError if underlying plugin dependencies not installed
|
|
9
|
+
|
|
10
|
+
Pre-built plugins that can be easily imported and used across agents.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from .re_act import re_act
|
|
14
|
+
from .eval import eval
|
|
15
|
+
from .image_result_formatter import image_result_formatter
|
|
16
|
+
from .shell_approval import shell_approval
|
|
17
|
+
from .gmail_plugin import gmail_plugin
|
|
18
|
+
from .calendar_plugin import calendar_plugin
|
|
19
|
+
|
|
20
|
+
__all__ = ['re_act', 'eval', 'image_result_formatter', 'shell_approval', 'gmail_plugin', 'calendar_plugin']
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Purpose: Human-in-the-loop approval plugin for Google Calendar write operations
|
|
3
|
+
LLM-Note:
|
|
4
|
+
Dependencies: imports from [typing, events.before_each_tool, tui.pick, rich.console, rich.panel, rich.text] | imported by [useful_plugins/__init__.py] | tested by [tests/unit/test_calendar_plugin.py]
|
|
5
|
+
Data flow: before_each_tool → check_calendar_approval() checks if tool is create_event/create_meet/update_event/delete_event → displays event preview with Rich panel → pick() prompts for user approval → raises ValueError to cancel if rejected
|
|
6
|
+
State/Effects: blocks on user input | displays Rich-formatted event preview | raises exception to cancel tool execution | no file I/O | no network
|
|
7
|
+
Integration: exposes calendar_plugin list with [check_calendar_approval] handler | used via Agent(plugins=[calendar_plugin]) | works with GoogleCalendar tool
|
|
8
|
+
Performance: instant display | blocks on user input | no LLM calls
|
|
9
|
+
Errors: raises ValueError on rejection | keyboard interrupts handled gracefully
|
|
10
|
+
|
|
11
|
+
Calendar plugin - Approval for Google Calendar operations.
|
|
12
|
+
|
|
13
|
+
Usage:
|
|
14
|
+
from connectonion import Agent, GoogleCalendar
|
|
15
|
+
from connectonion.useful_plugins import calendar_plugin
|
|
16
|
+
|
|
17
|
+
calendar = GoogleCalendar()
|
|
18
|
+
agent = Agent("assistant", tools=[calendar], plugins=[calendar_plugin])
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from typing import TYPE_CHECKING
|
|
22
|
+
from ..events import before_each_tool
|
|
23
|
+
from ..tui import pick
|
|
24
|
+
from rich.console import Console
|
|
25
|
+
from rich.panel import Panel
|
|
26
|
+
from rich.text import Text
|
|
27
|
+
|
|
28
|
+
if TYPE_CHECKING:
|
|
29
|
+
from ..agent import Agent
|
|
30
|
+
|
|
31
|
+
# Module-level console shared by the preview panel and the picker prompt.
_console = Console()

# Calendar methods that create/modify/delete events
WRITE_METHODS = ('create_event', 'create_meet', 'update_event', 'delete_event')


@before_each_tool
def check_calendar_approval(agent: 'Agent') -> None:
    """Ask user approval before modifying calendar.

    Shows a Rich panel previewing the pending write operation, then blocks
    on a pick() prompt. Read-only calendar tools pass through untouched.

    Raises:
        ValueError: If user rejects the action
    """
    # 'pending_tool' is set by the agent before each tool call; nothing to
    # approve if it is absent.
    pending = agent.current_session.get('pending_tool')
    if not pending:
        return

    # Only gate write operations; reads/listings need no approval.
    tool_name = pending['name']
    if tool_name not in WRITE_METHODS:
        return

    args = pending['arguments']

    # Skip if all calendar actions auto-approved
    if agent.current_session.get('calendar_approve_all', False):
        return

    preview = Text()

    # Build a per-operation preview. Attendee lines are highlighted in
    # yellow because they trigger outgoing email notifications.
    if tool_name == 'create_event':
        title = args.get('title', '')
        start = args.get('start_time', '')
        end = args.get('end_time', '')
        attendees = args.get('attendees', '')
        location = args.get('location', '')
        description = args.get('description', '')

        preview.append("Title: ", style="bold cyan")
        preview.append(f"{title}\n")
        preview.append("Start: ", style="bold cyan")
        preview.append(f"{start}\n")
        preview.append("End: ", style="bold cyan")
        preview.append(f"{end}\n")
        if attendees:
            preview.append("Attendees: ", style="bold yellow")
            preview.append(f"{attendees} (will receive invite!)\n")
        if location:
            preview.append("Location: ", style="bold cyan")
            preview.append(f"{location}\n")
        if description:
            preview.append("\n")
            # Long descriptions are clipped to keep the panel compact.
            preview.append(description[:300])

        action = "Create Event"

    elif tool_name == 'create_meet':
        title = args.get('title', '')
        start = args.get('start_time', '')
        end = args.get('end_time', '')
        attendees = args.get('attendees', '')
        description = args.get('description', '')

        preview.append("Title: ", style="bold cyan")
        preview.append(f"{title}\n")
        preview.append("Start: ", style="bold cyan")
        preview.append(f"{start}\n")
        preview.append("End: ", style="bold cyan")
        preview.append(f"{end}\n")
        preview.append("Attendees: ", style="bold yellow")
        preview.append(f"{attendees} (will receive Meet invite!)\n")
        if description:
            preview.append("\n")
            preview.append(description[:300])

        action = "Create Meeting"

    elif tool_name == 'update_event':
        event_id = args.get('event_id', '')
        title = args.get('title', '')
        start = args.get('start_time', '')
        end = args.get('end_time', '')
        attendees = args.get('attendees', '')

        preview.append("Event ID: ", style="bold cyan")
        preview.append(f"{event_id}\n")
        # Updates only show the fields that are actually changing.
        if title:
            preview.append("New Title: ", style="bold cyan")
            preview.append(f"{title}\n")
        if start:
            preview.append("New Start: ", style="bold cyan")
            preview.append(f"{start}\n")
        if end:
            preview.append("New End: ", style="bold cyan")
            preview.append(f"{end}\n")
        if attendees:
            preview.append("New Attendees: ", style="bold yellow")
            preview.append(f"{attendees} (will be notified!)\n")

        action = "Update Event"

    elif tool_name == 'delete_event':
        event_id = args.get('event_id', '')

        preview.append("Event ID: ", style="bold red")
        preview.append(f"{event_id}\n")
        preview.append("\n", style="bold red")
        preview.append("This will permanently delete the event!", style="red")

        action = "Delete Event"

    _console.print()
    _console.print(Panel(preview, title=f"[yellow]{action}[/yellow]", border_style="yellow"))

    # Three choices: approve once, approve everything this session, or reject
    # with free-form feedback for the agent.
    options = [f"Yes, {action.lower()}"]
    options.append("Auto approve all calendar actions this session")
    options.append("No, tell agent what I want")

    choice = pick(f"Proceed with {action.lower()}?", options, console=_console)

    if choice.startswith("Yes"):
        return
    elif choice == "Auto approve all calendar actions this session":
        # Persist the blanket approval on the session so later writes skip
        # the prompt entirely.
        agent.current_session['calendar_approve_all'] = True
        return
    else:
        # Raising cancels the tool call; the message is surfaced back to the
        # LLM so it can adjust its plan.
        feedback = input("What do you want the agent to do instead? ")
        raise ValueError(f"User feedback: {feedback}")
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
# Bundle as plugin: pass to an agent via Agent(plugins=[calendar_plugin]).
# A plugin is simply a list of event handlers.
calendar_plugin = [
    check_calendar_approval,
]
|