tinyagent-py 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- tinyagent/hooks/rich_code_ui_callback.py +434 -0
- tinyagent/mcp_client.py +0 -5
- tinyagent/memory_manager.py +1067 -0
- tinyagent/storage/__init__.py +2 -4
- tinyagent/storage/sqlite_storage.py +4 -1
- tinyagent/tiny_agent.py +52 -6
- {tinyagent_py-0.0.5.dist-info → tinyagent_py-0.0.7.dist-info}/METADATA +2 -2
- tinyagent_py-0.0.7.dist-info/RECORD +20 -0
- {tinyagent_py-0.0.5.dist-info → tinyagent_py-0.0.7.dist-info}/WHEEL +1 -1
- tinyagent/hooks/agno_storage_hook.py +0 -128
- tinyagent/storage/agno_storage.py +0 -114
- tinyagent_py-0.0.5.dist-info/RECORD +0 -20
- {tinyagent_py-0.0.5.dist-info → tinyagent_py-0.0.7.dist-info}/licenses/LICENSE +0 -0
- {tinyagent_py-0.0.5.dist-info → tinyagent_py-0.0.7.dist-info}/top_level.txt +0 -0
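The headline addition is tinyagent/hooks/rich_code_ui_callback.py, shown in full below. As a quick orientation, here is a minimal usage sketch distilled from the run_example() helper shipped in that module; it assumes the same TinyAgent constructor arguments and model name used there, and omits the logging setup, MCP server connections, and tool registration that the full example performs.

import asyncio
from tinyagent import TinyAgent
from tinyagent.hooks.rich_code_ui_callback import RichCodeUICallback

async def main():
    # API surface taken from run_example() in the new module below.
    agent = TinyAgent(model="gpt-4.1-mini", api_key="<OPENAI_API_KEY>")
    agent.add_callback(RichCodeUICallback(markdown=True, show_tool_calls=True))
    print(await agent.run("Plan a trip to Toronto for 7 days. In the next month."))
    await agent.close()

asyncio.run(main())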
tinyagent/hooks/rich_code_ui_callback.py
ADDED
@@ -0,0 +1,434 @@
+import json
+import logging
+from typing import Any, Optional, Set
+
+from rich.console import Console, Group
+from rich.markdown import Markdown
+from rich.text import Text
+from rich.json import JSON
+
+from tinyagent.hooks.rich_ui_callback import RichUICallback, create_panel, escape_markdown_tags
+
+__all__ = ["RichCodeUICallback"]
+
+
+class RichCodeUICallback(RichUICallback):
+    """
+    A callback for TinyAgent that extends RichUICallback with special handling for code tools.
+    Provides richer display for Python code execution in run_python tool calls.
+    """
+
+    def __init__(
+        self,
+        console: Optional[Console] = None,
+        markdown: bool = True,
+        show_message: bool = True,
+        show_thinking: bool = True,
+        show_tool_calls: bool = True,
+        tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
+        logger: Optional[logging.Logger] = None,
+    ):
+        """
+        Initialize the Rich Code UI callback.
+
+        Args:
+            console: Optional Rich console to use
+            markdown: Whether to render responses as markdown
+            show_message: Whether to show the user message
+            show_thinking: Whether to show the thinking process
+            show_tool_calls: Whether to show tool calls
+            tags_to_include_in_markdown: Tags to include in markdown rendering
+            logger: Optional logger to use
+        """
+        super().__init__(
+            console=console,
+            markdown=markdown,
+            show_message=show_message,
+            show_thinking=show_thinking,
+            show_tool_calls=show_tool_calls,
+            tags_to_include_in_markdown=tags_to_include_in_markdown,
+            logger=logger or logging.getLogger(__name__)
+        )
+        self.logger.debug("RichCodeUICallback initialized")
+
+    async def _handle_message_add(self, agent: Any, **kwargs: Any) -> None:
+        """Handle the message_add event with special handling for run_python tool."""
+        # Call the parent method first to ensure all standard processing occurs
+        await super()._handle_message_add(agent, **kwargs)
+
+        # Then perform our special handling for run_python tool calls
+        message = kwargs.get("message", {})
+
+        # Process tool calls in assistant messages
+        if message.get("role") == "assistant" and "tool_calls" in message:
+            for tool_call in message.get("tool_calls", []):
+                function_info = tool_call.get("function", {})
+                tool_name = function_info.get("name", "unknown")
+
+                # Only process run_python tool calls
+                if tool_name == "run_python":
+                    args = function_info.get("arguments", "{}")
+                    tool_id = tool_call.get("id", "unknown")
+
+                    try:
+                        args_dict = json.loads(args)
+
+                        # Find the corresponding tool call in our detailed list
+                        for tool_detail in self.tool_call_details:
+                            if tool_detail.get("id") == tool_id and tool_detail.get("name") == "run_python":
+                                # Check if code_lines is present
+                                if "code_lines" in args_dict and isinstance(args_dict["code_lines"], list):
+                                    # Store the original code_lines for display purposes
+                                    tool_detail["code_lines"] = args_dict["code_lines"]
+                                    self.logger.debug(f"Stored code_lines for run_python tool {tool_id}")
+                                break
+                    except Exception as e:
+                        self.logger.error(f"Error processing run_python arguments: {e}")
+
+    def _update_display(self) -> None:
+        """Update the live display with current panels, with special handling for code output."""
+        # Don't call super() yet - we'll build the panels ourselves to have more control
+
+        # If we don't have live display, nothing to do
+        if not self.live:
+            return
+
+        # Start with a fresh list of panels in the specified order
+        ordered_panels = []
+
+        # 1. Status (if exists)
+        status_panel = next((p for p in self.panels if hasattr(p, "renderable") and isinstance(p.renderable, Group) and
+                             hasattr(p.renderable[0], "spinner")), None)
+        if status_panel:
+            ordered_panels.append(status_panel)
+
+        # 2. User Message (if exists)
+        user_message_panel = next((p for p in self.panels if hasattr(p, "title") and "User Message" in str(p.title)), None)
+        if user_message_panel:
+            ordered_panels.append(user_message_panel)
+
+        # 3. Tool Calls summary (if we have tool calls)
+        if self.show_tool_calls and self.tool_calls:
+            # Create the tool calls summary panel
+            self.logger.debug(f"Creating tool calls summary panel with {len(self.tool_calls)} calls")
+            tool_calls_content = Text()
+            for i, tool_call in enumerate(self.tool_calls):
+                if "result:" not in tool_call:  # Only show the calls, not results
+                    tool_calls_content.append(f"• {tool_call}\n")
+
+            if tool_calls_content:
+                tool_calls_panel = create_panel(
+                    content=tool_calls_content,
+                    title="Tool Calls Summary",
+                    border_style="yellow",
+                    logger=self.logger
+                )
+                ordered_panels.append(tool_calls_panel)
+
+        # 4. Assistant Text Responses
+        for i, response_data in enumerate(self.assistant_text_responses):
+            content = response_data["content"]
+            token_count = response_data.get("token_count", 0)
+
+            if self.markdown:
+                self.logger.debug("Converting assistant response to markdown")
+                escaped_content = escape_markdown_tags(content, self.tags_to_include_in_markdown)
+                content = Markdown(escaped_content)
+
+            response_panel = create_panel(
+                content=content,
+                title=f"Assistant Response {i+1}",
+                border_style="blue",
+                logger=self.logger
+            )
+            ordered_panels.append(response_panel)
+
+            # Add token count panel with purple border
+            token_panel = create_panel(
+                content=Text(f"Token count: {token_count}", style="bold"),
+                title="Tokens",
+                border_style="purple",
+                logger=self.logger
+            )
+            ordered_panels.append(token_panel)
+
+        # 5. Token Usage Panel
+        if any(self.token_usage.values()):
+            token_content = Text()
+            token_content.append(f"Prompt Tokens: {self.token_usage['prompt_tokens']}\n", style="cyan")
+            token_content.append(f"Completion Tokens: {self.token_usage['completion_tokens']}\n", style="green")
+            token_content.append(f"Total Tokens: {self.token_usage['total_tokens']}", style="bold magenta")
+
+            token_panel = create_panel(
+                content=token_content,
+                title="Token Usage",
+                border_style="bright_blue",
+                logger=self.logger
+            )
+            ordered_panels.append(token_panel)
+
+        # 6. Detailed Tool Calls - with special handling for run_python
+        if self.show_tool_calls:
+            for tool_detail in self.tool_call_details:
+                tool_name = tool_detail["name"]
+                tool_id = tool_detail.get("id", "unknown")
+
+                # Special handling for run_python tools
+                if tool_name == "run_python" and "code_lines" in tool_detail:
+                    # Create a markdown-formatted Python code block
+                    code_lines = tool_detail["code_lines"]
+                    combined_code = "\n".join(code_lines)
+                    python_code_markdown = f"```python\n{combined_code}\n```"
+
+                    # Create a group with the code and result (if available)
+                    content_group = []
+
+                    # Add the markdown-formatted code
+                    if self.markdown:
+                        try:
+                            code_content = Markdown(python_code_markdown)
+                            content_group.append(code_content)
+
+                            # Add the result if available
+                            if tool_detail.get("result"):
+                                result = tool_detail.get("result")
+                                content_group.append(Text("\nOutput:", style="bold"))
+
+                                # Handle different result types properly
+                                if isinstance(result, dict):
+                                    # If result is already a dict, use JSON formatter
+                                    content_group.append(JSON(result))
+                                else:
+                                    try:
+                                        # Try to parse string result as JSON
+                                        result_json = json.loads(result)
+                                        content_group.append(JSON(result_json))
+                                    except:
+                                        # Handle plain text with proper formatting
+                                        # Replace escaped newlines with actual newlines
+                                        if isinstance(result, str):
+                                            formatted_result = result.replace("\\n", "\n")
+                                            # Split by lines and preserve indentation
+                                            lines = formatted_result.split("\n")
+                                            formatted_text = Text()
+                                            for line in lines:
+                                                formatted_text.append(f"{line}\n")
+                                            content_group.append(formatted_text)
+                                        else:
+                                            # For any other type, convert to string
+                                            content_group.append(Text(str(result)))
+
+                            # Create a panel with the content group
+                            code_panel = create_panel(
+                                content=Group(*content_group),
+                                title=f"Tool: run_python ({tool_id})",
+                                border_style="yellow",
+                                logger=self.logger
+                            )
+                            ordered_panels.append(code_panel)
+
+                            # Add token count panel with purple border
+                            input_token_count = tool_detail.get("token_count", 0)
+                            result_token_count = tool_detail.get("result_token_count", 0)
+
+                            token_content = Text()
+                            token_content.append(f"Input tokens: {input_token_count}\n", style="cyan")
+                            if tool_detail.get("result") is not None:
+                                token_content.append(f"Output tokens: {result_token_count}\n", style="green")
+                                token_content.append(f"Total tokens: {input_token_count + result_token_count}", style="bold")
+
+                            token_panel = create_panel(
+                                content=token_content,
+                                title="Tokens",
+                                border_style="purple",
+                                logger=self.logger
+                            )
+                            ordered_panels.append(token_panel)
+
+                        except Exception as e:
+                            self.logger.error(f"Error creating markdown panel: {e}")
+                            # Fallback to standard panel
+                            self._add_standard_tool_panel(ordered_panels, tool_detail)
+                    else:
+                        # If markdown is disabled, use standard panel
+                        self._add_standard_tool_panel(ordered_panels, tool_detail)
+                else:
+                    # Standard handling for other tools
+                    self._add_standard_tool_panel(ordered_panels, tool_detail)
+
+        # 7. Thinking panel (if we have thinking content)
+        if self.show_thinking and self.thinking_content:
+            self.logger.debug("Adding thinking panel")
+            thinking_panel = create_panel(
+                content=Text(self.thinking_content),
+                title=f"Thinking ({self.timer.elapsed:.1f}s)",
+                border_style="green",
+                logger=self.logger
+            )
+            ordered_panels.append(thinking_panel)
+
+            # Add token count panel for thinking content
+            thinking_token_count = self.count_tokens(self.thinking_content)
+            token_panel = create_panel(
+                content=Text(f"Token count: {thinking_token_count}", style="bold"),
+                title="Tokens",
+                border_style="purple",
+                logger=self.logger
+            )
+            ordered_panels.append(token_panel)
+
+        # 8. Final response panel (if we have a response)
+        if self.response_content:
+            content = self.response_content
+            if self.markdown:
+                self.logger.debug("Converting response to markdown")
+                escaped_content = escape_markdown_tags(content, self.tags_to_include_in_markdown)
+                content = Markdown(escaped_content)
+
+            response_panel = create_panel(
+                content=content,
+                title=f"Response ({self.timer.elapsed:.1f}s)",
+                border_style="blue",
+                logger=self.logger
+            )
+            ordered_panels.append(response_panel)
+
+            # Add token count panel for final response
+            response_token_count = self.count_tokens(self.response_content)
+            token_panel = create_panel(
+                content=Text(f"Token count: {response_token_count}", style="bold"),
+                title="Tokens",
+                border_style="purple",
+                logger=self.logger
+            )
+            ordered_panels.append(token_panel)
+
+        try:
+            self.logger.debug(f"Updating live display with {len(ordered_panels)} panels")
+            self.live.update(Group(*ordered_panels))
+        except Exception as e:
+            self.logger.error(f"Error updating display: {e}")
+
+    def _add_standard_tool_panel(self, ordered_panels, tool_detail):
+        """Add a standard tool panel for non-run_python tools or as fallback."""
+        tool_name = tool_detail["name"]
+        arguments = tool_detail["arguments"]
+        result = tool_detail.get("result")
+        input_token_count = tool_detail.get("token_count", 0)
+        result_token_count = tool_detail.get("result_token_count", 0)
+        tool_id = tool_detail.get("id", "unknown")
+
+        tool_content = Text()
+        tool_content.append("Input:\n", style="bold")
+        tool_content.append(f"{arguments}\n\n")
+
+        if result is not None:
+            tool_content.append("Output:\n", style="bold")
+            tool_content.append(f"{result}")
+        else:
+            tool_content.append("Waiting for response...", style="italic")
+
+        tool_panel = create_panel(
+            content=tool_content,
+            title=f"Tool: {tool_name} ({tool_id})",
+            border_style="yellow",
+            logger=self.logger
+        )
+        ordered_panels.append(tool_panel)
+
+        # Add token count panel with purple border
+        token_content = Text()
+        token_content.append(f"Input tokens: {input_token_count}\n", style="cyan")
+        if result is not None:
+            token_content.append(f"Output tokens: {result_token_count}\n", style="green")
+            token_content.append(f"Total tokens: {input_token_count + result_token_count}", style="bold")
+
+        token_panel = create_panel(
+            content=token_content,
+            title="Tokens",
+            border_style="purple",
+            logger=self.logger
+        )
+        ordered_panels.append(token_panel)
+
+
+async def run_example():
+    """Example usage of RichCodeUICallback with TinyAgent."""
+    import asyncio
+    import os
+    import sys
+    from tinyagent import TinyAgent
+    from tinyagent.hooks.logging_manager import LoggingManager
+
+    # Create and configure logging manager
+    log_manager = LoggingManager(default_level=logging.INFO)
+    log_manager.set_levels({
+        'tinyagent.hooks.rich_code_ui_callback': logging.DEBUG,  # Debug for this module
+        'tinyagent.tiny_agent': logging.INFO,  # Info for TinyAgent
+        'tinyagent.mcp_client': logging.INFO,  # Info for MCPClient
+    })
+
+    # Configure a console handler
+    console_handler = logging.StreamHandler(sys.stdout)
+    log_manager.configure_handler(
+        console_handler,
+        format_string='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+        level=logging.DEBUG
+    )
+
+    # Get module-specific loggers
+    ui_logger = log_manager.get_logger('tinyagent.hooks.rich_code_ui_callback')
+    agent_logger = log_manager.get_logger('tinyagent.tiny_agent')
+
+    ui_logger.debug("Starting RichCodeUICallback example")
+
+    # Get API key from environment
+    api_key = os.environ.get("OPENAI_API_KEY")
+    if not api_key:
+        ui_logger.error("Please set the OPENAI_API_KEY environment variable")
+        return
+
+    # Initialize the agent with our logger
+    agent = TinyAgent(model="gpt-4.1-mini", api_key=api_key, logger=agent_logger)
+
+    # Connect to MCP servers as required
+    await agent.connect_to_server("npx", ["-y", "@openbnb/mcp-server-airbnb", "--ignore-robots-txt"])
+    await agent.connect_to_server("npx", ["-y", "@modelcontextprotocol/server-sequential-thinking"])
+
+    # Define a run_python tool
+    async def run_python(code_lines):
+        try:
+            code = "\n".join(code_lines)
+            # In a real implementation, you'd run this code with appropriate safety measures
+            result = f"Executed Python code successfully. Result: This is a simulated result for demo"
+            return result
+        except Exception as e:
+            return f"Error executing Python code: {str(e)}"
+
+    # Register the tool with the agent
+    agent.register_tool(run_python)
+
+    # Add the Rich Code UI callback with our logger
+    rich_ui = RichCodeUICallback(
+        markdown=True,
+        show_message=True,
+        show_thinking=True,
+        show_tool_calls=True,
+        logger=ui_logger
+    )
+    agent.add_callback(rich_ui)
+
+    # Run the agent with the required example input
+    user_input = "Plan a trip to Toronto for 7 days. In the next month."
+    ui_logger.info(f"Running agent with input: {user_input}")
+    result = await agent.run(user_input)

+    ui_logger.info(f"Final result: {result}")
+
+    # Clean up
+    await agent.close()
+
+
+if __name__ == "__main__":
+    import asyncio
+    asyncio.run(run_example())
tinyagent/mcp_client.py
CHANGED
@@ -106,11 +106,6 @@ class MCPClient:
 
     async def close(self):
         """Clean up subprocess and streams."""
-        if self.session:
-            try:
-                await self.session.close()
-            except Exception as e:
-                self.logger.error(f"Error closing session: {e}")
         if self.exit_stack:
             try:
                 await self.exit_stack.aclose()