tinyagent-py 0.0.1__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hooks/__init__.py +4 -0
- hooks/agno_storage_hook.py +128 -0
- hooks/gradio_callback.py +966 -0
- hooks/logging_manager.py +213 -0
- hooks/rich_ui_callback.py +559 -0
- storage/__init__.py +7 -0
- storage/agno_storage.py +114 -0
- storage/base.py +49 -0
- storage/json_file_storage.py +30 -0
- storage/postgres_storage.py +201 -0
- storage/redis_storage.py +48 -0
- storage/sqlite_storage.py +156 -0
- tinyagent_py-0.0.4.dist-info/METADATA +252 -0
- tinyagent_py-0.0.4.dist-info/RECORD +17 -0
- {tinyagent_py-0.0.1.dist-info → tinyagent_py-0.0.4.dist-info}/WHEEL +1 -1
- tinyagent_py-0.0.4.dist-info/top_level.txt +2 -0
- tinyagent/__init__.py +0 -4
- tinyagent/mcp_client.py +0 -52
- tinyagent/tiny_agent.py +0 -247
- tinyagent_py-0.0.1.dist-info/METADATA +0 -79
- tinyagent_py-0.0.1.dist-info/RECORD +0 -8
- tinyagent_py-0.0.1.dist-info/top_level.txt +0 -1
- {tinyagent_py-0.0.1.dist-info → tinyagent_py-0.0.4.dist-info}/licenses/LICENSE +0 -0
hooks/rich_ui_callback.py
ADDED
@@ -0,0 +1,559 @@
import asyncio
import json
import time
import logging
import tiktoken  # Add tiktoken import for token counting
from typing import Any, Dict, List, Optional, Set, Union

from rich.console import Console, Group
from rich.json import JSON
from rich.live import Live
from rich.markdown import Markdown
from rich.panel import Panel
from rich.status import Status
from rich.text import Text
from rich.box import HEAVY


class Timer:
    """Simple timer to track elapsed time."""

    def __init__(self, logger=None):
        self.start_time = None
        self.end_time = None
        self.logger = logger or logging.getLogger(__name__)

    def start(self):
        self.start_time = time.time()
        self.end_time = None
        self.logger.debug("Timer started")

    def stop(self):
        self.end_time = time.time()
        self.logger.debug(f"Timer stopped. Total elapsed: {self.elapsed:.2f}s")

    @property
    def elapsed(self) -> float:
        """Return elapsed time in seconds."""
        if self.start_time is None:
            return 0.0
        end = self.end_time if self.end_time is not None else time.time()
        return end - self.start_time


def create_panel(content, title, border_style="blue", logger=None):
    """Create a rich panel with consistent styling."""
    log = logger or logging.getLogger(__name__)
    log.debug(f"Creating panel with title: {title}")
    return Panel(
        content,
        title=title,
        title_align="left",
        border_style=border_style,
        box=HEAVY,
        expand=True,
        padding=(1, 1)
    )

def escape_markdown_tags(content: str, tags: Set[str]) -> str:
    """Escape special tags in markdown content."""
    escaped_content = content
    for tag in tags:
        # Escape opening tag
        escaped_content = escaped_content.replace(f"<{tag}>", f"&lt;{tag}&gt;")
        # Escape closing tag
        escaped_content = escaped_content.replace(f"</{tag}>", f"&lt;/{tag}&gt;")
    return escaped_content

class RichUICallback:
    """
    A callback for TinyAgent that provides a rich terminal UI similar to Agno.
    """

    def __init__(
        self,
        console: Optional[Console] = None,
        markdown: bool = True,
        show_message: bool = True,
        show_thinking: bool = True,
        show_tool_calls: bool = True,
        tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
        logger: Optional[logging.Logger] = None,
    ):
        """
        Initialize the Rich UI callback.

        Args:
            console: Optional Rich console to use
            markdown: Whether to render responses as markdown
            show_message: Whether to show the user message
            show_thinking: Whether to show the thinking process
            show_tool_calls: Whether to show tool calls
            tags_to_include_in_markdown: Tags to include in markdown rendering
            logger: Optional logger to use
        """
        self.console = console or Console()
        self.markdown = markdown
        self.show_message = show_message
        self.show_thinking = show_thinking
        self.show_tool_calls = show_tool_calls
        self.tags_to_include_in_markdown = tags_to_include_in_markdown
        self.logger = logger or logging.getLogger(__name__)

        # State tracking
        self.live = None
        self.timer = Timer(logger=self.logger)
        self.panels = []
        self.status = None
        self.thinking_content = ""
        self.response_content = ""
        self.tool_calls = []
        self.tool_call_details = []  # Store detailed tool call info with inputs and outputs
        self.current_user_input = ""
        self.assistant_text_responses = []  # Store text responses from assistant
        self.token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

        # Initialize tiktoken encoder for token counting
        try:
            self.encoder = tiktoken.get_encoding("o200k_base")
            self.logger.debug("Initialized tiktoken encoder with o200k_base encoding")
        except Exception as e:
            self.logger.error(f"Failed to initialize tiktoken encoder: {e}")
            self.encoder = None

        self.logger.debug("RichUICallback initialized")

    def count_tokens(self, text: str) -> int:
        """Count tokens in a string using tiktoken."""
        if not self.encoder or not text:
            return 0
        try:
            return len(self.encoder.encode(text))
        except Exception as e:
            self.logger.error(f"Error counting tokens: {e}")
            return 0

    async def __call__(self, event_name: str, agent: Any, **kwargs: Any) -> None:
        """
        Process events from the TinyAgent.

        Args:
            event_name: The name of the event
            agent: The TinyAgent instance
            **kwargs: Additional event data
        """
        self.logger.debug(f"Event received: {event_name}")

        if event_name == "agent_start":
            await self._handle_agent_start(agent, **kwargs)
        elif event_name == "message_add":
            await self._handle_message_add(agent, **kwargs)
        elif event_name == "llm_start":
            await self._handle_llm_start(agent, **kwargs)
        elif event_name == "llm_end":
            await self._handle_llm_end(agent, **kwargs)
        elif event_name == "agent_end":
            await self._handle_agent_end(agent, **kwargs)

        # Update the UI if we have an active live display
        if self.live:
            self.logger.debug("Updating display")
            self._update_display()

    async def _handle_agent_start(self, agent: Any, **kwargs: Any) -> None:
        """Handle the agent_start event."""
        self.logger.debug("Handling agent_start event")
        self.timer.start()
        self.panels = []
        self.thinking_content = ""
        self.response_content = ""
        self.tool_calls = []
        self.tool_call_details = []
        self.assistant_text_responses = []

        # Store the user input for display
        self.current_user_input = kwargs.get("user_input", "")
        self.logger.debug(f"User input: {self.current_user_input}")

        # Initialize the live display with auto_refresh
        self.live = Live(
            console=self.console,
            auto_refresh=True,
            refresh_per_second=4,
        )
        self.logger.debug("Starting live display")
        self.live.start()

        # Add the initial status
        self.status = Status("Thinking...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        self.panels = [self.status]

        # Add user message panel if enabled
        if self.show_message and self.current_user_input:
            self.logger.debug("Adding user message panel")
            message_panel = create_panel(
                content=Text(self.current_user_input, style="green"),
                title="User Message",
                border_style="cyan"
            )
            self.panels.append(message_panel)

        self._update_display()

    async def _handle_message_add(self, agent: Any, **kwargs: Any) -> None:
        """Handle the message_add event."""
        message = kwargs.get("message", {})
        self.logger.debug(f"Handling message_add event: {message.get('role', 'unknown')}")

        # Process tool calls in assistant messages
        if message.get("role") == "assistant":
            if "tool_calls" in message:
                self.logger.debug(f"Processing {len(message.get('tool_calls', []))} tool calls")
                for tool_call in message.get("tool_calls", []):
                    function_info = tool_call.get("function", {})
                    tool_name = function_info.get("name", "unknown")
                    args = function_info.get("arguments", "{}")
                    tool_id = tool_call.get("id", "unknown")

                    try:
                        formatted_args = json.dumps(json.loads(args), indent=2)
                    except:
                        formatted_args = args

                    # Count tokens in the tool call
                    token_count = self.count_tokens(f"{tool_name}({formatted_args})")

                    # Add to simple tool calls list (for the summary panel)
                    self.tool_calls.append(f"{tool_name}({formatted_args})")

                    # Add to detailed tool call info
                    self.tool_call_details.append({
                        "id": tool_id,
                        "name": tool_name,
                        "arguments": formatted_args,
                        "result": None,  # Will be filled when tool response comes
                        "token_count": token_count  # Store token count
                    })

                    self.logger.debug(f"Added tool call: {tool_name} ({token_count} tokens)")
            elif "content" in message and message.get("content"):
                # This is a text response from the assistant
                content = message.get("content", "")
                token_count = self.count_tokens(content)
                self.assistant_text_responses.append({
                    "content": content,
                    "token_count": token_count
                })
                self.logger.debug(f"Added assistant text response: {content[:50]}... ({token_count} tokens)")

        # Process tool responses
        if message.get("role") == "tool":
            tool_name = message.get("name", "unknown")
            content = message.get("content", "")
            tool_call_id = message.get("tool_call_id", None)
            token_count = self.count_tokens(content)

            # Update the corresponding tool call detail with the result
            if tool_call_id:
                for tool_detail in self.tool_call_details:
                    if tool_detail["id"] == tool_call_id:
                        tool_detail["result"] = content
                        tool_detail["result_token_count"] = token_count
                        self.logger.debug(f"Updated tool call {tool_call_id} with result ({token_count} tokens)")
                        break

            # Also keep the old format for backward compatibility
            self.tool_calls.append(f"{tool_name} result: {content}")
            self.logger.debug(f"Added tool result: {tool_name} ({token_count} tokens)")

    async def _handle_llm_start(self, agent: Any, **kwargs: Any) -> None:
        """Handle the llm_start event."""
        self.logger.debug("Handling llm_start event")
        # Nothing specific to do here, the status is already showing "Thinking..."

    async def _handle_llm_end(self, agent: Any, **kwargs: Any) -> None:
        """Handle the llm_end event."""
        self.logger.debug("Handling llm_end event")
        response = kwargs.get("response", {})

        # Extract thinking content if available (from response.choices[0].message.content)
        try:
            message = response.choices[0].message
            if hasattr(message, "content") and message.content:
                self.thinking_content = message.content
                self.logger.debug(f"Extracted thinking content: {self.thinking_content[:50]}...")
        except (AttributeError, IndexError) as e:
            self.logger.debug(f"Could not extract thinking content: {e}")

        # Track token usage if available
        try:
            usage = response.usage
            if usage:
                self.token_usage["prompt_tokens"] += usage.prompt_tokens
                self.token_usage["completion_tokens"] += usage.completion_tokens
                self.token_usage["total_tokens"] += usage.total_tokens
                self.logger.debug(f"Updated token usage: {self.token_usage}")
        except (AttributeError, TypeError) as e:
            self.logger.debug(f"Could not extract token usage: {e}")

    async def _handle_agent_end(self, agent: Any, **kwargs: Any) -> None:
        """Handle the agent_end event."""
        self.logger.debug("Handling agent_end event")
        self.timer.stop()
        self.response_content = kwargs.get("result", "")
        self.logger.debug(f"Final response: {self.response_content[:50]}...")

        # Remove the status panel
        self.panels = [p for p in self.panels if not isinstance(p, Status)]

        # Add the final response panel
        if self.response_content:
            content = self.response_content
            if self.markdown:
                self.logger.debug("Converting response to markdown")
                escaped_content = escape_markdown_tags(content, self.tags_to_include_in_markdown)
                content = Markdown(escaped_content)

            response_panel = create_panel(
                content=content,
                title=f"Response ({self.timer.elapsed:.1f}s)",
                border_style="blue"
            )
            self.panels.append(response_panel)

        self._update_display()

        self.live.stop()
        self.logger.debug("Live display stopped")

    def _update_display(self) -> None:
        """Update the live display with current panels."""
        if not self.live:
            self.logger.debug("No live display to update")
            return

        # Start with a fresh list of panels in the specified order
        ordered_panels = []

        # 1. Status (if exists)
        status_panel = next((p for p in self.panels if isinstance(p, Status)), None)
        if status_panel:
            ordered_panels.append(status_panel)

        # 2. User Message (if exists)
        user_message_panel = next((p for p in self.panels if isinstance(p, Panel) and "User Message" in p.title), None)
        if user_message_panel:
            ordered_panels.append(user_message_panel)

        # 3. Tool Calls summary (if we have tool calls)
        if self.show_tool_calls and self.tool_calls:
            # Create the tool calls summary panel
            self.logger.debug(f"Creating tool calls summary panel with {len(self.tool_calls)} calls")
            tool_calls_content = Text()
            for i, tool_call in enumerate(self.tool_calls):
                if "result:" not in tool_call:  # Only show the calls, not results
                    tool_calls_content.append(f"• {tool_call}\n")

            if tool_calls_content:
                tool_calls_panel = create_panel(
                    content=tool_calls_content,
                    title="Tool Calls Summary",
                    border_style="yellow"
                )
                ordered_panels.append(tool_calls_panel)

        # 4. Assistant Text Responses
        for i, response_data in enumerate(self.assistant_text_responses):
            content = response_data["content"]
            token_count = response_data["token_count"]

            if self.markdown:
                self.logger.debug("Converting assistant response to markdown")
                escaped_content = escape_markdown_tags(content, self.tags_to_include_in_markdown)
                content = Markdown(escaped_content)

            response_panel = create_panel(
                content=content,
                title=f"Assistant Response {i+1}",
                border_style="blue"
            )
            ordered_panels.append(response_panel)

            # Add token count panel with purple border
            token_panel = create_panel(
                content=Text(f"Token count: {token_count}", style="bold"),
                title="Tokens",
                border_style="purple"
            )
            ordered_panels.append(token_panel)

        # 5. Token Usage Panel
        if any(self.token_usage.values()):
            token_content = Text()
            token_content.append(f"Prompt Tokens: {self.token_usage['prompt_tokens']}\n", style="cyan")
            token_content.append(f"Completion Tokens: {self.token_usage['completion_tokens']}\n", style="green")
            token_content.append(f"Total Tokens: {self.token_usage['total_tokens']}", style="bold magenta")

            token_panel = create_panel(
                content=token_content,
                title="Token Usage",
                border_style="bright_blue"
            )
            ordered_panels.append(token_panel)

        # 6. Detailed Tool Calls
        if self.show_tool_calls:
            for tool_detail in self.tool_call_details:
                tool_name = tool_detail["name"]
                arguments = tool_detail["arguments"]
                result = tool_detail["result"]
                input_token_count = tool_detail.get("token_count", 0)
                result_token_count = tool_detail.get("result_token_count", 0)

                tool_content = Text()
                tool_content.append("Input:\n", style="bold")
                tool_content.append(f"{arguments}\n\n")

                if result is not None:
                    tool_content.append("Output:\n", style="bold")
                    tool_content.append(f"{result}")
                else:
                    tool_content.append("Waiting for response...", style="italic")

                tool_panel = create_panel(
                    content=tool_content,
                    title=f"Tool: {tool_name}",
                    border_style="yellow"
                )
                ordered_panels.append(tool_panel)

                # Add token count panel with purple border
                token_content = Text()
                token_content.append(f"Input tokens: {input_token_count}\n", style="cyan")
                if result is not None:
                    token_content.append(f"Output tokens: {result_token_count}\n", style="green")
                    token_content.append(f"Total tokens: {input_token_count + result_token_count}", style="bold")

                token_panel = create_panel(
                    content=token_content,
                    title="Tokens",
                    border_style="purple"
                )
                ordered_panels.append(token_panel)

        # 7. Thinking panel (if we have thinking content)
        if self.show_thinking and self.thinking_content:
            self.logger.debug("Adding thinking panel")
            thinking_panel = create_panel(
                content=Text(self.thinking_content),
                title=f"Response ({self.timer.elapsed:.1f}s)",
                border_style="green"
            )
            ordered_panels.append(thinking_panel)

            # Add token count panel for thinking content
            thinking_token_count = self.count_tokens(self.thinking_content)
            token_panel = create_panel(
                content=Text(f"Token count: {thinking_token_count}", style="bold"),
                title="Tokens",
                border_style="purple"
            )
            ordered_panels.append(token_panel)

        # 8. Final response panel (if we have a response)
        if self.response_content:
            content = self.response_content
            if self.markdown:
                self.logger.debug("Converting response to markdown")
                escaped_content = escape_markdown_tags(content, self.tags_to_include_in_markdown)
                content = Markdown(escaped_content)

            response_panel = create_panel(
                content=content,
                title=f"Response ({self.timer.elapsed:.1f}s)",
                border_style="blue"
            )
            ordered_panels.append(response_panel)

            # Add token count panel for final response
            response_token_count = self.count_tokens(self.response_content)
            token_panel = create_panel(
                content=Text(f"Token count: {response_token_count}", style="bold"),
                title="Tokens",
                border_style="purple"
            )
            ordered_panels.append(token_panel)

        try:
            self.logger.debug(f"Updating live display with {len(ordered_panels)} panels")
            self.live.update(Group(*ordered_panels))
        except Exception as e:
            self.logger.error(f"Error updating display: {e}")


async def run_example():
    """Example usage of RichUICallback with TinyAgent."""
    import os
    import sys
    from tinyagent import TinyAgent
    from tinyagent.hooks.logging_manager import LoggingManager

    # Create and configure logging manager
    log_manager = LoggingManager(default_level=logging.INFO)
    log_manager.set_levels({
        'tinyagent.hooks.rich_ui_callback': logging.DEBUG,  # Debug for this module
        'tinyagent.tiny_agent': logging.INFO,  # Info for TinyAgent
        'tinyagent.mcp_client': logging.INFO,  # Info for MCPClient
    })

    # Configure a console handler
    console_handler = logging.StreamHandler(sys.stdout)
    log_manager.configure_handler(
        console_handler,
        format_string='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG
    )

    # Get module-specific loggers
    ui_logger = log_manager.get_logger('tinyagent.hooks.rich_ui_callback')
    agent_logger = log_manager.get_logger('tinyagent.tiny_agent')
    mcp_logger = log_manager.get_logger('tinyagent.mcp_client')

    ui_logger.debug("Starting RichUICallback example")

    # Get API key from environment
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        ui_logger.error("Please set the OPENAI_API_KEY environment variable")
        return

    # Initialize the agent with our logger
    agent = TinyAgent(model="gpt-4.1-mini", api_key=api_key, logger=agent_logger)

    # Add the Rich UI callback with our logger
    rich_ui = RichUICallback(
        markdown=True,
        show_message=True,
        show_thinking=True,
        show_tool_calls=True,
        logger=ui_logger  # Pass DEBUG level logger to RichUICallback
    )
    agent.add_callback(rich_ui)

    # Run the agent with a user query
    user_input = "What is the capital of France and what's the population this year?"
    ui_logger.info(f"Running agent with input: {user_input}")
    result = await agent.run(user_input)

    ui_logger.info(f"Final result: {result}")

    # Clean up
    await agent.close()


if __name__ == "__main__":
    asyncio.run(run_example())
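The run_example coroutine above already shows end-to-end usage. As a companion, here is a minimal sketch (an editorial illustration, not part of the published wheel) of a custom callback speaking the same event protocol that RichUICallback.__call__ dispatches on: agent_start, message_add, llm_start, llm_end, and agent_end, delivered through an async __call__(event_name, agent, **kwargs). The PrintCallback name and its logging choices are assumptions made for the example.

# Illustrative sketch -- not part of tinyagent-py. It mirrors the event
# names and kwargs keys (user_input, message, result) used by
# RichUICallback above.
import logging


class PrintCallback:
    """Log a one-line summary of selected TinyAgent events."""

    def __init__(self, logger=None):
        self.logger = logger or logging.getLogger(__name__)

    async def __call__(self, event_name: str, agent, **kwargs) -> None:
        if event_name == "agent_start":
            self.logger.info("agent_start: %s", kwargs.get("user_input", ""))
        elif event_name == "message_add":
            message = kwargs.get("message", {})
            self.logger.info("message_add: role=%s", message.get("role", "unknown"))
        elif event_name == "agent_end":
            self.logger.info("agent_end: %s", str(kwargs.get("result", ""))[:80])


# Hypothetical wiring, following the run_example pattern above:
#   agent = TinyAgent(model="gpt-4.1-mini", api_key=api_key)
#   agent.add_callback(PrintCallback())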
storage/__init__.py
ADDED
@@ -0,0 +1,7 @@
from .base import Storage
from .json_file_storage import JsonFileStorage
# Import your concrete backends here if you like:
# from .postgres_storage import PostgresStorage
# from .json_file_storage import JsonFileStorage

__all__ = ["Storage", "JsonFileStorage"]
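For context, a minimal sketch of a custom backend built on the exported Storage base follows. storage/base.py is not reproduced in this excerpt, so the async save_session / load_session / close signatures are inferred from the Agno-backed implementations in storage/agno_storage.py below; the InMemoryStorage name and the assumption that Storage.__init__ takes no arguments are illustrative only.

# Illustrative only -- not part of tinyagent-py. Signatures follow the
# Agno-backed storage classes shown in storage/agno_storage.py.
from typing import Any, Dict, Optional

from tinyagent.storage import Storage


class InMemoryStorage(Storage):
    """Keep sessions in a plain dict; handy for tests."""

    def __init__(self):
        super().__init__()  # assumes a no-argument base initializer
        self._sessions: Dict[str, Dict[str, Any]] = {}

    async def save_session(self, session_id: str, data: Dict[str, Any], user_id: Optional[str] = None) -> None:
        self._sessions[session_id] = data

    async def load_session(self, session_id: str, user_id: Optional[str] = None) -> Dict[str, Any]:
        return self._sessions.get(session_id, {})

    async def close(self) -> None:
        self._sessions.clear()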
storage/agno_storage.py
ADDED
@@ -0,0 +1,114 @@
import asyncio
from typing import Dict, Any, Optional
try:
    from tinyagent.storage.base import Storage
    from agno.storage.postgres import PostgresStorage as AgnoPG
    from agno.storage.sqlite import SqliteStorage as AgnoSL
    from agno.storage.session.agent import AgentSession
except ImportError as e:
    raise ImportError("agno is not installed. Please install it with `pip install agno`.", e)

def _remap_agno_to_tiny(ag: Dict[str, Any]) -> Dict[str, Any]:
    """Map a full Agno to_dict() into TinyAgent’s to_dict() shape."""
    sess_id = ag.get("session_id", "")

    # meta = everything except the session_state fields
    metadata = {
        k: v
        for k, v in ag.items()
        if k not in ("session_id", "session_data", "memory", "runs")
    }

    # Safe-guard: use {} if any of these are None
    _session_data = ag.get("session_data") or {}
    _memory = ag.get("memory") or {}

    session_state = {
        "messages": _memory.get("messages", []),
        "memory": _memory,
        **_session_data,
    }

    return {
        "session_id": sess_id,
        "metadata": metadata,
        "session_state": session_state,
    }

def _remap_tiny_to_agno(tiny: Dict[str, Any]) -> Dict[str, Any]:
    """
    Given TinyAgent.to_dict() output:
      { "session_id": str,
        "metadata": {...},
        "session_state": { "messages": [...], "memory": {...}, "runs": [...] }
      }
    produce a full AgnoSession.to_dict() shape:
      { "session_id":..., "user_id":..., "memory":..., "runs":...,
        "session_data": {"messages": [...]},
        "extra_data":...,
        ... (other Agno fields remain None/default) }
    """
    session_id = tiny["session_id"]
    meta = tiny.get("metadata", {}) or {}
    state = tiny.get("session_state", {}) or {}

    return {
        "session_id": session_id,
        "user_id": meta.get("user_id"),
        "team_session_id": meta.get("team_session_id"),
        "memory": state.get("memory", {}),
        "runs": state.get("runs", []),
        "session_data": {"messages": state.get("messages", [])},
        "extra_data": meta.get("extra_data"),
        # created_at/updated_at/agent_id/agent_data will default in AgnoSession
    }


class AgnoPostgresStorage(Storage):
    def __init__(self, table_name: str, db_url: str, schema: str = "ai", mode: str = "agent"):
        super().__init__()
        self.backend = AgnoPG(table_name=table_name, schema=schema, db_url=db_url, mode=mode)
        self.backend.create()

    async def save_session(self, session_id: str, data: Dict[str, Any], user_id: Optional[str] = None) -> None:
        # Pack TinyAgent dict into AgnoSession record
        agno_dict = _remap_tiny_to_agno(data)
        session_obj = AgentSession.from_dict(agno_dict)
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, self.backend.upsert, session_obj)

    async def load_session(self, session_id: str, user_id: Optional[str] = None) -> Dict[str, Any]:
        loop = asyncio.get_event_loop()
        agno_obj = await loop.run_in_executor(None, self.backend.read, session_id, user_id)
        if not agno_obj:
            return {}
        ag = agno_obj.to_dict()
        return _remap_agno_to_tiny(ag)

    async def close(self) -> None:
        pass


class AgnoSqliteStorage(Storage):
    def __init__(self, table_name: str, db_url: str, mode: str = "agent"):
        super().__init__()
        self.backend = AgnoSL(table_name=table_name, db_url=db_url, mode=mode)
        self.backend.create()

    async def save_session(self, session_id: str, data: Dict[str, Any], user_id: Optional[str] = None) -> None:
        agno_dict = _remap_tiny_to_agno(data)
        session_obj = AgentSession.from_dict(agno_dict)
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, self.backend.upsert, session_obj)

    async def load_session(self, session_id: str, user_id: Optional[str] = None) -> Dict[str, Any]:
        loop = asyncio.get_event_loop()
        agno_obj = await loop.run_in_executor(None, self.backend.read, session_id, user_id)
        if not agno_obj:
            return {}
        ag = agno_obj.to_dict()
        return _remap_agno_to_tiny(ag)

    async def close(self) -> None:
        pass
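To make the remapping concrete, here is a small worked round trip through the two helpers above. The sample session dict is invented for demonstration, the module path tinyagent.storage.agno_storage is assumed from the file layout, and agno must be installed for the import to succeed since the module imports it at import time.

# Illustrative round trip through the remap helpers above; the sample
# session dict is made up for demonstration.
from tinyagent.storage.agno_storage import _remap_agno_to_tiny, _remap_tiny_to_agno

tiny = {
    "session_id": "demo-session",
    "metadata": {"user_id": "u-123", "extra_data": {"notes": "example"}},
    "session_state": {
        "messages": [{"role": "user", "content": "hi"}],
        "memory": {"messages": [{"role": "user", "content": "hi"}]},
        "runs": [],
    },
}

agno_shape = _remap_tiny_to_agno(tiny)
# agno_shape["user_id"] == "u-123"
# agno_shape["session_data"] == {"messages": [{"role": "user", "content": "hi"}]}

back = _remap_agno_to_tiny(agno_shape)
# back["session_id"] == "demo-session"
# back["session_state"]["messages"] == [{"role": "user", "content": "hi"}]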