tinyagent-py 0.0.7__py3-none-any.whl → 0.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tinyagent/__init__.py +2 -1
- tinyagent/code_agent/__init__.py +12 -0
- tinyagent/code_agent/example.py +176 -0
- tinyagent/code_agent/helper.py +173 -0
- tinyagent/code_agent/modal_sandbox.py +478 -0
- tinyagent/code_agent/providers/__init__.py +4 -0
- tinyagent/code_agent/providers/base.py +152 -0
- tinyagent/code_agent/providers/modal_provider.py +202 -0
- tinyagent/code_agent/tiny_code_agent.py +573 -0
- tinyagent/code_agent/tools/__init__.py +3 -0
- tinyagent/code_agent/tools/example_tools.py +41 -0
- tinyagent/code_agent/utils.py +120 -0
- tinyagent/hooks/__init__.py +2 -1
- tinyagent/hooks/gradio_callback.py +210 -35
- {tinyagent_py-0.0.7.dist-info → tinyagent_py-0.0.9.dist-info}/METADATA +138 -5
- tinyagent_py-0.0.9.dist-info/RECORD +31 -0
- tinyagent_py-0.0.7.dist-info/RECORD +0 -20
- {tinyagent_py-0.0.7.dist-info → tinyagent_py-0.0.9.dist-info}/WHEEL +0 -0
- {tinyagent_py-0.0.7.dist-info → tinyagent_py-0.0.9.dist-info}/licenses/LICENSE +0 -0
- {tinyagent_py-0.0.7.dist-info → tinyagent_py-0.0.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,120 @@
|
|
1
|
+
import sys
|
2
|
+
import cloudpickle
|
3
|
+
from typing import Dict, Any
|
4
|
+
|
5
|
+
|
6
|
+
def clean_response(resp: Dict[str, Any]) -> Dict[str, Any]:
    """
    Clean the response from code execution, keeping only relevant fields.

    Args:
        resp: Raw response dictionary from code execution

    Returns:
        Cleaned response with only essential fields
    """
    # Set membership is O(1) per lookup; iteration order of the input
    # dictionary is preserved in the result.
    allowed_fields = {'printed_output', 'return_value', 'stderr', 'error_traceback'}
    return {key: value for key, value in resp.items() if key in allowed_fields}
|
17
|
+
|
18
|
+
|
19
|
+
def make_session_blob(ns: dict) -> bytes:
    """
    Create a serialized blob of the session namespace, excluding unserializable objects.

    Args:
        ns: Namespace dictionary to serialize

    Returns:
        Serialized bytes of the clean namespace
    """
    def _is_picklable(obj) -> bool:
        # Probe each value individually; a single bad object must not
        # poison the whole session blob.
        try:
            cloudpickle.dumps(obj)
        except Exception:
            return False
        return True

    clean_ns = {key: value for key, value in ns.items() if _is_picklable(value)}
    return cloudpickle.dumps(clean_ns)
|
41
|
+
|
42
|
+
|
43
|
+
def _run_python(code: str, globals_dict: Dict[str, Any] = None, locals_dict: Dict[str, Any] = None):
|
44
|
+
"""
|
45
|
+
Execute Python code in a controlled environment with proper error handling.
|
46
|
+
|
47
|
+
Args:
|
48
|
+
code: Python code to execute
|
49
|
+
globals_dict: Global variables dictionary
|
50
|
+
locals_dict: Local variables dictionary
|
51
|
+
|
52
|
+
Returns:
|
53
|
+
Dictionary containing execution results
|
54
|
+
"""
|
55
|
+
import contextlib
|
56
|
+
import traceback
|
57
|
+
import io
|
58
|
+
import ast
|
59
|
+
|
60
|
+
# Make copies to avoid mutating the original parameters
|
61
|
+
globals_dict = globals_dict or {}
|
62
|
+
locals_dict = locals_dict or {}
|
63
|
+
updated_globals = globals_dict.copy()
|
64
|
+
updated_locals = locals_dict.copy()
|
65
|
+
|
66
|
+
# Pre-import essential modules into the global namespace
|
67
|
+
# This ensures they're available for imports inside functions
|
68
|
+
essential_modules = ['requests', 'json', 'os', 'sys', 'time', 'datetime', 're', 'random', 'math']
|
69
|
+
|
70
|
+
for module_name in essential_modules:
|
71
|
+
try:
|
72
|
+
module = __import__(module_name)
|
73
|
+
updated_globals[module_name] = module
|
74
|
+
#print(f"✓ {module_name} module loaded successfully")
|
75
|
+
except ImportError:
|
76
|
+
print(f"⚠️ Warning: {module_name} module not available")
|
77
|
+
|
78
|
+
tree = ast.parse(code, mode="exec")
|
79
|
+
compiled = compile(tree, filename="<ast>", mode="exec")
|
80
|
+
stdout_buf = io.StringIO()
|
81
|
+
stderr_buf = io.StringIO()
|
82
|
+
|
83
|
+
# Execute with stdout+stderr capture and exception handling
|
84
|
+
error_traceback = None
|
85
|
+
output = None
|
86
|
+
|
87
|
+
with contextlib.redirect_stdout(stdout_buf), contextlib.redirect_stderr(stderr_buf):
|
88
|
+
try:
|
89
|
+
# Merge all variables into globals to avoid scoping issues with generator expressions
|
90
|
+
# When exec() is called with both globals and locals, generator expressions can't
|
91
|
+
# access local variables. By using only globals, everything runs in global scope.
|
92
|
+
merged_globals = updated_globals.copy()
|
93
|
+
merged_globals.update(updated_locals)
|
94
|
+
|
95
|
+
# Execute with only globals - this fixes generator expression scoping issues
|
96
|
+
output = exec(code, merged_globals)
|
97
|
+
|
98
|
+
# Update both dictionaries with any new variables created during execution
|
99
|
+
for key, value in merged_globals.items():
|
100
|
+
if key not in updated_globals and key not in updated_locals:
|
101
|
+
updated_locals[key] = value
|
102
|
+
elif key in updated_locals or key not in updated_globals:
|
103
|
+
updated_locals[key] = value
|
104
|
+
updated_globals[key] = value
|
105
|
+
except Exception:
|
106
|
+
# Capture the full traceback as a string
|
107
|
+
error_traceback = traceback.format_exc()
|
108
|
+
|
109
|
+
printed_output = stdout_buf.getvalue()
|
110
|
+
stderr_output = stderr_buf.getvalue()
|
111
|
+
error_traceback_output = error_traceback
|
112
|
+
|
113
|
+
return {
|
114
|
+
"printed_output": printed_output,
|
115
|
+
"return_value": output,
|
116
|
+
"stderr": stderr_output,
|
117
|
+
"error_traceback": error_traceback_output,
|
118
|
+
"updated_globals": updated_globals,
|
119
|
+
"updated_locals": updated_locals
|
120
|
+
}
|
tinyagent/hooks/__init__.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1
1
|
#from .rich_ui_agent import RichUICallback
|
2
2
|
from .rich_ui_callback import RichUICallback
|
3
|
+
from .rich_code_ui_callback import RichCodeUICallback
|
3
4
|
from .logging_manager import LoggingManager
|
4
|
-
__all__ = ["RichUICallback", "LoggingManager"]
|
5
|
+
__all__ = ["RichUICallback", "RichCodeUICallback", "LoggingManager"]
|
@@ -157,7 +157,7 @@ class GradioCallback:
|
|
157
157
|
|
158
158
|
# Add to detailed tool call info if not already present by ID
|
159
159
|
if not any(tc['id'] == tool_id for tc in self.tool_call_details):
|
160
|
-
|
160
|
+
tool_detail = {
|
161
161
|
"id": tool_id,
|
162
162
|
"name": tool_name,
|
163
163
|
"arguments": formatted_args,
|
@@ -166,7 +166,25 @@ class GradioCallback:
|
|
166
166
|
"result_token_count": 0,
|
167
167
|
"timestamp": current_time,
|
168
168
|
"result_timestamp": None
|
169
|
-
}
|
169
|
+
}
|
170
|
+
|
171
|
+
# Special handling for run_python tool - extract code_lines
|
172
|
+
if tool_name == "run_python":
|
173
|
+
try:
|
174
|
+
# Look for code in different possible field names
|
175
|
+
code_content = None
|
176
|
+
for field in ['code_lines', 'code', 'script', 'python_code']:
|
177
|
+
if field in parsed_args:
|
178
|
+
code_content = parsed_args[field]
|
179
|
+
break
|
180
|
+
|
181
|
+
if code_content is not None:
|
182
|
+
tool_detail["code_lines"] = code_content
|
183
|
+
self.logger.debug(f"Stored code content for run_python tool {tool_id}")
|
184
|
+
except Exception as e:
|
185
|
+
self.logger.error(f"Error processing run_python arguments: {e}")
|
186
|
+
|
187
|
+
self.tool_call_details.append(tool_detail)
|
170
188
|
self.logger.debug(f"Added tool call detail: {tool_name} (ID: {tool_id}, Tokens: {token_count})")
|
171
189
|
|
172
190
|
# If this is a final_answer or ask_question tool, we'll handle it specially later
|
@@ -386,19 +404,127 @@ class GradioCallback:
|
|
386
404
|
f"O {self.token_usage['completion_tokens']} | " +
|
387
405
|
f"Total {self.token_usage['total_tokens']}")
|
388
406
|
|
407
|
+
def _format_run_python_tool(self, tool_detail: dict) -> str:
|
408
|
+
"""
|
409
|
+
Format run_python tool call with proper markdown formatting for code and output.
|
410
|
+
|
411
|
+
Args:
|
412
|
+
tool_detail: Tool call detail dictionary
|
413
|
+
|
414
|
+
Returns:
|
415
|
+
Formatted markdown string
|
416
|
+
"""
|
417
|
+
tool_name = tool_detail["name"]
|
418
|
+
tool_id = tool_detail.get("id", "unknown")
|
419
|
+
code_lines = tool_detail.get("code_lines", [])
|
420
|
+
result = tool_detail.get("result")
|
421
|
+
input_tokens = tool_detail.get("token_count", 0)
|
422
|
+
output_tokens = tool_detail.get("result_token_count", 0)
|
423
|
+
total_tokens = input_tokens + output_tokens
|
424
|
+
|
425
|
+
# Start building the formatted content
|
426
|
+
parts = []
|
427
|
+
|
428
|
+
# Handle different code_lines formats
|
429
|
+
combined_code = ""
|
430
|
+
if code_lines:
|
431
|
+
if isinstance(code_lines, list):
|
432
|
+
# Standard case: list of code lines
|
433
|
+
combined_code = "\n".join(code_lines)
|
434
|
+
elif isinstance(code_lines, str):
|
435
|
+
# Handle case where code_lines is a single string
|
436
|
+
combined_code = code_lines
|
437
|
+
else:
|
438
|
+
# Convert other types to string
|
439
|
+
combined_code = str(code_lines)
|
440
|
+
|
441
|
+
# If we have code content, show it as Python code block
|
442
|
+
if combined_code.strip():
|
443
|
+
parts.append(f"**Python Code:**\n```python\n{combined_code}\n```")
|
444
|
+
else:
|
445
|
+
# Try to extract code from arguments as fallback
|
446
|
+
try:
|
447
|
+
args_dict = json.loads(tool_detail['arguments'])
|
448
|
+
# Check for different possible code field names
|
449
|
+
code_content = None
|
450
|
+
for field in ['code_lines', 'code', 'script', 'python_code']:
|
451
|
+
if field in args_dict:
|
452
|
+
code_content = args_dict[field]
|
453
|
+
break
|
454
|
+
|
455
|
+
if code_content:
|
456
|
+
if isinstance(code_content, list):
|
457
|
+
combined_code = "\n".join(code_content)
|
458
|
+
else:
|
459
|
+
combined_code = str(code_content)
|
460
|
+
|
461
|
+
if combined_code.strip():
|
462
|
+
parts.append(f"**Python Code:**\n```python\n{combined_code}\n```")
|
463
|
+
else:
|
464
|
+
# Final fallback to showing raw arguments
|
465
|
+
parts.append(f"**Input Arguments:**\n```json\n{tool_detail['arguments']}\n```")
|
466
|
+
else:
|
467
|
+
# No code found, show raw arguments
|
468
|
+
parts.append(f"**Input Arguments:**\n```json\n{tool_detail['arguments']}\n```")
|
469
|
+
except (json.JSONDecodeError, KeyError):
|
470
|
+
# If we can't parse arguments, show them as-is
|
471
|
+
parts.append(f"**Input Arguments:**\n```json\n{tool_detail['arguments']}\n```")
|
472
|
+
|
473
|
+
# Add the output if available
|
474
|
+
if result is not None:
|
475
|
+
parts.append(f"\n**Output:** ({output_tokens} tokens)")
|
476
|
+
|
477
|
+
try:
|
478
|
+
# Try to parse result as JSON for better formatting
|
479
|
+
result_json = json.loads(result)
|
480
|
+
parts.append(f"```json\n{json.dumps(result_json, indent=2)}\n```")
|
481
|
+
except (json.JSONDecodeError, TypeError):
|
482
|
+
# Handle plain text result
|
483
|
+
if isinstance(result, str):
|
484
|
+
# Replace escaped newlines with actual newlines for better readability
|
485
|
+
formatted_result = result.replace("\\n", "\n")
|
486
|
+
parts.append(f"```\n{formatted_result}\n```")
|
487
|
+
else:
|
488
|
+
parts.append(f"```\n{str(result)}\n```")
|
489
|
+
else:
|
490
|
+
parts.append(f"\n**Status:** ⏳ Processing...")
|
491
|
+
|
492
|
+
# Add token information
|
493
|
+
parts.append(f"\n**Token Usage:** {total_tokens} total ({input_tokens} input + {output_tokens} output)")
|
494
|
+
|
495
|
+
return "\n".join(parts)
|
496
|
+
|
389
497
|
async def interact_with_agent(self, user_input_processed, chatbot_history):
|
390
498
|
"""
|
391
499
|
Process user input, interact with the agent, and stream updates to Gradio UI.
|
392
500
|
Each tool call and response will be shown as a separate message.
|
393
501
|
"""
|
394
502
|
self.logger.info(f"Starting interaction for: {user_input_processed[:50]}...")
|
503
|
+
|
504
|
+
# Reset state for new interaction to prevent showing previous content
|
505
|
+
self.thinking_content = ""
|
506
|
+
self.tool_calls = []
|
507
|
+
self.tool_call_details = []
|
508
|
+
self.assistant_text_responses = []
|
509
|
+
self.token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
|
510
|
+
self.is_running = True
|
511
|
+
self.last_update_yield_time = 0
|
512
|
+
self.logger.debug("Reset interaction state for new conversation turn")
|
395
513
|
|
396
514
|
# 1. Add user message to chatbot history as a ChatMessage
|
397
515
|
chatbot_history.append(
|
398
516
|
ChatMessage(role="user", content=user_input_processed)
|
399
517
|
)
|
400
518
|
|
401
|
-
#
|
519
|
+
# 2. Add typing indicator immediately after user message
|
520
|
+
typing_message = ChatMessage(
|
521
|
+
role="assistant",
|
522
|
+
content="🤔 Thinking..."
|
523
|
+
)
|
524
|
+
chatbot_history.append(typing_message)
|
525
|
+
typing_message_index = len(chatbot_history) - 1
|
526
|
+
|
527
|
+
# Initial yield to show user message and typing indicator
|
402
528
|
yield chatbot_history, self._get_token_usage_text()
|
403
529
|
|
404
530
|
# Kick off the agent in the background
|
@@ -407,7 +533,7 @@ class GradioCallback:
|
|
407
533
|
|
408
534
|
displayed_tool_calls = set()
|
409
535
|
displayed_text_responses = set()
|
410
|
-
|
536
|
+
thinking_removed = False
|
411
537
|
update_interval = 0.3
|
412
538
|
min_yield_interval = 0.2
|
413
539
|
|
@@ -420,6 +546,14 @@ class GradioCallback:
|
|
420
546
|
sorted_tool_details = sorted(self.tool_call_details, key=lambda x: x.get("timestamp", 0))
|
421
547
|
sorted_text_responses = sorted(self.assistant_text_responses, key=lambda x: x.get("timestamp", 0))
|
422
548
|
|
549
|
+
# Remove typing indicator once we have actual content to show
|
550
|
+
if not thinking_removed and (sorted_text_responses or sorted_tool_details):
|
551
|
+
# Remove the typing indicator
|
552
|
+
if typing_message_index < len(chatbot_history):
|
553
|
+
chatbot_history.pop(typing_message_index)
|
554
|
+
thinking_removed = True
|
555
|
+
self.logger.debug("Removed typing indicator")
|
556
|
+
|
423
557
|
# → New assistant text chunks
|
424
558
|
for resp in sorted_text_responses:
|
425
559
|
content = resp["content"]
|
@@ -430,22 +564,6 @@ class GradioCallback:
|
|
430
564
|
displayed_text_responses.add(content)
|
431
565
|
self.logger.debug(f"Added new text response: {content[:50]}...")
|
432
566
|
|
433
|
-
# → Thinking placeholder (optional)
|
434
|
-
if self.show_thinking and self.thinking_content \
|
435
|
-
and not thinking_message_added \
|
436
|
-
and not displayed_text_responses:
|
437
|
-
thinking_msg = (
|
438
|
-
"Working on it...\n\n"
|
439
|
-
"```"
|
440
|
-
f"{self.thinking_content}"
|
441
|
-
"```"
|
442
|
-
)
|
443
|
-
chatbot_history.append(
|
444
|
-
ChatMessage(role="assistant", content=thinking_msg)
|
445
|
-
)
|
446
|
-
thinking_message_added = True
|
447
|
-
self.logger.debug("Added thinking message")
|
448
|
-
|
449
567
|
# → Show tool calls with "working..." status when they start
|
450
568
|
if self.show_tool_calls:
|
451
569
|
for tool in sorted_tool_details:
|
@@ -455,11 +573,18 @@ class GradioCallback:
|
|
455
573
|
# If we haven't displayed this tool call yet
|
456
574
|
if tid not in displayed_tool_calls and tid not in in_progress_tool_calls:
|
457
575
|
in_tok = tool.get("token_count", 0)
|
576
|
+
|
458
577
|
# Create "working..." message for this tool call
|
459
|
-
|
460
|
-
|
461
|
-
|
462
|
-
|
578
|
+
if tname == "run_python":
|
579
|
+
# Special formatting for run_python
|
580
|
+
body = self._format_run_python_tool(tool)
|
581
|
+
else:
|
582
|
+
# Standard formatting for other tools
|
583
|
+
body = (
|
584
|
+
f"**Input Arguments:**\n```json\n{tool['arguments']}\n```\n\n"
|
585
|
+
f"**Output:** ⏳ Working...\n"
|
586
|
+
)
|
587
|
+
|
463
588
|
# Add to chatbot with "working" status
|
464
589
|
msg = ChatMessage(
|
465
590
|
role="assistant",
|
@@ -483,10 +608,16 @@ class GradioCallback:
|
|
483
608
|
tot_tok = in_tok + out_tok
|
484
609
|
|
485
610
|
# Update the message with completed status and result
|
486
|
-
|
487
|
-
|
488
|
-
|
489
|
-
|
611
|
+
if tname == "run_python":
|
612
|
+
# Special formatting for completed run_python
|
613
|
+
body = self._format_run_python_tool(tool)
|
614
|
+
else:
|
615
|
+
# Standard formatting for other completed tools
|
616
|
+
body = (
|
617
|
+
f"**Input Arguments:**\n```json\n{tool['arguments']}\n```\n\n"
|
618
|
+
f"**Output:** ({out_tok} tokens)\n```json\n{tool['result']}\n```\n"
|
619
|
+
)
|
620
|
+
|
490
621
|
# Update the existing message
|
491
622
|
chatbot_history[pos] = ChatMessage(
|
492
623
|
role="assistant",
|
@@ -508,6 +639,11 @@ class GradioCallback:
|
|
508
639
|
|
509
640
|
await asyncio.sleep(update_interval)
|
510
641
|
|
642
|
+
# Remove typing indicator if still present when agent finishes
|
643
|
+
if not thinking_removed and typing_message_index < len(chatbot_history):
|
644
|
+
chatbot_history.pop(typing_message_index)
|
645
|
+
self.logger.debug("Removed typing indicator at end")
|
646
|
+
|
511
647
|
# once the agent_task is done, add its final result if any
|
512
648
|
try:
|
513
649
|
final_text = await agent_task
|
@@ -772,8 +908,8 @@ class GradioCallback:
|
|
772
908
|
return app
|
773
909
|
|
774
910
|
def clear_conversation(self):
|
775
|
-
"""Clear the conversation history (UI + agent), reset state, and update UI."""
|
776
|
-
self.logger.debug("Clearing conversation (UI + agent)")
|
911
|
+
"""Clear the conversation history (UI + agent), reset state completely, and update UI."""
|
912
|
+
self.logger.debug("Clearing conversation completely (UI + agent with new session)")
|
777
913
|
# Reset UI‐side state
|
778
914
|
self.thinking_content = ""
|
779
915
|
self.tool_calls = []
|
@@ -782,13 +918,52 @@ class GradioCallback:
|
|
782
918
|
self.token_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
|
783
919
|
self.is_running = False
|
784
920
|
|
785
|
-
#
|
921
|
+
# Completely reset the agent state with a new session
|
786
922
|
try:
|
787
|
-
if self.current_agent
|
788
|
-
|
789
|
-
|
923
|
+
if self.current_agent:
|
924
|
+
# Generate a new session ID for a fresh start
|
925
|
+
import uuid
|
926
|
+
new_session_id = str(uuid.uuid4())
|
927
|
+
self.current_agent.session_id = new_session_id
|
928
|
+
self.logger.debug(f"Generated new session ID: {new_session_id}")
|
929
|
+
|
930
|
+
# Reset all agent state
|
931
|
+
# 1. Clear conversation history (preserve system message)
|
932
|
+
if self.current_agent.messages:
|
933
|
+
system_msg = self.current_agent.messages[0]
|
934
|
+
self.current_agent.messages = [system_msg]
|
935
|
+
else:
|
936
|
+
# Rebuild default system prompt if missing
|
937
|
+
default_system_prompt = (
|
938
|
+
"You are a helpful AI assistant with access to a variety of tools. "
|
939
|
+
"Use the tools when appropriate to accomplish tasks. "
|
940
|
+
"If a tool you need isn't available, just say so."
|
941
|
+
)
|
942
|
+
self.current_agent.messages = [{
|
943
|
+
"role": "system",
|
944
|
+
"content": default_system_prompt
|
945
|
+
}]
|
946
|
+
|
947
|
+
# 2. Reset session state
|
948
|
+
self.current_agent.session_state = {}
|
949
|
+
|
950
|
+
# 3. Reset token usage in metadata
|
951
|
+
if hasattr(self.current_agent, 'metadata') and 'usage' in self.current_agent.metadata:
|
952
|
+
self.current_agent.metadata['usage'] = {
|
953
|
+
"prompt_tokens": 0,
|
954
|
+
"completion_tokens": 0,
|
955
|
+
"total_tokens": 0
|
956
|
+
}
|
957
|
+
|
958
|
+
# 4. Reset any other accumulated state that might affect behavior
|
959
|
+
self.current_agent.is_running = False
|
960
|
+
|
961
|
+
# 5. Reset session load flag to prevent any deferred loading of old session
|
962
|
+
self.current_agent._needs_session_load = False
|
963
|
+
|
964
|
+
self.logger.info(f"Completely reset TinyAgent with new session: {new_session_id}")
|
790
965
|
except Exception as e:
|
791
|
-
self.logger.error(f"Failed to
|
966
|
+
self.logger.error(f"Failed to reset TinyAgent completely: {e}")
|
792
967
|
|
793
968
|
# Return cleared UI components: empty chat + fresh token usage
|
794
969
|
return [], self._get_token_usage_text()
|
@@ -1,7 +1,7 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: tinyagent-py
|
3
|
-
Version: 0.0.
|
4
|
-
Summary:
|
3
|
+
Version: 0.0.9
|
4
|
+
Summary: TinyAgent with MCP Client, Code Agent (Thinking, Planning, and Executing in Python), and Extendable Hooks, Tiny but powerful
|
5
5
|
Author-email: Mahdi Golchin <golchin@askdev.ai>
|
6
6
|
Project-URL: Homepage, https://github.com/askbudi/tinyagent
|
7
7
|
Project-URL: Bug Tracker, https://github.com/askbudi/tinyagent/issues
|
@@ -25,10 +25,19 @@ Provides-Extra: sqlite
|
|
25
25
|
Requires-Dist: aiosqlite>=0.18.0; extra == "sqlite"
|
26
26
|
Provides-Extra: gradio
|
27
27
|
Requires-Dist: gradio>=3.50.0; extra == "gradio"
|
28
|
+
Provides-Extra: code
|
29
|
+
Requires-Dist: jinja2; extra == "code"
|
30
|
+
Requires-Dist: pyyaml; extra == "code"
|
31
|
+
Requires-Dist: cloudpickle; extra == "code"
|
32
|
+
Requires-Dist: modal; extra == "code"
|
28
33
|
Provides-Extra: all
|
29
34
|
Requires-Dist: asyncpg>=0.27.0; extra == "all"
|
30
35
|
Requires-Dist: aiosqlite>=0.18.0; extra == "all"
|
31
36
|
Requires-Dist: gradio>=3.50.0; extra == "all"
|
37
|
+
Requires-Dist: jinja2; extra == "all"
|
38
|
+
Requires-Dist: pyyaml; extra == "all"
|
39
|
+
Requires-Dist: cloudpickle; extra == "all"
|
40
|
+
Requires-Dist: modal; extra == "all"
|
32
41
|
Dynamic: license-file
|
33
42
|
|
34
43
|
# TinyAgent
|
@@ -52,7 +61,11 @@ Inspired by:
|
|
52
61
|
- [Build your own Tiny Agent](https://askdev.ai/github/askbudi/tinyagent)
|
53
62
|
|
54
63
|
## Overview
|
55
|
-
This is a tiny agent that uses MCP and LiteLLM to interact with
|
64
|
+
This is a tiny agent framework that uses MCP and LiteLLM to interact with language models. You have full control over the agent, you can add any tools you like from MCP and extend the agent using its event system.
|
65
|
+
|
66
|
+
**Two Main Components:**
|
67
|
+
- **TinyAgent**: Core agent with MCP tool integration and extensible hooks
|
68
|
+
- **TinyCodeAgent**: Specialized agent for secure Python code execution with pluggable providers
|
56
69
|
|
57
70
|
## Installation
|
58
71
|
|
@@ -64,6 +77,10 @@ pip install tinyagent-py
|
|
64
77
|
# Install with all optional dependencies
|
65
78
|
pip install tinyagent-py[all]
|
66
79
|
|
80
|
+
# Install with Code Agent support
|
81
|
+
pip install tinyagent-py[code]
|
82
|
+
|
83
|
+
|
67
84
|
# Install with PostgreSQL support
|
68
85
|
pip install tinyagent-py[postgres]
|
69
86
|
|
@@ -73,6 +90,10 @@ pip install tinyagent-py[sqlite]
|
|
73
90
|
# Install with Gradio UI support
|
74
91
|
pip install tinyagent-py[gradio]
|
75
92
|
|
93
|
+
|
94
|
+
|
95
|
+
|
96
|
+
|
76
97
|
```
|
77
98
|
|
78
99
|
### Using uv
|
@@ -80,6 +101,10 @@ pip install tinyagent-py[gradio]
|
|
80
101
|
# Basic installation
|
81
102
|
uv pip install tinyagent-py
|
82
103
|
|
104
|
+
# Install with Code Agent support
|
105
|
+
uv pip install tinyagent-py[code]
|
106
|
+
|
107
|
+
|
83
108
|
# Install with PostgreSQL support
|
84
109
|
uv pip install tinyagent-py[postgres]
|
85
110
|
|
@@ -92,11 +117,11 @@ uv pip install tinyagent-py[gradio]
|
|
92
117
|
# Install with all optional dependencies
|
93
118
|
uv pip install tinyagent-py[all]
|
94
119
|
|
95
|
-
# Install with development tools
|
96
|
-
uv pip install tinyagent-py[dev]
|
97
120
|
```
|
98
121
|
|
99
122
|
## Usage
|
123
|
+
|
124
|
+
### TinyAgent (Core Agent)
|
100
125
|
[](https://askdev.ai/github/askbudi/tinyagent)
|
101
126
|
|
102
127
|
|
@@ -133,6 +158,114 @@ I need accommodation in Toronto between 15th to 20th of May. Give me 5 options f
|
|
133
158
|
await test_agent(task, model="gpt-4.1-mini")
|
134
159
|
```
|
135
160
|
|
161
|
+
## TinyCodeAgent - Code Execution Made Easy
|
162
|
+
|
163
|
+
TinyCodeAgent is a specialized agent for executing Python code with enterprise-grade reliability and extensible execution providers.
|
164
|
+
|
165
|
+
### Quick Start with TinyCodeAgent
|
166
|
+
|
167
|
+
```python
|
168
|
+
import asyncio
|
169
|
+
from tinyagent import TinyCodeAgent
|
170
|
+
|
171
|
+
async def main():
|
172
|
+
# Initialize with minimal configuration
|
173
|
+
agent = TinyCodeAgent(
|
174
|
+
model="gpt-4.1-mini",
|
175
|
+
api_key="your-openai-api-key"
|
176
|
+
)
|
177
|
+
|
178
|
+
try:
|
179
|
+
# Ask the agent to solve a coding problem
|
180
|
+
result = await agent.run("Calculate the factorial of 10 and explain the algorithm")
|
181
|
+
print(result)
|
182
|
+
finally:
|
183
|
+
await agent.close()
|
184
|
+
|
185
|
+
asyncio.run(main())
|
186
|
+
```
|
187
|
+
|
188
|
+
### TinyCodeAgent with Gradio UI
|
189
|
+
|
190
|
+
Launch a complete web interface for interactive code execution:
|
191
|
+
|
192
|
+
```python
|
193
|
+
from tinyagent.code_agent.example import run_example
|
194
|
+
import asyncio
|
195
|
+
|
196
|
+
# Run the full example with Gradio interface
|
197
|
+
asyncio.run(run_example())
|
198
|
+
```
|
199
|
+
|
200
|
+
### Key Features
|
201
|
+
|
202
|
+
- **🔒 Secure Execution**: Sandboxed Python code execution using Modal.com or other providers
|
203
|
+
- **🔧 Extensible Providers**: Switch between Modal, Docker, local execution, or cloud functions
|
204
|
+
- **🎯 Built for Enterprise**: Production-ready with proper logging, error handling, and resource cleanup
|
205
|
+
- **📁 File Support**: Upload and process files through the Gradio interface
|
206
|
+
- **🛠️ Custom Tools**: Add your own tools and functions easily
|
207
|
+
- **📊 Session Persistence**: Code state persists across executions
|
208
|
+
|
209
|
+
### Provider System
|
210
|
+
|
211
|
+
TinyCodeAgent uses a pluggable provider system - change execution backends with minimal code changes:
|
212
|
+
|
213
|
+
```python
|
214
|
+
# Use Modal (default) - great for production
|
215
|
+
agent = TinyCodeAgent(provider="modal")
|
216
|
+
|
217
|
+
# Future providers (coming soon)
|
218
|
+
# agent = TinyCodeAgent(provider="docker")
|
219
|
+
# agent = TinyCodeAgent(provider="local")
|
220
|
+
# agent = TinyCodeAgent(provider="lambda")
|
221
|
+
```
|
222
|
+
|
223
|
+
### Example Use Cases
|
224
|
+
|
225
|
+
**Web Scraping:**
|
226
|
+
```python
|
227
|
+
result = await agent.run("""
|
228
|
+
What are trending spaces on huggingface today?
|
229
|
+
""")
|
230
|
+
# Agent will create a python tool to request HuggingFace API and find trending spaces
|
231
|
+
```
|
232
|
+
|
233
|
+
**Use code to solve a task:**
|
234
|
+
```python
|
235
|
+
response = await agent.run(dedent("""
|
236
|
+
Suggest me 13 tags for my Etsy Listing, each tag should be multiworded and maximum 20 characters. Each word should be used only once in the whole corpus, And tags should cover different ways people are searching for the product on Etsy.
|
237
|
+
- You should use your coding abilities to check your answer pass the criteria and continue your job until you get to the answer.
|
238
|
+
|
239
|
+
My Product is **Wedding Invitation Set of 3, in sage green color, with a gold foil border.**
|
240
|
+
"""),max_turns=20)
|
241
|
+
|
242
|
+
print(response)
|
243
|
+
# LLM is not good at this task, counting characters, avoid duplicates, but with the power of code, tiny model like gpt-4.1-mini can do it without any problem.
|
244
|
+
```
|
245
|
+
|
246
|
+
|
247
|
+
### Configuration Options
|
248
|
+
|
249
|
+
```python
|
250
|
+
from tinyagent import TinyCodeAgent
|
251
|
+
from tinyagent.code_agent.tools import get_weather, get_traffic
|
252
|
+
|
253
|
+
# Full configuration example
|
254
|
+
agent = TinyCodeAgent(
|
255
|
+
model="gpt-4.1-mini",
|
256
|
+
api_key="your-api-key",
|
257
|
+
provider="modal",
|
258
|
+
tools=[get_weather, get_traffic],
|
259
|
+
authorized_imports=["requests", "pandas", "numpy"],
|
260
|
+
provider_config={
|
261
|
+
"pip_packages": ["requests", "pandas"],
|
262
|
+
"sandbox_name": "my-code-sandbox"
|
263
|
+
}
|
264
|
+
)
|
265
|
+
```
|
266
|
+
|
267
|
+
For detailed documentation, see the [TinyCodeAgent README](tinyagent/code_agent/README.md).
|
268
|
+
|
136
269
|
## How the TinyAgent Hook System Works
|
137
270
|
|
138
271
|
TinyAgent is designed to be **extensible** via a simple, event-driven hook (callback) system. This allows you to add custom logic, logging, UI, memory, or any other behavior at key points in the agent's lifecycle.
|