tinyagent-py 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tinyagent/tiny_agent.py CHANGED
@@ -12,10 +12,21 @@ import uuid
 from .storage import Storage # ← your abstract base
 import traceback
 import time # Add time import for Unix timestamps
+from pathlib import Path
+
 # Module-level logger; configuration is handled externally.
 logger = logging.getLogger(__name__)
 #litellm.callbacks = ["arize_phoenix"]
 
+def load_template(path: str,key:str="system_prompt") -> str:
+    """
+    Load the YAML file and extract its 'system_prompt' field.
+    """
+    import yaml
+    with open(path, "r") as f:
+        data = yaml.safe_load(f)
+    return data[key]
+
 def tool(name: Optional[str] = None, description: Optional[str] = None,
          schema: Optional[Dict[str, Any]] = None):
     """
@@ -39,6 +50,11 @@ def tool(name: Optional[str] = None, description: Optional[str] = None,
         # Get the description (use provided description or docstring)
         tool_description = description or inspect.getdoc(func_or_class) or f"Tool based on {tool_name}"
 
+        # Temporarily attach the description to the function/class
+        # This allows _generate_schema_from_function to access it for param extraction
+        if description:
+            func_or_class._temp_tool_description = description
+
         # Generate schema if not provided
         tool_schema = schema or {}
         if not tool_schema:
@@ -50,6 +66,10 @@ def tool(name: Optional[str] = None, description: Optional[str] = None,
                 # For functions, use the function itself
                 tool_schema = _generate_schema_from_function(func_or_class)
 
+        # Clean up temporary attribute
+        if hasattr(func_or_class, '_temp_tool_description'):
+            delattr(func_or_class, '_temp_tool_description')
+
         # Attach metadata to the function or class
         func_or_class._tool_metadata = {
             "name": tool_name,
@@ -76,6 +96,65 @@ def _generate_schema_from_function(func: Callable) -> Dict[str, Any]:
     sig = inspect.signature(func)
     type_hints = get_type_hints(func)
 
+    # Extract parameter descriptions from docstring
+    param_descriptions = {}
+
+    # First check if we have a tool decorator description (has higher priority)
+    decorator_description = None
+    if hasattr(func, '_temp_tool_description'):
+        decorator_description = func._temp_tool_description
+
+    # Get function docstring
+    docstring = inspect.getdoc(func) or ""
+
+    # Combine sources to check for parameter descriptions
+    sources_to_check = []
+    if decorator_description:
+        sources_to_check.append(decorator_description)
+    if docstring:
+        sources_to_check.append(docstring)
+
+    # Parse parameter descriptions from all sources
+    for source in sources_to_check:
+        lines = source.split('\n')
+        in_args_section = False
+        current_param = None
+
+        for line in lines:
+            line = line.strip()
+
+            # Check for Args/Parameters section markers
+            if line.lower() in ('args:', 'arguments:', 'parameters:'):
+                in_args_section = True
+                continue
+
+            # Check for other section markers that would end the args section
+            if line.lower() in ('returns:', 'raises:', 'yields:', 'examples:') and in_args_section:
+                in_args_section = False
+
+            # Look for :param or :arg style parameter descriptions
+            if line.startswith((":param", ":arg")):
+                try:
+                    # e.g., ":param user_id: The ID of the user."
+                    parts = line.split(" ", 2)
+                    if len(parts) >= 3:
+                        param_name = parts[1].strip().split(" ")[0]
+                        param_descriptions[param_name] = parts[2].strip()
+                except (ValueError, IndexError):
+                    continue
+
+            # Look for indented parameter descriptions in Args section
+            elif in_args_section and line.strip():
+                # Check for param: description pattern
+                param_match = line.lstrip().split(":", 1)
+                if len(param_match) == 2:
+                    param_name = param_match[0].strip()
+                    description = param_match[1].strip()
+                    param_descriptions[param_name] = description
+                    current_param = param_name
+                # Check for continued description from previous param
+                elif current_param and line.startswith((' ', '\t')):
+                    param_descriptions[current_param] += " " + line.strip()
     # Skip 'self' parameter for methods
     params = {
         name: param for name, param in sig.parameters.items()
@@ -91,9 +170,12 @@ def _generate_schema_from_function(func: Callable) -> Dict[str, Any]:
         param_type = type_hints.get(name, Any)
 
         # Create property schema
-        prop_schema = {"description": ""}
+        prop_schema = {}
+        description = param_descriptions.get(name)
+        if description:
+            prop_schema["description"] = description
 
-        # Map Python types to JSON schema types
+        # Handle different types of type annotations
        if param_type == str:
             prop_schema["type"] = "string"
         elif param_type == int:
@@ -107,7 +189,113 @@ def _generate_schema_from_function(func: Callable) -> Dict[str, Any]:
         elif param_type == dict or param_type == Dict:
             prop_schema["type"] = "object"
         else:
-            prop_schema["type"] = "string" # Default to string for complex types
+            # Handle generic types
+            origin = getattr(param_type, "__origin__", None)
+            args = getattr(param_type, "__args__", None)
+
+            if origin is not None and args is not None:
+                # Handle List[X], Sequence[X], etc.
+                if origin in (list, List) or (hasattr(origin, "__name__") and "List" in origin.__name__):
+                    prop_schema["type"] = "array"
+                    # Add items type if we can determine it
+                    if args and len(args) == 1:
+                        item_type = args[0]
+                        if item_type == str:
+                            prop_schema["items"] = {"type": "string"}
+                        elif item_type == int:
+                            prop_schema["items"] = {"type": "integer"}
+                        elif item_type == float:
+                            prop_schema["items"] = {"type": "number"}
+                        elif item_type == bool:
+                            prop_schema["items"] = {"type": "boolean"}
+                        else:
+                            prop_schema["items"] = {"type": "string"}
+
+                # Handle Dict[K, V], Mapping[K, V], etc.
+                elif origin in (dict, Dict) or (hasattr(origin, "__name__") and "Dict" in origin.__name__):
+                    prop_schema["type"] = "object"
+                    # We could add additionalProperties for value type, but it's not always needed
+                    if args and len(args) == 2:
+                        value_type = args[1]
+                        if value_type == str:
+                            prop_schema["additionalProperties"] = {"type": "string"}
+                        elif value_type == int:
+                            prop_schema["additionalProperties"] = {"type": "integer"}
+                        elif value_type == float:
+                            prop_schema["additionalProperties"] = {"type": "number"}
+                        elif value_type == bool:
+                            prop_schema["additionalProperties"] = {"type": "boolean"}
+                        else:
+                            prop_schema["additionalProperties"] = {"type": "string"}
+
+                # Handle Union types (Optional is Union[T, None])
+                elif origin is Union:
+                    # Check if this is Optional[X] (Union[X, None])
+                    if type(None) in args:
+                        # Get the non-None type
+                        non_none_types = [arg for arg in args if arg is not type(None)]
+                        if non_none_types:
+                            # Use the first non-None type
+                            main_type = non_none_types[0]
+                            # Recursively process this type
+                            if main_type == str:
+                                prop_schema["type"] = "string"
+                            elif main_type == int:
+                                prop_schema["type"] = "integer"
+                            elif main_type == float:
+                                prop_schema["type"] = "number"
+                            elif main_type == bool:
+                                prop_schema["type"] = "boolean"
+                            elif main_type == list or main_type == List:
+                                prop_schema["type"] = "array"
+                            elif main_type == dict or main_type == Dict:
+                                prop_schema["type"] = "object"
+                            else:
+                                # Try to handle generic types like List[str]
+                                inner_origin = getattr(main_type, "__origin__", None)
+                                inner_args = getattr(main_type, "__args__", None)
+
+                                if inner_origin is not None and inner_args is not None:
+                                    if inner_origin in (list, List) or (hasattr(inner_origin, "__name__") and "List" in inner_origin.__name__):
+                                        prop_schema["type"] = "array"
+                                        if inner_args and len(inner_args) == 1:
+                                            inner_item_type = inner_args[0]
+                                            if inner_item_type == str:
+                                                prop_schema["items"] = {"type": "string"}
+                                            elif inner_item_type == int:
+                                                prop_schema["items"] = {"type": "integer"}
+                                            elif inner_item_type == float:
+                                                prop_schema["items"] = {"type": "number"}
+                                            elif inner_item_type == bool:
+                                                prop_schema["items"] = {"type": "boolean"}
+                                            else:
+                                                prop_schema["items"] = {"type": "string"}
+                                    elif inner_origin in (dict, Dict) or (hasattr(inner_origin, "__name__") and "Dict" in inner_origin.__name__):
+                                        prop_schema["type"] = "object"
+                                        # Add additionalProperties for value type
+                                        if inner_args and len(inner_args) == 2:
+                                            value_type = inner_args[1]
+                                            if value_type == str:
+                                                prop_schema["additionalProperties"] = {"type": "string"}
+                                            elif value_type == int:
+                                                prop_schema["additionalProperties"] = {"type": "integer"}
+                                            elif value_type == float:
+                                                prop_schema["additionalProperties"] = {"type": "number"}
+                                            elif value_type == bool:
+                                                prop_schema["additionalProperties"] = {"type": "boolean"}
+                                            else:
+                                                prop_schema["additionalProperties"] = {"type": "string"}
+                                    else:
+                                        prop_schema["type"] = "string" # Default for complex types
+                                else:
+                                    prop_schema["type"] = "string" # Default for complex types
+                    else:
+                        # For non-Optional Union types, default to string
+                        prop_schema["type"] = "string"
+                else:
+                    prop_schema["type"] = "string" # Default for other complex types
+            else:
+                prop_schema["type"] = "string" # Default to string for complex types
 
         properties[name] = prop_schema
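Taken together, the docstring parsing and the generic-type handling added above mean a decorated tool can now surface per-parameter descriptions and container/Optional types in its JSON schema. A sketch of the intended effect (tool name and docstring are made up; the expected output is inferred from the rules above, not captured from the released wheel):

    from typing import List, Optional
    from tinyagent.tiny_agent import tool

    @tool(name="search_docs", description="Search the documentation index.")
    def search_docs(query: str, tags: Optional[List[str]] = None, limit: int = 5):
        """
        Search the documentation index.

        Args:
            query: Free-text search string
            tags: Optional list of tag names to filter by
            limit: Maximum number of results to return
        """
        ...

    # Approximate properties produced by _generate_schema_from_function for this signature:
    #   query -> {"description": "Free-text search string", "type": "string"}
    #   tags  -> {"description": "Optional list of tag names to filter by",
    #             "type": "array", "items": {"type": "string"}}
    #   limit -> {"description": "Maximum number of results to return", "type": "integer"}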
 
@@ -132,6 +320,13 @@ DEFAULT_SYSTEM_PROMPT = (
     "If a tool you need isn't available, just say so."
 )
 
+DEFAULT_SUMMARY_SYSTEM_PROMPT = (
+    "You are an expert assistant. Your goal is to generate a concise, structured summary "
+    "of the conversation below that captures all essential information needed to continue "
+    "development after context replacement. Include tasks performed, code areas modified or "
+    "reviewed, key decisions or assumptions, test results or errors, and outstanding tasks or next steps."
+)
+
 class TinyAgent:
     """
     A minimal implementation of an agent powered by MCP and LiteLLM,
@@ -154,7 +349,8 @@ class TinyAgent:
         session_id: Optional[str] = None,
         metadata: Optional[Dict[str, Any]] = None,
         storage: Optional[Storage] = None,
-        persist_tool_configs: bool = False
+        persist_tool_configs: bool = False,
+        summary_config: Optional[Dict[str, Any]] = None
     ):
         """
         Initialize the Tiny Agent.
@@ -168,6 +364,8 @@ class TinyAgent:
             metadata: Optional metadata for the session
             storage: Optional storage backend for persistence
             persist_tool_configs: Whether to persist tool configurations
+            summary_model: Optional model to use for generating conversation summaries
+            summary_system_prompt: Optional system prompt for the summary model
         """
         # Set up logger
         self.logger = logger or logging.getLogger(__name__)
@@ -197,6 +395,8 @@ class TinyAgent:
             "content": system_prompt or DEFAULT_SYSTEM_PROMPT
         }]
 
+        self.summary_config = summary_config or {}
+
         # This list now accumulates tools from *all* connected MCP servers:
         self.available_tools: List[Dict[str, Any]] = []
 
@@ -586,6 +786,42 @@ class TinyAgent:
         self.messages.append(user_message)
         await self._run_callbacks("message_add", message=self.messages[-1])
 
+        return await self._run_agent_loop(max_turns)
+
+    async def resume(self, max_turns: int = 10) -> str:
+        """
+        Resume the conversation without adding a new user message.
+
+        This method continues the conversation from the current state,
+        allowing the agent to process the existing conversation history
+        and potentially take additional actions.
+
+        Args:
+            max_turns: Maximum number of conversation turns
+
+        Returns:
+            The agent's response
+        """
+        # Ensure any deferred session-load happens exactly once
+        if self._needs_session_load:
+            self.logger.debug(f"Deferred session load detected for {self.session_id}; loading now")
+            await self.init_async()
+
+        # Notify start with resume flag
+        await self._run_callbacks("agent_start", resume=True)
+
+        return await self._run_agent_loop(max_turns)
+
+    async def _run_agent_loop(self, max_turns: int = 10) -> str:
+        """
+        Internal method that runs the agent's main loop.
+
+        Args:
+            max_turns: Maximum number of conversation turns
+
+        Returns:
+            The agent's response
+        """
         # Initialize loop control variables
         num_turns = 0
         next_turn_should_call_tools = True
@@ -651,6 +887,10 @@ class TinyAgent:
                     function_info = tool_call.function
                     tool_name = function_info.name
 
+                    await self._run_callbacks("tool_start", tool_call=tool_call)
+
+                    tool_result_content = ""
+
                     # Create a tool message
                     tool_message = {
                         "role": "tool",
@@ -671,28 +911,32 @@ class TinyAgent:
                         # Handle control flow tools
                         if tool_name == "final_answer":
                             # Add a response for this tool call before returning
-                            tool_message["content"] = tool_args.get("content", "Task completed without final answer.!!!")
+                            tool_result_content = tool_args.get("content", "Task completed without final answer.!!!")
+                            tool_message["content"] = tool_result_content
                             self.messages.append(tool_message)
                             await self._run_callbacks("message_add", message=tool_message)
                             await self._run_callbacks("agent_end", result="Task completed.")
+                            await self._run_callbacks("tool_end", tool_call=tool_call, result=tool_result_content)
                             return tool_message["content"]
                         elif tool_name == "ask_question":
                             question = tool_args.get("question", "Could you provide more details?")
                             # Add a response for this tool call before returning
-                            tool_message["content"] = f"Question asked: {question}"
+                            tool_result_content = f"Question asked: {question}"
+                            tool_message["content"] = tool_result_content
                             self.messages.append(tool_message)
                             await self._run_callbacks("message_add", message=tool_message)
                             await self._run_callbacks("agent_end", result=f"I need more information: {question}")
+                            await self._run_callbacks("tool_end", tool_call=tool_call, result=tool_result_content)
                             return f"I need more information: {question}"
                         else:
                             # Check if it's a custom tool first
                             if tool_name in self.custom_tool_handlers:
-                                tool_message["content"] = await self._execute_custom_tool(tool_name, tool_args)
+                                tool_result_content = await self._execute_custom_tool(tool_name, tool_args)
                             else:
                                 # Dispatch to the proper MCPClient
                                 client = self.tool_to_client.get(tool_name)
                                 if not client:
-                                    tool_message["content"] = f"No MCP server registered for tool '{tool_name}'"
+                                    tool_result_content = f"No MCP server registered for tool '{tool_name}'"
                                 else:
                                     try:
                                         self.logger.debug(f"Calling tool {tool_name} with args: {tool_args}")
@@ -703,22 +947,25 @@ class TinyAgent:
                                         if content_list:
                                             # Try different ways to extract the content
                                             if hasattr(content_list[0], 'text'):
-                                                tool_message["content"] = content_list[0].text
+                                                tool_result_content = content_list[0].text
                                             elif isinstance(content_list[0], dict) and 'text' in content_list[0]:
-                                                tool_message["content"] = content_list[0]['text']
+                                                tool_result_content = content_list[0]['text']
                                             else:
-                                                tool_message["content"] = str(content_list)
+                                                tool_result_content = str(content_list)
                                         else:
-                                            tool_message["content"] = "Tool returned no content"
+                                            tool_result_content = "Tool returned no content"
                                     except Exception as e:
                                         self.logger.error(f"Error calling tool {tool_name}: {str(e)}")
-                                        tool_message["content"] = f"Error executing tool {tool_name}: {str(e)}"
+                                        tool_result_content = f"Error executing tool {tool_name}: {str(e)}"
                     except Exception as e:
                         # If any error occurs during tool call processing, make sure we still have a tool response
                         self.logger.error(f"Unexpected error processing tool call {tool_call_id}: {str(e)}")
-                        tool_message["content"] = f"Error processing tool call: {str(e)}"
-
-                    # Always add the tool message to ensure each tool call has a response
+                        tool_result_content = f"Error processing tool call: {str(e)}"
+                    finally:
+                        # Always add the tool message to ensure each tool call has a response
+                        tool_message["content"] = tool_result_content
+                        await self._run_callbacks("tool_end", tool_call=tool_call, result=tool_result_content)
+
                     self.messages.append(tool_message)
                     await self._run_callbacks("message_add", message=tool_message)
 
@@ -934,6 +1181,162 @@ class TinyAgent:
 
         # Tool configs would be handled separately if needed
 
+    async def summarize(self) -> str:
+        """
+        Generate a summary of the current conversation history.
+
+        Args:
+            custom_model: Optional model to use for summary generation (overrides self.summary_model)
+            custom_system_prompt: Optional system prompt for summary generation (overrides self.summary_system_prompt)
+
+        Returns:
+            A string containing the conversation summary
+        """
+        # Skip if there are no messages or just the system message
+        if len(self.messages) <= 1:
+            return "No conversation to summarize."
+
+        # Use provided parameters or defaults
+        system_prompt = self.summary_config.get("system_prompt",DEFAULT_SUMMARY_SYSTEM_PROMPT)
+
+        # Format the conversation into a single string
+        conversation_text = self._format_conversation_for_summary()
+
+        task_prompt = load_template(str(Path(__file__).parent / "prompts" / "summarize.yaml"),"user_prompt")
+
+        # Build the prompt for the summary model
+        summary_messages = [
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                #"content": f"Here is the conversation so far:\n{conversation_text}\n\nPlease summarize this conversation, covering:\n0. What is the task its requirments, goals and constraints\n1. Tasks performed and outcomes\n2. Code files, modules, or functions modified or examined\n3. Important decisions or assumptions made\n4. Errors encountered and test or build results\n5. Remaining tasks, open questions, or next steps\nProvide the summary in a clear, concise format."
+                "content":conversation_text
+            },
+            {
+                "role": "user",
+                "content": task_prompt
+            }
+        ]
+
+        try:
+            # Log that we're generating a summary
+            self.logger.info(f"Generating conversation summary using model {self.summary_config.get('model',self.model)}")
+
+            # Call the LLM to generate the summary
+            response = await litellm.acompletion(
+                model=self.summary_config.get("model",self.model),
+                api_key=self.summary_config.get("api_key",self.api_key),
+                messages=summary_messages,
+                temperature=self.summary_config.get("temperature",self.temperature), # Use low temperature for consistent summaries
+                max_tokens=self.summary_config.get("max_tokens",8000) # Reasonable limit for summary length
+            )
+
+            # Extract the summary from the response
+            summary = response.choices[0].message.content
+            return summary
+
+        except Exception as e:
+            self.logger.error(f"Error generating conversation summary: {str(e)}")
+            return f"Failed to generate summary: {str(e)}"
+
+    async def compact(self) -> bool:
+        """
+        Compact the conversation history by replacing it with a summary.
+
+        This method:
+        1. Generates a summary of the current conversation
+        2. If successful, replaces the conversation with just [system, user] messages
+           where the user message contains the summary
+        3. Returns True if compaction was successful, False otherwise
+
+        Returns:
+            Boolean indicating whether the compaction was successful
+        """
+        # Skip if there are no messages or just the system message
+        if len(self.messages) <= 1:
+            self.logger.info("No conversation to compact.")
+            return False
+
+        # Generate the summary
+        summary = await self.summarize()
+
+        # Check if the summary generation was successful
+        if summary.startswith("Failed to generate summary:") or summary == "No conversation to summarize.":
+            self.logger.error(f"Compaction failed: {summary}")
+            return False
+
+        # Save the system message
+        system_message = self.messages[0]
+
+
+        # Create a new user message with the summary
+        summary_message = {
+            "role": "user",
+            "content": f"This session is being continued from a previous conversation that ran out of context. The conversation is summarized below:\n{summary}",
+            "created_at": int(time.time())
+        }
+
+        # Replace the conversation with just [system, user] messages
+        self.messages = [system_message, summary_message]
+
+        # Notify about the compaction
+        self.logger.info("🤐Conversation successfully compacted.")
+        await self._run_callbacks("message_add", message=summary_message)
+
+        return True
+
+    def _format_conversation_for_summary(self) -> str:
+        """
+        Format the conversation history into a string for summarization.
+
+        Returns:
+            A string representing the conversation in the format:
+            user: content
+            assistant: content
+            tool_call: tool name and args
+            tool_response: response content
+            ...
+        """
+        formatted_lines = []
+
+        # Skip the system message (index 0)
+        for message in self.messages[1:]:
+            role = message.get("role", "unknown")
+
+            if role == "user":
+                formatted_lines.append(f"user: {message.get('content', '')}")
+
+            elif role == "assistant":
+                content = message.get("content", "")
+                tool_calls = message.get("tool_calls", [])
+
+                # Add assistant message content if present
+                if content:
+                    formatted_lines.append(f"assistant: {content}")
+
+                # Add tool calls if present
+                for tool_call in tool_calls:
+                    function_info = tool_call.get("function", {})
+                    tool_name = function_info.get("name", "unknown_tool")
+                    arguments = function_info.get("arguments", "{}")
+
+                    formatted_lines.append(f"tool_call: {tool_name} with args {arguments}")
+
+            elif role == "tool":
+                tool_name = message.get("name", "unknown_tool")
+                content = message.get("content", "")
+                formatted_lines.append(f"tool_response: {content}")
+
+            else:
+                # Handle any other message types
+                formatted_lines.append(f"{role}: {message.get('content', '')}")
+
+        return [{'type': 'text', 'text': f"{x}"} for x in formatted_lines]
+        #return "\n".join(formatted_lines)
+
 async def run_example():
     """Example usage of TinyAgent with proper logging."""
     import os
@@ -994,7 +1397,13 @@ async def run_example():
     agent_logger.info(f"Running agent with input: {user_input}")
     result = await agent.run(user_input)
 
-    agent_logger.info(f"Final result: {result}")
+    agent_logger.info(f"Initial result: {result}")
+
+    # Now demonstrate the resume functionality
+    agent_logger.info("Resuming the conversation without new user input")
+    resume_result = await agent.resume(max_turns=3)
+
+    agent_logger.info(f"Resume result: {resume_result}")
 
     # Clean up
     await agent.close()
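Together, the new summary_config constructor argument, summarize(), compact(), and resume() let a caller shrink a long history into a summary and keep going. A rough usage sketch; the model id and key are placeholders, and behavior is inferred from the code above rather than verified against the released wheel:

    import asyncio
    from tinyagent import TinyAgent

    async def main():
        agent = TinyAgent(
            model="gpt-4.1-mini",      # placeholder model id
            api_key="sk-...",          # placeholder key
            summary_config={           # all keys optional; missing ones fall back to the main model settings
                "model": "gpt-4.1-mini",
                "temperature": 0.1,
                "max_tokens": 2000,
            },
        )
        await agent.run("Work through a long, multi-step task", max_turns=10)

        # Replace the accumulated history with [system, summary-as-user] once it grows too large.
        if await agent.compact():
            # Continue from the summarized state without adding a new user message.
            await agent.resume(max_turns=5)

        await agent.close()

    asyncio.run(main())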
{tinyagent_py-0.0.13.dist-info → tinyagent_py-0.0.15.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tinyagent-py
-Version: 0.0.13
+Version: 0.0.15
 Summary: TinyAgent with MCP Client, Code Agent (Thinking, Planning, and Executing in Python), and Extendable Hooks, Tiny but powerful
 Author-email: Mahdi Golchin <golchin@askdev.ai>
 Project-URL: Homepage, https://github.com/askbudi/tinyagent
{tinyagent_py-0.0.13.dist-info → tinyagent_py-0.0.15.dist-info}/RECORD CHANGED
@@ -1,33 +1,36 @@
 tinyagent/__init__.py,sha256=-3ZN8unMZDrA366BET1HKp-fnFCyXCAD1fPVbHkJSsY,172
 tinyagent/mcp_client.py,sha256=9dmLtJ8CTwKWKTH6K9z8CaCQuaicOH9ifAuNyX7Kdo0,6030
 tinyagent/memory_manager.py,sha256=tAaZZdxBJ235wJIyr04n3f2Damok4s2UXunTtur_p-4,44916
-tinyagent/tiny_agent.py,sha256=X-rh8gCxeVSNS4RJ9vdtqTKf8ksDGrXsV1jwprW6Fj8,41049
+tinyagent/tiny_agent.py,sha256=RHeqjGWWXOfhuQTfEbPp6rcwQmpk23FrY-gSNOyb3y8,61127
 tinyagent/code_agent/__init__.py,sha256=YSOblSwRS1QcAYUu--GvF4fKeQX1KRTj9P8CWySY3pY,327
 tinyagent/code_agent/example.py,sha256=qC6i3auUT1YwXS9WK1Ovq-9oDOUgzRxDegYdlVcVcfA,5861
-tinyagent/code_agent/helper.py,sha256=oZnpo-_H3cB12LxNN7Ztd-31EiUcuI2UpWP69xuF8oE,7205
-tinyagent/code_agent/modal_sandbox.py,sha256=RcQ5a-UFyqV7xSHnttpgAQQ-mNWk-9Z0tb836ua7C0E,16381
-tinyagent/code_agent/safety.py,sha256=WHad2ypzfsdKnXG6FcgXcgGiMC-H4KTmOzhP9S9i3Zw,22209
-tinyagent/code_agent/tiny_code_agent.py,sha256=UrNjmJSrDC493bXGARrt0tDd1Bn56FgNlxuQSu4J614,28194
-tinyagent/code_agent/utils.py,sha256=FxHnpb06S2o2-xPRHgf9zAnzbXvGWs5QApNI4DEH__U,7870
+tinyagent/code_agent/helper.py,sha256=Z_89CtEiyURW_zFwlbsAk_DApNlCycVsScpWcygk5l0,7217
+tinyagent/code_agent/modal_sandbox.py,sha256=4HLcbf53eQm7oGidOTNFd12DL4FsGPue7tj8mHndll4,16398
+tinyagent/code_agent/safety.py,sha256=RF93d6oYiVjdm5nU5jKbnRVtnPmC93AOy8Hg1LrBGtA,22494
+tinyagent/code_agent/tiny_code_agent.py,sha256=gYRRJCk_0hpRS3YSnJxaeZCCzoJeCKtvY6EhKfeu38M,42691
+tinyagent/code_agent/utils.py,sha256=jBoVMqox9HG5fWzCsu9gxiwI1oEk64dPKnDfTtw0nkw,11474
 tinyagent/code_agent/providers/__init__.py,sha256=myfy9qsBDjNOhcgXJ2E9jO1q5eo6jHp43I2k0k8esLY,136
-tinyagent/code_agent/providers/base.py,sha256=LfmahpulNbnivn5m91GTAo6ityjidq05dC3qx9EtJ80,8203
-tinyagent/code_agent/providers/modal_provider.py,sha256=R0qt8XlTMFMbznMHN32pQxupDE9KR18NpQ3l1wJJP0w,10799
+tinyagent/code_agent/providers/base.py,sha256=ai-AfgbctLsx6QyVIE0rSmp-02j0HiHR1Fd-E8C5GcY,15040
+tinyagent/code_agent/providers/modal_provider.py,sha256=lzMFJUU3MArxPv9brpNNDpU032kJpqt7RU3h2M8ctN0,15599
 tinyagent/code_agent/tools/__init__.py,sha256=0XtrgYBgBayOffW50KyrlmrXXs9iu6z1DHu7-D8WGqY,94
 tinyagent/code_agent/tools/example_tools.py,sha256=YbXb7PKuvvxh-LV12Y4n_Ez3RyLA95gWOcZrKsa7UHg,1203
-tinyagent/hooks/__init__.py,sha256=RZow2r0XHLJ3-tnmecScdc0_wrEdmOy5dtXqoiRME5Y,254
+tinyagent/hooks/__init__.py,sha256=dva7ZeghQ7BorIcYCI1rpg0MiN61O90CPy1Uc-aWamk,383
 tinyagent/hooks/gradio_callback.py,sha256=78x2x9AbYoLV-qwCxn2sH4s39DLlhNCzL7qCkVR-vy4,56911
+tinyagent/hooks/jupyter_notebook_callback.py,sha256=XjzX0EJXLbGYYZVqeqnWnVZ4KRj_uX5cUQ6oBsmCcus,65787
 tinyagent/hooks/logging_manager.py,sha256=UpdmpQ7HRPyer-jrmQSXcBwi409tV9LnGvXSHjTcYTI,7935
 tinyagent/hooks/rich_code_ui_callback.py,sha256=PLcu5MOSoP4oZR3BtvcV9DquxcIT_d0WzSlkvaDcGOk,19820
 tinyagent/hooks/rich_ui_callback.py,sha256=5iCNOiJmhc1lOL7ZjaOt5Sk3rompko4zu_pAxfTVgJQ,22897
+tinyagent/hooks/token_tracker.py,sha256=t5BfDE1fFrDYzIAnaxLxSyCAmWlsHeEIjZSkrxbTcWI,23133
 tinyagent/prompts/code_agent.yaml,sha256=xkHhR75t6N_O0VbOpXWLVjg37XHNvfOIDCmfEeUBgU4,16516
+tinyagent/prompts/summarize.yaml,sha256=-Omdjq0hDjwJPKLE75UlU2Pbgmr7qV14XKtkaQNNhgA,6344
 tinyagent/storage/__init__.py,sha256=7qwfdD4smCl891xaRuiReSUgfOJFy7jJZsN0ul1iQdY,173
 tinyagent/storage/base.py,sha256=GGAMvOoslmm1INLFG_jtwOkRk2Qg39QXx-1LnN7fxDI,1474
 tinyagent/storage/json_file_storage.py,sha256=SYD8lvTHu2-FEHm1tZmsrcgEOirBrlUsUM186X-UPgI,1114
 tinyagent/storage/postgres_storage.py,sha256=IGwan8UXHNnTZFK1F8x4kvMDex3GAAGWUg9ePx_5IF4,9018
 tinyagent/storage/redis_storage.py,sha256=hu3y7wHi49HkpiR-AW7cWVQuTVOUk1WaB8TEPGUKVJ8,1742
 tinyagent/storage/sqlite_storage.py,sha256=ZyOYe0d_oHO1wOIT8FxKIbc67tP_0e_8FnM2Zq8Pwj8,5915
-tinyagent_py-0.0.13.dist-info/licenses/LICENSE,sha256=YIogcVQnknaaE4K-oaQylFWo8JGRBWnwmGb3fWB_Pww,1064
-tinyagent_py-0.0.13.dist-info/METADATA,sha256=qr6akWcOHEZNUtQ34H_XalRWtJD-mK6WLiwmlcTH_N0,13848
-tinyagent_py-0.0.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-tinyagent_py-0.0.13.dist-info/top_level.txt,sha256=Ny8aJNchZpc2Vvhp3306L5vjceJakvFxBk-UjjVeA_I,10
-tinyagent_py-0.0.13.dist-info/RECORD,,
+tinyagent_py-0.0.15.dist-info/licenses/LICENSE,sha256=YIogcVQnknaaE4K-oaQylFWo8JGRBWnwmGb3fWB_Pww,1064
+tinyagent_py-0.0.15.dist-info/METADATA,sha256=dLpzqjyzmYLqPX6wX54zJF5PuXs4z4KVA8oNFvhK_rM,13848
+tinyagent_py-0.0.15.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+tinyagent_py-0.0.15.dist-info/top_level.txt,sha256=Ny8aJNchZpc2Vvhp3306L5vjceJakvFxBk-UjjVeA_I,10
+tinyagent_py-0.0.15.dist-info/RECORD,,