swarms 7.9.6__py3-none-any.whl → 7.9.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/structs/agent.py CHANGED
@@ -86,7 +86,6 @@ from swarms.utils.index import (
 )
 from swarms.schemas.conversation_schema import ConversationSchema
 from swarms.utils.output_types import OutputType
-from swarms.utils.retry_func import retry_function


 def stop_when_repeats(response: str) -> bool:
@@ -576,8 +575,6 @@ class Agent:
         self.tool_retry_attempts = tool_retry_attempts
         self.speed_mode = speed_mode

-        # self.short_memory = self.short_memory_init()
-
         # Initialize the feedback
         self.feedback = []

@@ -605,7 +602,8 @@ class Agent:

         # Run sequential operations after all concurrent tasks are done
         # self.agent_output = self.agent_output_model()
-        log_agent_data(self.to_dict())
+        if self.autosave is True:
+            log_agent_data(self.to_dict())

         if exists(self.tools):
             self.tool_handling()
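
Note: agent-data telemetry at init time is now gated on `autosave`. A minimal sketch of opting in; the constructor arguments shown are standard `Agent` parameters, though defaults may vary by version:

    from swarms import Agent

    # autosave=True now controls both save() and the
    # log_agent_data(self.to_dict()) call during initialization.
    agent = Agent(
        agent_name="example-agent",
        model_name="gpt-4o-mini",
        max_loops=1,
        autosave=True,
    )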
@@ -827,12 +825,9 @@ class Agent:
         if self.preset_stopping_token is not None:
             self.stopping_token = "<DONE>"

-    def prepare_tools_list_dictionary(self):
-        import json
-
-        return json.loads(self.tools_list_dictionary)
-
-    def check_model_supports_utilities(self, img: str = None) -> bool:
+    def check_model_supports_utilities(
+        self, img: Optional[str] = None
+    ) -> bool:
         """
         Check if the current model supports vision capabilities.

@@ -842,18 +837,43 @@ class Agent:
         Returns:
             bool: True if model supports vision and image is provided, False otherwise.
         """
-        from litellm.utils import supports_vision
+        from litellm.utils import (
+            supports_vision,
+            supports_function_calling,
+            supports_parallel_function_calling,
+        )

         # Only check vision support if an image is provided
         if img is not None:
             out = supports_vision(self.model_name)
-            if not out:
-                raise ValueError(
-                    f"Model {self.model_name} does not support vision capabilities. Please use a vision-enabled model."
+            if out is False:
+                logger.error(
+                    f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support vision capabilities. "
+                    f"Image input was provided: {img[:100]}{'...' if len(img) > 100 else ''}. "
+                    f"Please use a vision-enabled model."
                 )
-            return out

-        return False
+        if self.tools_list_dictionary is not None:
+            out = supports_function_calling(self.model_name)
+            if out is False:
+                logger.error(
+                    f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support function calling capabilities. "
+                    f"tools_list_dictionary is set: {self.tools_list_dictionary}. "
+                    f"Please use a function calling-enabled model."
+                )
+
+        if self.tools is not None:
+            if len(self.tools) > 2:
+                out = supports_parallel_function_calling(
+                    self.model_name
+                )
+                if out is False:
+                    logger.error(
+                        f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support parallel function calling capabilities. "
+                        f"Please use a parallel function calling-enabled model."
+                    )
+
+        return None

     def check_if_no_prompt_then_autogenerate(self, task: str = None):
         """
@@ -976,7 +996,6 @@ class Agent:
         self,
         task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
-        print_task: Optional[bool] = False,
         *args,
         **kwargs,
     ) -> Any:
@@ -1001,8 +1020,7 @@ class Agent:

         self.check_if_no_prompt_then_autogenerate(task)

-        if img is not None:
-            self.check_model_supports_utilities(img=img)
+        self.check_model_supports_utilities(img=img)

         self.short_memory.add(role=self.user_name, content=task)

@@ -1015,22 +1033,11 @@ class Agent:
             # Clear the short memory
             response = None

-            # Query the long term memory first for the context
-            if self.long_term_memory is not None:
-                self.memory_query(task)
-
             # Autosave
             if self.autosave:
                 log_agent_data(self.to_dict())
                 self.save()

-            # Print the request
-            if print_task is True:
-                formatter.print_panel(
-                    content=f"\n User: {task}",
-                    title=f"Task Request for {self.agent_name}",
-                )
-
             while (
                 self.max_loops == "auto"
                 or loop_count < self.max_loops
@@ -1064,14 +1071,6 @@ class Agent:
                 success = False
                 while attempt < self.retry_attempts and not success:
                     try:
-                        if (
-                            self.long_term_memory is not None
-                            and self.rag_every_loop is True
-                        ):
-                            logger.info(
-                                "Querying RAG database for context..."
-                            )
-                            self.memory_query(task_prompt)

                         if img is not None:
                             response = self.call_llm(
@@ -1108,11 +1107,9 @@ class Agent:
                         if self.print_on is True:
                             if isinstance(response, list):
                                 self.pretty_print(
-                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ",
+                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n Output: {format_data_structure(response)} ",
                                     loop_count,
                                 )
-                            elif self.streaming_on is True:
-                                pass
                             else:
                                 self.pretty_print(
                                     response, loop_count
@@ -1141,15 +1138,14 @@ class Agent:
                                     f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
                                 )

-                        self.sentiment_and_evaluator(response)
+                        # self.sentiment_and_evaluator(response)

                         success = True  # Mark as successful to exit the retry loop

                     except Exception as e:

-                        log_agent_data(self.to_dict())
-
                         if self.autosave is True:
+                            log_agent_data(self.to_dict())
                             self.save()

                         logger.error(
@@ -1159,9 +1155,8 @@ class Agent:

                 if not success:

-                    log_agent_data(self.to_dict())
-
                     if self.autosave is True:
+                        log_agent_data(self.to_dict())
                         self.save()

                     logger.error(
@@ -1220,192 +1215,6 @@ class Agent:

                 self.save()

-            log_agent_data(self.to_dict())
-
-            # Output formatting based on output_type
-            return history_output_formatter(
-                self.short_memory, type=self.output_type
-            )
-
-        except Exception as error:
-            self._handle_run_error(error)
-
-        except KeyboardInterrupt as error:
-            self._handle_run_error(error)
-
-    def _run_fast(
-        self,
-        task: Optional[Union[str, Any]] = None,
-        img: Optional[str] = None,
-        print_task: Optional[bool] = False,
-        *args,
-        **kwargs,
-    ) -> Any:
-        """
-        run the agent
-
-        Args:
-            task (str): The task to be performed.
-            img (str): The image to be processed.
-            is_last (bool): Indicates if this is the last task.
-
-        Returns:
-            Any: The output of the agent.
-            (string, list, json, dict, yaml, xml)
-
-        Examples:
-            agent(task="What is the capital of France?")
-            agent(task="What is the capital of France?", img="path/to/image.jpg")
-            agent(task="What is the capital of France?", img="path/to/image.jpg", is_last=True)
-        """
-        try:
-
-            self.short_memory.add(role=self.user_name, content=task)
-
-            # Set the loop count
-            loop_count = 0
-
-            # Clear the short memory
-            response = None
-
-            # Query the long term memory first for the context
-            if self.long_term_memory is not None:
-                self.memory_query(task)
-
-            # Print the request
-            if print_task is True:
-                formatter.print_panel(
-                    content=f"\n User: {task}",
-                    title=f"Task Request for {self.agent_name}",
-                )
-
-            while (
-                self.max_loops == "auto"
-                or loop_count < self.max_loops
-            ):
-                loop_count += 1
-
-                if self.max_loops >= 2:
-                    self.short_memory.add(
-                        role=self.agent_name,
-                        content=f"Current Internal Reasoning Loop: {loop_count}/{self.max_loops}",
-                    )
-
-                # If it is the final loop, then add the final loop message
-                if loop_count >= 2 and loop_count == self.max_loops:
-                    self.short_memory.add(
-                        role=self.agent_name,
-                        content=f"🎉 Final Internal Reasoning Loop: {loop_count}/{self.max_loops} Prepare your comprehensive response.",
-                    )
-
-                # Dynamic temperature
-                if self.dynamic_temperature_enabled is True:
-                    self.dynamic_temperature()
-
-                # Task prompt
-                task_prompt = (
-                    self.short_memory.return_history_as_string()
-                )
-
-                # Parameters
-                attempt = 0
-                success = False
-                while attempt < self.retry_attempts and not success:
-                    try:
-                        if (
-                            self.long_term_memory is not None
-                            and self.rag_every_loop is True
-                        ):
-                            logger.info(
-                                "Querying RAG database for context..."
-                            )
-                            self.memory_query(task_prompt)
-
-                        if img is not None:
-                            response = self.call_llm(
-                                task=task_prompt,
-                                img=img,
-                                current_loop=loop_count,
-                                *args,
-                                **kwargs,
-                            )
-                        else:
-                            response = self.call_llm(
-                                task=task_prompt,
-                                current_loop=loop_count,
-                                *args,
-                                **kwargs,
-                            )
-
-                        # If streaming is enabled, then don't print the response
-
-                        # Parse the response from the agent with the output type
-                        if exists(self.tools_list_dictionary):
-                            if isinstance(response, BaseModel):
-                                response = response.model_dump()
-
-                        # Parse the response from the agent with the output type
-                        response = self.parse_llm_output(response)
-
-                        self.short_memory.add(
-                            role=self.agent_name,
-                            content=response,
-                        )
-
-                        # Print
-                        if self.print_on is True:
-                            if isinstance(response, list):
-                                self.pretty_print(
-                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ",
-                                    loop_count,
-                                )
-                            else:
-                                self.pretty_print(
-                                    response, loop_count
-                                )
-
-                        # Check and execute callable tools
-                        if exists(self.tools):
-                            self.tool_execution_retry(
-                                response, loop_count
-                            )
-
-                        # Handle MCP tools
-                        if (
-                            exists(self.mcp_url)
-                            or exists(self.mcp_config)
-                            or exists(self.mcp_urls)
-                        ):
-                            # Only handle MCP tools if response is not None
-                            if response is not None:
-                                self.mcp_tool_handling(
-                                    response=response,
-                                    current_loop=loop_count,
-                                )
-                            else:
-                                logger.warning(
-                                    f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
-                                )
-
-                        success = True  # Mark as successful to exit the retry loop
-
-                    except Exception as e:
-
-                        logger.error(
-                            f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | "
-                        )
-                        attempt += 1
-
-                if not success:
-
-                    logger.error(
-                        "Failed to generate a valid response after"
-                        " retry attempts."
-                    )
-                    break  # Exit the loop if all retry attempts fail
-
-            # log_agent_data(self.to_dict())
-
            # Output formatting based on output_type
            return history_output_formatter(
@@ -1420,10 +1229,9 @@ class Agent:
     def __handle_run_error(self, error: any):
         import traceback

-        log_agent_data(self.to_dict())
-
         if self.autosave is True:
             self.save()
+            log_agent_data(self.to_dict())

         # Get detailed error information
         error_type = type(error).__name__
@@ -1442,12 +1250,9 @@ class Agent:
         raise error

     def _handle_run_error(self, error: any):
-        process_thread = threading.Thread(
-            target=self.__handle_run_error,
-            args=(error,),
-            daemon=True,
-        )
-        process_thread.start()
+        # Handle error directly instead of using daemon thread
+        # to ensure proper exception propagation
+        self.__handle_run_error(error)

     async def arun(
         self,
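
Note: with the daemon thread gone, the `raise error` at the end of `__handle_run_error` now propagates on the caller's thread, so failures from `run()` can be caught directly. A sketch, reusing the `agent` from the earlier sketch (the task string is illustrative):

    try:
        out = agent.run(task="Summarize the latest changelog.")
    except Exception as e:
        # Previously the re-raise happened on a daemon thread and could be lost.
        print(f"Agent run failed: {e}")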
@@ -2121,9 +1926,9 @@ class Agent:

         """
         logger.info(f"Adding response filter: {filter_word}")
-        self.reponse_filters.append(filter_word)
+        self.response_filters.append(filter_word)

-    def apply_reponse_filters(self, response: str) -> str:
+    def apply_response_filters(self, response: str) -> str:
         """
         Apply the response filters to the response

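
Note: this hunk only corrects the `reponse` → `response` spelling of the filter attribute and method. Hypothetical usage of the renamed API, continuing with the same `agent`:

    agent.add_response_filter("secret-key")
    clean = agent.apply_response_filters("the secret-key is abc123")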
@@ -2198,11 +2003,17 @@ class Agent:
             None
         """
         try:
+            # Process all documents and combine their content
+            all_data = []
             for doc in docs:
                 data = data_to_text(doc)
+                all_data.append(f"Document: {doc}\n{data}")
+
+            # Combine all document content
+            combined_data = "\n\n".join(all_data)

             return self.short_memory.add(
-                role=self.user_name, content=data
+                role=self.user_name, content=combined_data
             )
         except Exception as error:
             logger.info(f"Error ingesting docs: {error}", "red")
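
Note: previously `data` was overwritten on each pass through the loop, so only the last document reached short-term memory; now every document is labeled and concatenated. An illustrative call (file names are placeholders):

    # Both files now land in short-term memory as one combined message.
    agent.ingest_docs(["report_q1.pdf", "report_q2.pdf"])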
@@ -3420,3 +3231,13 @@ class Agent:
             f"Full traceback: {traceback.format_exc()}. "
             f"Attempting to retry tool execution with 3 attempts"
         )
+
+    def add_tool_schema(self, tool_schema: dict):
+        self.tools_list_dictionary = [tool_schema]
+
+        self.output_type = "dict-all-except-first"
+
+    def add_multiple_tool_schemas(self, tool_schemas: list[dict]):
+        self.tools_list_dictionary = tool_schemas
+
+        self.output_type = "dict-all-except-first"
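
Note: the new helpers attach tool schemas to `tools_list_dictionary` and switch `output_type` to "dict-all-except-first". A hypothetical OpenAI-style function schema to illustrate the expected shape:

    weather_schema = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
    agent.add_tool_schema(weather_schema)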
swarms/structs/conversation.py CHANGED
@@ -6,7 +6,6 @@ import threading
 import uuid
 from typing import (
     TYPE_CHECKING,
-    Callable,
     Dict,
     List,
     Optional,
@@ -190,18 +189,16 @@ class Conversation(BaseStructure):
         save_enabled: bool = False,  # New parameter to control if saving is enabled
         save_filepath: str = None,
         load_filepath: str = None,  # New parameter to specify which file to load from
-        tokenizer: Callable = None,
         context_length: int = 8192,
         rules: str = None,
         custom_rules_prompt: str = None,
-        user: str = "User:",
+        user: str = "User",
         save_as_yaml: bool = False,
         save_as_json_bool: bool = False,
-        token_count: bool = True,
+        token_count: bool = False,
         message_id_on: bool = False,
         provider: providers = "in-memory",
         backend: Optional[str] = None,
-        # Backend-specific parameters
         supabase_url: Optional[str] = None,
         supabase_key: Optional[str] = None,
         redis_host: str = "localhost",
@@ -210,7 +207,6 @@ class Conversation(BaseStructure):
         redis_password: Optional[str] = None,
         db_path: Optional[str] = None,
         table_name: str = "conversations",
-        # Additional backend parameters
         use_embedded_redis: bool = True,
         persist_redis: bool = True,
         auto_persist: bool = True,
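
Note: `token_count` now defaults to False, making per-message token counting opt-in. Re-enabling it explicitly, using parameter names from the signature above (other arguments keep their defaults):

    from swarms.structs.conversation import Conversation

    conv = Conversation(token_count=True, context_length=8192)
    conv.add("User", "Hello there")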
@@ -230,20 +226,7 @@ class Conversation(BaseStructure):
         self.save_enabled = save_enabled
         self.conversations_dir = conversations_dir
         self.message_id_on = message_id_on
-
-        # Handle save filepath
-        if save_enabled and save_filepath:
-            self.save_filepath = save_filepath
-        elif save_enabled and conversations_dir:
-            self.save_filepath = os.path.join(
-                conversations_dir, f"{self.id}.json"
-            )
-        else:
-            self.save_filepath = None
-
         self.load_filepath = load_filepath
-        self.conversation_history = []
-        self.tokenizer = tokenizer
         self.context_length = context_length
         self.rules = rules
         self.custom_rules_prompt = custom_rules_prompt
@@ -253,9 +236,40 @@ class Conversation(BaseStructure):
         self.token_count = token_count
         self.provider = provider  # Keep for backwards compatibility
         self.conversations_dir = conversations_dir
+        self.backend = backend
+        self.supabase_url = supabase_url
+        self.supabase_key = supabase_key
+        self.redis_host = redis_host
+        self.redis_port = redis_port
+        self.redis_db = redis_db
+        self.redis_password = redis_password
+        self.db_path = db_path
+        self.table_name = table_name
+        self.use_embedded_redis = use_embedded_redis
+        self.persist_redis = persist_redis
+        self.auto_persist = auto_persist
+        self.redis_data_dir = redis_data_dir
+
+        self.conversation_history = []
+
+        # Handle save filepath
+        if save_enabled and save_filepath:
+            self.save_filepath = save_filepath
+        elif save_enabled and conversations_dir:
+            self.save_filepath = os.path.join(
+                conversations_dir, f"{self.id}.json"
+            )
+        else:
+            self.save_filepath = None

         # Support both 'provider' and 'backend' parameters for backwards compatibility
         # 'backend' takes precedence if both are provided
+
+        self.backend_setup(backend, provider)
+
+    def backend_setup(
+        self, backend: str = None, provider: str = None
+    ):
         self.backend = backend or provider
         self.backend_instance = None

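
Note: the backend parameters are now stored on `self` before `backend_setup` runs, so `_initialize_backend` can read them back as attributes (see the next hunk). An illustrative construction selecting the Redis backend; the parameter names come from the signature above, the host and port values are placeholders:

    conv = Conversation(
        backend="redis",
        redis_host="localhost",
        redis_port=6379,
        persist_redis=True,
    )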
@@ -285,19 +299,18 @@ class Conversation(BaseStructure):
         ]:
             try:
                 self._initialize_backend(
-                    supabase_url=supabase_url,
-                    supabase_key=supabase_key,
-                    redis_host=redis_host,
-                    redis_port=redis_port,
-                    redis_db=redis_db,
-                    redis_password=redis_password,
-                    db_path=db_path,
-                    table_name=table_name,
-                    use_embedded_redis=use_embedded_redis,
-                    persist_redis=persist_redis,
-                    auto_persist=auto_persist,
-                    redis_data_dir=redis_data_dir,
-                    **kwargs,
+                    supabase_url=self.supabase_url,
+                    supabase_key=self.supabase_key,
+                    redis_host=self.redis_host,
+                    redis_port=self.redis_port,
+                    redis_db=self.redis_db,
+                    redis_password=self.redis_password,
+                    db_path=self.db_path,
+                    table_name=self.table_name,
+                    use_embedded_redis=self.use_embedded_redis,
+                    persist_redis=self.persist_redis,
+                    auto_persist=self.auto_persist,
+                    redis_data_dir=self.redis_data_dir,
                 )
             except Exception as e:
                 logger.warning(
@@ -324,7 +337,6 @@ class Conversation(BaseStructure):
             "time_enabled": self.time_enabled,
             "autosave": self.autosave,
             "save_filepath": self.save_filepath,
-            "tokenizer": self.tokenizer,
             "context_length": self.context_length,
             "rules": self.rules,
             "custom_rules_prompt": self.custom_rules_prompt,
@@ -449,8 +461,8 @@ class Conversation(BaseStructure):
         if self.custom_rules_prompt is not None:
             self.add(self.user or "User", self.custom_rules_prompt)

-        if self.tokenizer is not None:
-            self.truncate_memory_with_tokenizer()
+        # if self.tokenizer is not None:
+        #     self.truncate_memory_with_tokenizer()

     def _autosave(self):
         """Automatically save the conversation if autosave is enabled."""
@@ -1051,9 +1063,7 @@ class Conversation(BaseStructure):
         for message in self.conversation_history:
             role = message.get("role")
             content = message.get("content")
-            tokens = self.tokenizer.count_tokens(
-                text=content
-            )  # Count the number of tokens
+            tokens = count_tokens(content)
             count = tokens  # Assign the token count
             total_tokens += count
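
Note: per-message counting now calls a module-level `count_tokens` helper instead of the removed `tokenizer` object. A minimal sketch, assuming the helper is the LiteLLM-based counter swarms ships (the import path is an assumption based on nearby versions):

    # Assumed import path for the helper used by conversation.py.
    from swarms.utils.litellm_tokenizer import count_tokens

    print(count_tokens("Hello there"))  # token count under the helper's default model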