swarms-7.9.7-py3-none-any.whl → swarms-7.9.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/structs/agent.py CHANGED
@@ -575,8 +575,6 @@ class Agent:
         self.tool_retry_attempts = tool_retry_attempts
         self.speed_mode = speed_mode
 
-        # self.short_memory = self.short_memory_init()
-
         # Initialize the feedback
         self.feedback = []
 
@@ -827,12 +825,9 @@ class Agent:
         if self.preset_stopping_token is not None:
             self.stopping_token = "<DONE>"
 
-    def prepare_tools_list_dictionary(self):
-        import json
-
-        return json.loads(self.tools_list_dictionary)
-
-    def check_model_supports_utilities(self, img: str = None) -> bool:
+    def check_model_supports_utilities(
+        self, img: Optional[str] = None
+    ) -> bool:
         """
         Check if the current model supports vision capabilities.
 
@@ -842,18 +837,43 @@ class Agent:
         Returns:
             bool: True if model supports vision and image is provided, False otherwise.
         """
-        from litellm.utils import supports_vision
+        from litellm.utils import (
+            supports_vision,
+            supports_function_calling,
+            supports_parallel_function_calling,
+        )
 
         # Only check vision support if an image is provided
         if img is not None:
             out = supports_vision(self.model_name)
-            if not out:
-                raise ValueError(
-                    f"Model {self.model_name} does not support vision capabilities. Please use a vision-enabled model."
+            if out is False:
+                logger.error(
+                    f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support vision capabilities. "
+                    f"Image input was provided: {img[:100]}{'...' if len(img) > 100 else ''}. "
+                    f"Please use a vision-enabled model."
+                )
+
+        if self.tools_list_dictionary is not None:
+            out = supports_function_calling(self.model_name)
+            if out is False:
+                logger.error(
+                    f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support function calling capabilities. "
+                    f"tools_list_dictionary is set: {self.tools_list_dictionary}. "
+                    f"Please use a function calling-enabled model."
+                )
+
+        if self.tools is not None:
+            if len(self.tools) > 2:
+                out = supports_parallel_function_calling(
+                    self.model_name
                 )
-            return out
+                if out is False:
+                    logger.error(
+                        f"[Agent: {self.agent_name}] Model '{self.model_name}' does not support parallel function calling capabilities. "
+                        f"Please use a parallel function calling-enabled model."
+                    )
 
-        return False
+        return None
 
     def check_if_no_prompt_then_autogenerate(self, task: str = None):
         """
@@ -976,7 +996,6 @@ class Agent:
         self,
         task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
-        print_task: Optional[bool] = False,
         *args,
         **kwargs,
     ) -> Any:
@@ -1001,8 +1020,7 @@ class Agent:
 
         self.check_if_no_prompt_then_autogenerate(task)
 
-        if img is not None:
-            self.check_model_supports_utilities(img=img)
+        self.check_model_supports_utilities(img=img)
 
         self.short_memory.add(role=self.user_name, content=task)
 
@@ -1015,22 +1033,11 @@ class Agent:
             # Clear the short memory
             response = None
 
-            # Query the long term memory first for the context
-            if self.long_term_memory is not None:
-                self.memory_query(task)
-
             # Autosave
             if self.autosave:
                 log_agent_data(self.to_dict())
                 self.save()
 
-            # Print the request
-            if print_task is True:
-                formatter.print_panel(
-                    content=f"\n User: {task}",
-                    title=f"Task Request for {self.agent_name}",
-                )
-
             while (
                 self.max_loops == "auto"
                 or loop_count < self.max_loops
@@ -1064,14 +1071,6 @@ class Agent:
                 success = False
                 while attempt < self.retry_attempts and not success:
                     try:
-                        if (
-                            self.long_term_memory is not None
-                            and self.rag_every_loop is True
-                        ):
-                            logger.info(
-                                "Querying RAG database for context..."
-                            )
-                            self.memory_query(task_prompt)
 
                         if img is not None:
                             response = self.call_llm(
@@ -1108,11 +1107,9 @@ class Agent:
                         if self.print_on is True:
                             if isinstance(response, list):
                                 self.pretty_print(
-                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ",
+                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n Output: {format_data_structure(response)} ",
                                     loop_count,
                                 )
-                            elif self.streaming_on is True:
-                                pass
                             else:
                                 self.pretty_print(
                                     response, loop_count
@@ -1141,7 +1138,7 @@ class Agent:
                                     f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
                                 )
 
-                        self.sentiment_and_evaluator(response)
+                        # self.sentiment_and_evaluator(response)
 
                         success = True # Mark as successful to exit the retry loop
 
@@ -1158,7 +1155,6 @@ class Agent:
 
                 if not success:
 
-
                     if self.autosave is True:
                         log_agent_data(self.to_dict())
                         self.save()
@@ -1219,191 +1215,6 @@ class Agent:
 
             self.save()
 
-
-            # Output formatting based on output_type
-            return history_output_formatter(
-                self.short_memory, type=self.output_type
-            )
-
-        except Exception as error:
-            self._handle_run_error(error)
-
-        except KeyboardInterrupt as error:
-            self._handle_run_error(error)
-
-    def _run_fast(
-        self,
-        task: Optional[Union[str, Any]] = None,
-        img: Optional[str] = None,
-        print_task: Optional[bool] = False,
-        *args,
-        **kwargs,
-    ) -> Any:
-        """
-        run the agent
-
-        Args:
-            task (str): The task to be performed.
-            img (str): The image to be processed.
-            is_last (bool): Indicates if this is the last task.
-
-        Returns:
-            Any: The output of the agent.
-            (string, list, json, dict, yaml, xml)
-
-        Examples:
-            agent(task="What is the capital of France?")
-            agent(task="What is the capital of France?", img="path/to/image.jpg")
-            agent(task="What is the capital of France?", img="path/to/image.jpg", is_last=True)
-        """
-        try:
-
-            self.short_memory.add(role=self.user_name, content=task)
-
-            # Set the loop count
-            loop_count = 0
-
-            # Clear the short memory
-            response = None
-
-            # Query the long term memory first for the context
-            if self.long_term_memory is not None:
-                self.memory_query(task)
-
-            # Print the request
-            if print_task is True:
-                formatter.print_panel(
-                    content=f"\n User: {task}",
-                    title=f"Task Request for {self.agent_name}",
-                )
-
-            while (
-                self.max_loops == "auto"
-                or loop_count < self.max_loops
-            ):
-                loop_count += 1
-
-                if self.max_loops >= 2:
-                    self.short_memory.add(
-                        role=self.agent_name,
-                        content=f"Current Internal Reasoning Loop: {loop_count}/{self.max_loops}",
-                    )
-
-                # If it is the final loop, then add the final loop message
-                if loop_count >= 2 and loop_count == self.max_loops:
-                    self.short_memory.add(
-                        role=self.agent_name,
-                        content=f"🎉 Final Internal Reasoning Loop: {loop_count}/{self.max_loops} Prepare your comprehensive response.",
-                    )
-
-                # Dynamic temperature
-                if self.dynamic_temperature_enabled is True:
-                    self.dynamic_temperature()
-
-                # Task prompt
-                task_prompt = (
-                    self.short_memory.return_history_as_string()
-                )
-
-                # Parameters
-                attempt = 0
-                success = False
-                while attempt < self.retry_attempts and not success:
-                    try:
-                        if (
-                            self.long_term_memory is not None
-                            and self.rag_every_loop is True
-                        ):
-                            logger.info(
-                                "Querying RAG database for context..."
-                            )
-                            self.memory_query(task_prompt)
-
-                        if img is not None:
-                            response = self.call_llm(
-                                task=task_prompt,
-                                img=img,
-                                current_loop=loop_count,
-                                *args,
-                                **kwargs,
-                            )
-                        else:
-                            response = self.call_llm(
-                                task=task_prompt,
-                                current_loop=loop_count,
-                                *args,
-                                **kwargs,
-                            )
-
-                        # If streaming is enabled, then don't print the response
-
-                        # Parse the response from the agent with the output type
-                        if exists(self.tools_list_dictionary):
-                            if isinstance(response, BaseModel):
-                                response = response.model_dump()
-
-                        # Parse the response from the agent with the output type
-                        response = self.parse_llm_output(response)
-
-                        self.short_memory.add(
-                            role=self.agent_name,
-                            content=response,
-                        )
-
-                        # Print
-                        if self.print_on is True:
-                            if isinstance(response, list):
-                                self.pretty_print(
-                                    f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ",
-                                    loop_count,
-                                )
-                            else:
-                                self.pretty_print(
-                                    response, loop_count
-                                )
-
-                        # Check and execute callable tools
-                        if exists(self.tools):
-                            self.tool_execution_retry(
-                                response, loop_count
-                            )
-
-                        # Handle MCP tools
-                        if (
-                            exists(self.mcp_url)
-                            or exists(self.mcp_config)
-                            or exists(self.mcp_urls)
-                        ):
-                            # Only handle MCP tools if response is not None
-                            if response is not None:
-                                self.mcp_tool_handling(
-                                    response=response,
-                                    current_loop=loop_count,
-                                )
-                            else:
-                                logger.warning(
-                                    f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
-                                )
-
-                        success = True # Mark as successful to exit the retry loop
-
-                    except Exception as e:
-
-                        logger.error(
-                            f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | "
-                        )
-                        attempt += 1
-
-                if not success:
-
-                    logger.error(
-                        "Failed to generate a valid response after"
-                        " retry attempts."
-                    )
-                    break # Exit the loop if all retry attempts fail
-
-            # log_agent_data(self.to_dict())
-
             # Output formatting based on output_type
             return history_output_formatter(
                 self.short_memory, type=self.output_type
@@ -1439,12 +1250,9 @@ class Agent:
         raise error
 
     def _handle_run_error(self, error: any):
-        process_thread = threading.Thread(
-            target=self.__handle_run_error,
-            args=(error,),
-            daemon=True,
-        )
-        process_thread.start()
+        # Handle error directly instead of using daemon thread
+        # to ensure proper exception propagation
+        self.__handle_run_error(error)
 
     async def arun(
         self,
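
The replaced daemon-thread dispatch meant any exception raised inside __handle_run_error could never reach the caller of run(); invoking it synchronously restores normal propagation, as the retained comment says. A minimal illustration of the difference (the worker function is a stand-in name, not from this diff):

import threading

def worker():
    raise RuntimeError("boom")

# In a daemon thread the exception is reported by the threading
# machinery but never propagates to the code that called start().
t = threading.Thread(target=worker, daemon=True)
t.start()
t.join()

# Called directly, the same exception can be caught by the caller.
try:
    worker()
except RuntimeError as e:
    print(f"caught: {e}")
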
@@ -2118,9 +1926,9 @@ class Agent:
 
         """
         logger.info(f"Adding response filter: {filter_word}")
-        self.reponse_filters.append(filter_word)
+        self.response_filters.append(filter_word)
 
-    def apply_reponse_filters(self, response: str) -> str:
+    def apply_response_filters(self, response: str) -> str:
         """
         Apply the response filters to the response
 
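
This hunk fixes the reponse_/response_ spelling in both the filter-list attribute and the apply method. The filtering itself is typically simple substring replacement; a sketch of that pattern (the method body is not shown in this diff, and the [FILTERED] placeholder is an assumption):

response_filters = ["secret-key-123", "internal-hostname"]

def apply_response_filters(response: str) -> str:
    # Replace each filtered word with a placeholder.
    for word in response_filters:
        response = response.replace(word, "[FILTERED]")
    return response

print(apply_response_filters("token: secret-key-123"))
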
@@ -2195,11 +2003,17 @@ class Agent:
             None
         """
         try:
+            # Process all documents and combine their content
+            all_data = []
             for doc in docs:
                 data = data_to_text(doc)
+                all_data.append(f"Document: {doc}\n{data}")
+
+            # Combine all document content
+            combined_data = "\n\n".join(all_data)
 
             return self.short_memory.add(
-                role=self.user_name, content=data
+                role=self.user_name, content=combined_data
             )
         except Exception as error:
             logger.info(f"Error ingesting docs: {error}", "red")
@@ -3417,3 +3231,13 @@ class Agent:
                 f"Full traceback: {traceback.format_exc()}. "
                 f"Attempting to retry tool execution with 3 attempts"
             )
+
+    def add_tool_schema(self, tool_schema: dict):
+        self.tools_list_dictionary = [tool_schema]
+
+        self.output_type = "dict-all-except-first"
+
+    def add_multiple_tool_schemas(self, tool_schemas: list[dict]):
+        self.tools_list_dictionary = tool_schemas
+
+        self.output_type = "dict-all-except-first"
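
The two new setters register pre-built tool schemas directly on the agent. A hypothetical usage sketch; the Agent constructor arguments and the OpenAI-style function-schema shape are assumptions, not shown in this diff:

weather_schema = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

agent = Agent(agent_name="example-agent", model_name="gpt-4o")

# Wraps the single dict in a one-item list.
agent.add_tool_schema(weather_schema)

# Assigns the list as-is; both setters also switch
# output_type to "dict-all-except-first".
agent.add_multiple_tool_schemas([weather_schema])
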
swarms/structs/conversation.py CHANGED
@@ -6,7 +6,6 @@ import threading
 import uuid
 from typing import (
     TYPE_CHECKING,
-    Callable,
     Dict,
     List,
     Optional,
@@ -190,18 +189,16 @@ class Conversation(BaseStructure):
         save_enabled: bool = False,  # New parameter to control if saving is enabled
         save_filepath: str = None,
         load_filepath: str = None,  # New parameter to specify which file to load from
-        tokenizer: Callable = None,
         context_length: int = 8192,
         rules: str = None,
         custom_rules_prompt: str = None,
-        user: str = "User:",
+        user: str = "User",
         save_as_yaml: bool = False,
         save_as_json_bool: bool = False,
-        token_count: bool = True,
+        token_count: bool = False,
         message_id_on: bool = False,
         provider: providers = "in-memory",
         backend: Optional[str] = None,
-        # Backend-specific parameters
         supabase_url: Optional[str] = None,
         supabase_key: Optional[str] = None,
         redis_host: str = "localhost",
@@ -210,7 +207,6 @@ class Conversation(BaseStructure):
         redis_password: Optional[str] = None,
         db_path: Optional[str] = None,
         table_name: str = "conversations",
-        # Additional backend parameters
         use_embedded_redis: bool = True,
         persist_redis: bool = True,
         auto_persist: bool = True,
@@ -230,20 +226,7 @@ class Conversation(BaseStructure):
         self.save_enabled = save_enabled
         self.conversations_dir = conversations_dir
         self.message_id_on = message_id_on
-
-        # Handle save filepath
-        if save_enabled and save_filepath:
-            self.save_filepath = save_filepath
-        elif save_enabled and conversations_dir:
-            self.save_filepath = os.path.join(
-                conversations_dir, f"{self.id}.json"
-            )
-        else:
-            self.save_filepath = None
-
         self.load_filepath = load_filepath
-        self.conversation_history = []
-        self.tokenizer = tokenizer
         self.context_length = context_length
         self.rules = rules
         self.custom_rules_prompt = custom_rules_prompt
@@ -253,9 +236,40 @@ class Conversation(BaseStructure):
         self.token_count = token_count
         self.provider = provider # Keep for backwards compatibility
         self.conversations_dir = conversations_dir
+        self.backend = backend
+        self.supabase_url = supabase_url
+        self.supabase_key = supabase_key
+        self.redis_host = redis_host
+        self.redis_port = redis_port
+        self.redis_db = redis_db
+        self.redis_password = redis_password
+        self.db_path = db_path
+        self.table_name = table_name
+        self.use_embedded_redis = use_embedded_redis
+        self.persist_redis = persist_redis
+        self.auto_persist = auto_persist
+        self.redis_data_dir = redis_data_dir
+
+        self.conversation_history = []
+
+        # Handle save filepath
+        if save_enabled and save_filepath:
+            self.save_filepath = save_filepath
+        elif save_enabled and conversations_dir:
+            self.save_filepath = os.path.join(
+                conversations_dir, f"{self.id}.json"
+            )
+        else:
+            self.save_filepath = None
 
         # Support both 'provider' and 'backend' parameters for backwards compatibility
         # 'backend' takes precedence if both are provided
+
+        self.backend_setup(backend, provider)
+
+    def backend_setup(
+        self, backend: str = None, provider: str = None
+    ):
         self.backend = backend or provider
         self.backend_instance = None
 
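
With the constructor parameters now stored on self, backend selection moves into the new backend_setup method, which keeps the documented precedence: backend wins over provider when both are supplied. A hypothetical construction sketch (the argument values are illustrative, and a reachable Redis instance would be needed for the backend to actually initialize):

# backend="redis" takes precedence over provider="in-memory",
# since backend_setup resolves self.backend = backend or provider.
conv = Conversation(
    provider="in-memory",
    backend="redis",
    redis_host="localhost",
    redis_port=6379,
)
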
@@ -285,19 +299,18 @@ class Conversation(BaseStructure):
         ]:
             try:
                 self._initialize_backend(
-                    supabase_url=supabase_url,
-                    supabase_key=supabase_key,
-                    redis_host=redis_host,
-                    redis_port=redis_port,
-                    redis_db=redis_db,
-                    redis_password=redis_password,
-                    db_path=db_path,
-                    table_name=table_name,
-                    use_embedded_redis=use_embedded_redis,
-                    persist_redis=persist_redis,
-                    auto_persist=auto_persist,
-                    redis_data_dir=redis_data_dir,
-                    **kwargs,
+                    supabase_url=self.supabase_url,
+                    supabase_key=self.supabase_key,
+                    redis_host=self.redis_host,
+                    redis_port=self.redis_port,
+                    redis_db=self.redis_db,
+                    redis_password=self.redis_password,
+                    db_path=self.db_path,
+                    table_name=self.table_name,
+                    use_embedded_redis=self.use_embedded_redis,
+                    persist_redis=self.persist_redis,
+                    auto_persist=self.auto_persist,
+                    redis_data_dir=self.redis_data_dir,
                 )
             except Exception as e:
                 logger.warning(
@@ -324,7 +337,6 @@ class Conversation(BaseStructure):
             "time_enabled": self.time_enabled,
             "autosave": self.autosave,
             "save_filepath": self.save_filepath,
-            "tokenizer": self.tokenizer,
             "context_length": self.context_length,
             "rules": self.rules,
             "custom_rules_prompt": self.custom_rules_prompt,
@@ -449,8 +461,8 @@ class Conversation(BaseStructure):
         if self.custom_rules_prompt is not None:
             self.add(self.user or "User", self.custom_rules_prompt)
 
-        if self.tokenizer is not None:
-            self.truncate_memory_with_tokenizer()
+        # if self.tokenizer is not None:
+        #     self.truncate_memory_with_tokenizer()
 
     def _autosave(self):
         """Automatically save the conversation if autosave is enabled."""
@@ -1051,9 +1063,7 @@ class Conversation(BaseStructure):
             for message in self.conversation_history:
                 role = message.get("role")
                 content = message.get("content")
-                tokens = self.tokenizer.count_tokens(
-                    text=content
-                ) # Count the number of tokens
+                tokens = count_tokens(content)
                 count = tokens # Assign the token count
                 total_tokens += count
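
The per-instance tokenizer object is gone; token counting now goes through a module-level count_tokens helper whose implementation is not shown in this diff. One plausible implementation, assuming a tiktoken-style tokenizer (the default model name and fallback encoding are assumptions):

import tiktoken

def count_tokens(text: str, model: str = "gpt-4o") -> int:
    # Fall back to a generic encoding for models tiktoken doesn't know.
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))
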