swarms 7.6.1__py3-none-any.whl → 7.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. swarms/__init__.py +1 -0
  2. swarms/agents/__init__.py +4 -5
  3. swarms/agents/flexion_agent.py +2 -1
  4. swarms/agents/reasoning_agents.py +10 -0
  5. swarms/client/__init__.py +15 -0
  6. swarms/prompts/multi_agent_collab_prompt.py +313 -0
  7. swarms/structs/__init__.py +10 -17
  8. swarms/structs/agent.py +178 -262
  9. swarms/structs/base_swarm.py +0 -7
  10. swarms/structs/concurrent_workflow.py +2 -2
  11. swarms/structs/conversation.py +16 -2
  12. swarms/structs/de_hallucination_swarm.py +8 -4
  13. swarms/structs/dynamic_conversational_swarm.py +226 -0
  14. swarms/structs/groupchat.py +80 -84
  15. swarms/structs/hiearchical_swarm.py +1 -1
  16. swarms/structs/hybrid_hiearchical_peer_swarm.py +256 -0
  17. swarms/structs/majority_voting.py +1 -1
  18. swarms/structs/mixture_of_agents.py +1 -1
  19. swarms/structs/multi_agent_exec.py +63 -139
  20. swarms/structs/multi_agent_orchestrator.py +1 -1
  21. swarms/structs/output_types.py +3 -0
  22. swarms/structs/rearrange.py +66 -205
  23. swarms/structs/sequential_workflow.py +34 -47
  24. swarms/structs/swarm_router.py +3 -2
  25. swarms/telemetry/bootup.py +19 -38
  26. swarms/telemetry/main.py +62 -22
  27. swarms/tools/tool_schema_base_model.py +57 -0
  28. swarms/utils/auto_download_check_packages.py +2 -2
  29. swarms/utils/disable_logging.py +0 -17
  30. swarms/utils/history_output_formatter.py +8 -3
  31. swarms/utils/litellm_wrapper.py +117 -1
  32. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/METADATA +1 -5
  33. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/RECORD +37 -37
  34. swarms/structs/agent_security.py +0 -318
  35. swarms/structs/airflow_swarm.py +0 -430
  36. swarms/structs/output_type.py +0 -18
  37. swarms/utils/agent_ops_check.py +0 -26
  38. swarms/utils/pandas_utils.py +0 -92
  39. /swarms/{structs/swarms_api.py → client/main.py} +0 -0
  40. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/LICENSE +0 -0
  41. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/WHEEL +0 -0
  42. {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/entry_points.txt +0 -0
swarms/structs/agent.py CHANGED
@@ -38,10 +38,8 @@ from swarms.schemas.base_schemas import (
     ChatCompletionResponseChoice,
     ChatMessageResponse,
 )
-from swarms.structs.concat import concat_strings
+from swarms.structs.agent_roles import agent_roles
 from swarms.structs.conversation import Conversation
-
-# from swarms.structs.multi_agent_exec import run_agents_concurrently
 from swarms.structs.safe_loading import (
     SafeLoaderUtils,
     SafeStateManager,
@@ -49,12 +47,16 @@ from swarms.structs.safe_loading import (
 from swarms.telemetry.main import log_agent_data
 from swarms.tools.base_tool import BaseTool
 from swarms.tools.tool_parse_exec import parse_and_execute_json
+from swarms.utils.any_to_str import any_to_str
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.formatter import formatter
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
 from swarms.utils.litellm_tokenizer import count_tokens
 from swarms.utils.pdf_to_text import pdf_to_text
-from swarms.structs.agent_roles import agent_roles
+from swarms.utils.str_to_dict import str_to_dict


 # Utils
@@ -470,12 +472,21 @@ class Agent:
         self.no_print = no_print
         self.tools_list_dictionary = tools_list_dictionary

+        if (
+            self.agent_name is not None
+            or self.agent_description is not None
+        ):
+            prompt = f"Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {system_prompt}"
+        else:
+            prompt = system_prompt
+
         # Initialize the short term memory
         self.short_memory = Conversation(
-            system_prompt=system_prompt,
+            system_prompt=prompt,
             time_enabled=False,
             user=user_name,
             rules=rules,
+            token_count=False,
             *args,
             **kwargs,
         )
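Note: with this change, the agent's name and description (when provided) are folded into the system prompt before the Conversation short-term memory is created. A minimal sketch of the resulting prompt construction, using a hypothetical standalone helper rather than the Agent constructor itself:

```python
# Illustrative sketch only; build_agent_prompt is a hypothetical helper,
# not part of the swarms API. It mirrors the branch added above.
def build_agent_prompt(agent_name, agent_description, system_prompt):
    if agent_name is not None or agent_description is not None:
        return (
            f"Your Name: {agent_name} \n\n "
            f"Your Description: {agent_description} \n\n {system_prompt}"
        )
    return system_prompt


print(build_agent_prompt("Researcher", "Finds and cites sources", "Answer concisely."))
```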
@@ -501,24 +512,10 @@ class Agent:
                 tool_system_prompt=tool_system_prompt,
             )

-        # The max_loops will be set dynamically if the dynamic_loop
-        if self.dynamic_loops is True:
-            logger.info("Dynamic loops enabled")
-            self.max_loops = "auto"
-
-        # If multimodal = yes then set the sop to the multimodal sop
-        if self.multi_modal is True:
-            self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
-
-        # If the preset stopping token is enabled then set the stopping token to the preset stopping token
-        if preset_stopping_token is not None:
-            self.stopping_token = "<DONE>"
-
-        # If the docs exist then ingest the docs
-        # if exists(self.docs):
-        #     threading.Thread(
-        #         target=self.ingest_docs, args=(self.docs)
-        #     ).start()
+        # Some common configuration settings
+        threading.Thread(
+            target=self.setup_config, daemon=True
+        ).start()

         # If docs folder exists then get the docs from docs folder
         if exists(self.docs_folder):
@@ -564,10 +561,6 @@ class Agent:
         if exists(self.sop) or exists(self.sop_list):
             threading.Thread(target=self.handle_sop_ops()).start()

-        # If agent_ops is on => activate agentops
-        if agent_ops_on is True:
-            threading.Thread(target=self.activate_agentops()).start()
-
         # Many steps
         self.agent_output = ManySteps(
             agent_id=agent_id,
@@ -631,12 +624,27 @@ class Agent:
                 temperature=self.temperature,
                 max_tokens=self.max_tokens,
                 system_prompt=self.system_prompt,
+                stream=self.streaming_on,
             )
             return llm
         except Exception as e:
             logger.error(f"Error in llm_handling: {e}")
             return None

+    def setup_config(self):
+        # The max_loops will be set dynamically if the dynamic_loop
+        if self.dynamic_loops is True:
+            logger.info("Dynamic loops enabled")
+            self.max_loops = "auto"
+
+        # If multimodal = yes then set the sop to the multimodal sop
+        if self.multi_modal is True:
+            self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
+
+        # If the preset stopping token is enabled then set the stopping token to the preset stopping token
+        if self.preset_stopping_token is not None:
+            self.stopping_token = "<DONE>"
+
     def prepare_tools_list_dictionary(self):
         import json
 
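Note: the dynamic-loops, multimodal, and stopping-token checks that previously ran inline in `__init__` (see the block removed above) are now gathered in `setup_config()` and started on a daemon thread, so construction no longer waits on them. A rough sketch of the pattern with a simplified stand-in class (not the real Agent):

```python
import threading

# Simplified stand-in, not the actual swarms Agent: shows only the
# daemon-thread configuration pattern used in the diff above.
class MiniAgent:
    def __init__(self, dynamic_loops=False):
        self.dynamic_loops = dynamic_loops
        self.max_loops = 1
        threading.Thread(target=self.setup_config, daemon=True).start()

    def setup_config(self):
        if self.dynamic_loops is True:
            self.max_loops = "auto"


agent = MiniAgent(dynamic_loops=True)
```

One side effect worth noting: because the thread is not joined, attributes touched in `setup_config` may not yet be set when the constructor returns.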
@@ -772,18 +780,6 @@ class Agent:
             """,
         )

-    def loop_count_print(
-        self, loop_count: int, max_loops: int
-    ) -> None:
-        """loop_count_print summary
-
-        Args:
-            loop_count (_type_): _description_
-            max_loops (_type_): _description_
-        """
-        logger.info(f"\nLoop {loop_count} of {max_loops}")
-        print("\n")
-
     # Check parameters
     def check_parameters(self):
         if self.llm is None:
@@ -803,7 +799,7 @@ class Agent:
     # Main function
     def _run(
         self,
-        task: Optional[str] = None,
+        task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
         speech: Optional[str] = None,
         video: Optional[str] = None,
@@ -834,8 +830,6 @@ class Agent:
         try:
             self.check_if_no_prompt_then_autogenerate(task)

-            self.agent_output.task = task
-
             # Add task to memory
             self.short_memory.add(role=self.user_name, content=task)

@@ -845,17 +839,17 @@ class Agent:

             # Set the loop count
             loop_count = 0
+
             # Clear the short memory
             response = None
-            all_responses = []

             # Query the long term memory first for the context
             if self.long_term_memory is not None:
                 self.memory_query(task)

-            # Print the user's request
-
+            # Autosave
             if self.autosave:
+                log_agent_data(self.to_dict())
                 self.save()

             # Print the request
@@ -870,8 +864,11 @@ class Agent:
                 or loop_count < self.max_loops
             ):
                 loop_count += 1
-                self.loop_count_print(loop_count, self.max_loops)
-                print("\n")
+
+                # self.short_memory.add(
+                #     role=f"{self.agent_name}",
+                #     content=f"Internal Reasoning Loop: {loop_count} of {self.max_loops}",
+                # )

                 # Dynamic temperature
                 if self.dynamic_temperature_enabled is True:
@@ -902,48 +899,24 @@ class Agent:
                             if img is None
                             else (task_prompt, img, *args)
                         )
+
+                        # Call the LLM
                         response = self.call_llm(
                             *response_args, **kwargs
                         )

                         # Convert to a str if the response is not a str
-                        response = self.llm_output_parser(response)
+                        response = self.parse_llm_output(response)

-                        # if correct_answer is not None:
-                        #     if correct_answer not in response:
-                        #         logger.info("Correct answer found in response")
-                        #         # break
+                        self.short_memory.add(
+                            role=self.agent_name, content=response
+                        )

                         # Print
-                        if self.no_print is False:
-                            if self.streaming_on is True:
-                                # self.stream_response(response)
-                                formatter.print_panel_token_by_token(
-                                    f"{self.agent_name}: {response}",
-                                    title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
-                                )
-                            else:
-                                # logger.info(f"Response: {response}")
-                                formatter.print_panel(
-                                    f"{self.agent_name}: {response}",
-                                    f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
-                                )
-
-                        # Check if response is a dictionary and has 'choices' key
-                        if (
-                            isinstance(response, dict)
-                            and "choices" in response
-                        ):
-                            response = response["choices"][0][
-                                "message"
-                            ]["content"]
-                        elif isinstance(response, str):
-                            # If response is already a string, use it as is
-                            pass
-                        else:
-                            raise ValueError(
-                                f"Unexpected response format: {type(response)}"
-                            )
+                        self.pretty_print(response, loop_count)
+
+                        # Output Cleaner
+                        self.output_cleaner_op(response)

                         # Check and execute tools
                         if self.tools is not None:
@@ -975,34 +948,7 @@ class Agent:
                                 role=self.agent_name, content=out
                             )

-                        # Add the response to the memory
-                        self.short_memory.add(
-                            role=self.agent_name, content=response
-                        )
-
-                        # Add to all responses
-                        all_responses.append(response)
-
-                        # # TODO: Implement reliability check
-
-                        if self.evaluator:
-                            logger.info("Evaluating response...")
-                            evaluated_response = self.evaluator(
-                                response
-                            )
-                            print(
-                                "Evaluated Response:"
-                                f" {evaluated_response}"
-                            )
-                            self.short_memory.add(
-                                role="Evaluator",
-                                content=evaluated_response,
-                            )
-
-                        # Sentiment analysis
-                        if self.sentiment_analyzer:
-                            logger.info("Analyzing sentiment...")
-                            self.sentiment_analysis_handler(response)
+                        self.sentiment_and_evaluator(response)

                         success = True  # Mark as successful to exit the retry loop

@@ -1059,7 +1005,7 @@ class Agent:
                         break

                     self.short_memory.add(
-                        role=self.user_name, content=user_input
+                        role="User", content=user_input
                     )

                 if self.loop_interval:
@@ -1074,90 +1020,14 @@ class Agent:
             if self.autosave is True:
                 self.save()

-            # Apply the cleaner function to the response
-            if self.output_cleaner is not None:
-                logger.info("Applying output cleaner to response.")
-                response = self.output_cleaner(response)
-                logger.info(
-                    f"Response after output cleaner: {response}"
-                )
-                self.short_memory.add(
-                    role="Output Cleaner",
-                    content=response,
-                )
-
-            if self.agent_ops_on is True and is_last is True:
-                self.check_end_session_agentops()
-
-            # Merge all responses
-            all_responses = [
-                response
-                for response in all_responses
-                if response is not None
-            ]
-
-            self.agent_output.steps = self.short_memory.to_dict()
-            self.agent_output.full_history = (
-                self.short_memory.get_str()
-            )
-            self.agent_output.total_tokens = count_tokens(
-                self.short_memory.get_str()
-            )
-
-            # # Handle artifacts
-            # if self.artifacts_on is True:
-            #     self.handle_artifacts(
-            #         concat_strings(all_responses),
-            #         self.artifacts_output_path,
-            #         self.artifacts_file_extension,
-            #     )
-
             log_agent_data(self.to_dict())
+
             if self.autosave is True:
                 self.save()

-            # More flexible output types
-            if (
-                self.output_type == "string"
-                or self.output_type == "str"
-            ):
-                return concat_strings(all_responses)
-            elif self.output_type == "list":
-                return all_responses
-            elif (
-                self.output_type == "json"
-                or self.return_step_meta is True
-            ):
-                return self.agent_output.model_dump_json(indent=4)
-            elif self.output_type == "csv":
-                return self.dict_to_csv(
-                    self.agent_output.model_dump()
-                )
-            elif self.output_type == "dict":
-                return self.agent_output.model_dump()
-            elif self.output_type == "yaml":
-                return yaml.safe_dump(
-                    self.agent_output.model_dump(), sort_keys=False
-                )
-
-            elif self.output_type == "memory-list":
-                return self.short_memory.return_messages_as_list()
-
-            elif self.output_type == "memory-dict":
-                return (
-                    self.short_memory.return_messages_as_dictionary()
-                )
-            elif self.return_history is True:
-                history = self.short_memory.get_str()
-
-                formatter.print_panel(
-                    history, title=f"{self.agent_name} History"
-                )
-                return history
-            else:
-                raise ValueError(
-                    f"Invalid output type: {self.output_type}"
-                )
+            return history_output_formatter(
+                self.short_memory, type=self.output_type
+            )

         except Exception as error:
             self._handle_run_error(error)
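Note: the long `if/elif` ladder over `output_type` is replaced by a single call to `history_output_formatter`, which renders the conversation history in the requested shape. The sketch below only approximates that dispatch idea; it is not the actual `swarms.utils.history_output_formatter` implementation:

```python
import json

# Hypothetical approximation of an output-type dispatcher over a message list.
def format_history(messages, output_type="str"):
    if output_type in ("str", "string"):
        return "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    if output_type == "list":
        return [f"{m['role']}: {m['content']}" for m in messages]
    if output_type == "dict":
        return messages
    if output_type == "json":
        return json.dumps(messages, indent=4)
    raise ValueError(f"Invalid output type: {output_type}")


history = [
    {"role": "User", "content": "hi"},
    {"role": "Agent", "content": "hello"},
]
print(format_history(history, output_type="json"))
```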
@@ -1933,7 +1803,7 @@ class Agent:
         """Send a message to the agent"""
         try:
             logger.info(f"Sending agent message: {message}")
-            message = f"{agent_name}: {message}"
+            message = f"To: {agent_name}: {message}"
             return self.run(message, *args, **kwargs)
         except Exception as error:
             logger.info(f"Error sending agent message: {error}")
@@ -2010,20 +1880,6 @@ class Agent:
             )
             raise error

-    def check_end_session_agentops(self):
-        if self.agent_ops_on is True:
-            try:
-                from swarms.utils.agent_ops_check import (
-                    end_session_agentops,
-                )
-
-                # Try ending the session
-                return end_session_agentops()
-            except ImportError:
-                logger.error(
-                    "Could not import agentops, try installing agentops: $ pip3 install agentops"
-                )
-
     def memory_query(self, task: str = None, *args, **kwargs) -> None:
         try:
             # Query the long term memory
@@ -2150,50 +2006,6 @@ class Agent:

         return out

-    def activate_agentops(self):
-        if self.agent_ops_on is True:
-            try:
-                from swarms.utils.agent_ops_check import (
-                    try_import_agentops,
-                )
-
-                # Try importing agent ops
-                logger.info(
-                    "Agent Ops Initializing, ensure that you have the agentops API key and the pip package installed."
-                )
-                try_import_agentops()
-                self.agent_ops_agent_name = self.agent_name
-
-                logger.info("Agentops successfully activated!")
-            except ImportError:
-                logger.error(
-                    "Could not import agentops, try installing agentops: $ pip3 install agentops"
-                )
-
-    def llm_output_parser(self, response: Any) -> str:
-        """Parse the output from the LLM"""
-        try:
-            if isinstance(response, dict):
-                if "choices" in response:
-                    return response["choices"][0]["message"][
-                        "content"
-                    ]
-                else:
-                    return json.dumps(
-                        response
-                    )  # Convert dict to string
-            elif isinstance(response, str):
-                return response
-            else:
-                return str(
-                    response
-                )  # Convert any other type to string
-        except Exception as e:
-            logger.error(f"Error parsing LLM output: {e}")
-            return str(
-                response
-            )  # Return string representation as fallback
-
     def log_step_metadata(
         self, loop: int, task: str, response: str
     ) -> Step:
@@ -2494,7 +2306,7 @@ class Agent:

     def run(
         self,
-        task: Optional[str] = None,
+        task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
         device: Optional[str] = "cpu",  # gpu
         device_id: Optional[int] = 0,
@@ -2531,6 +2343,9 @@ class Agent:
             Exception: If any other error occurs during execution.
         """

+        if not isinstance(task, str):
+            task = any_to_str(task)
+
         if scheduled_run_date:
             while datetime.now() < scheduled_run_date:
                 time.sleep(
@@ -2539,13 +2354,18 @@ class Agent:

         try:
             # If cluster ops disabled, run directly
-            return self._run(
+            output = self._run(
                 task=task,
                 img=img,
                 *args,
                 **kwargs,
             )

+            if self.tools_list_dictionary is not None:
+                return str_to_dict(output)
+            else:
+                return output
+
         except ValueError as e:
             self._handle_run_error(e)
 
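Note: `run()` now routes the `_run` result through `str_to_dict` whenever `tools_list_dictionary` is set, so callers that configure structured tools receive a dict instead of raw text. A minimal sketch of that conversion, assuming the model returned a JSON-formatted string (the helper below merely stands in for `swarms.utils.str_to_dict`):

```python
import json

# Stand-in for swarms.utils.str_to_dict; assumes the output is JSON text.
def str_to_dict_sketch(output: str) -> dict:
    return json.loads(output)


tools_list_dictionary = [{"type": "function", "function": {"name": "get_weather"}}]
raw_output = '{"name": "get_weather", "arguments": {"city": "Paris"}}'

# Mirrors the branch added above: structured tools -> dict, otherwise raw text.
result = (
    str_to_dict_sketch(raw_output)
    if tools_list_dictionary is not None
    else raw_output
)
print(result["arguments"]["city"])
```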
@@ -2635,7 +2455,7 @@ class Agent:
         )

         return agent.run(
-            task=f"From {self.agent_name}: {output}",
+            task=f"From {self.agent_name}: Message: {output}",
             img=img,
             *args,
             **kwargs,
@@ -2651,10 +2471,27 @@ class Agent:
         """
         Talk to multiple agents.
         """
-        outputs = []
-        for agent in agents:
-            output = self.talk_to(agent, task, *args, **kwargs)
-            outputs.append(output)
+        # o# Use the existing executor from self.executor or create a new one if needed
+        with ThreadPoolExecutor() as executor:
+            # Create futures for each agent conversation
+            futures = [
+                executor.submit(
+                    self.talk_to, agent, task, *args, **kwargs
+                )
+                for agent in agents
+            ]
+
+            # Wait for all futures to complete and collect results
+            outputs = []
+            for future in futures:
+                try:
+                    result = future.result()
+                    outputs.append(result)
+                except Exception as e:
+                    logger.error(f"Error in agent communication: {e}")
+                    outputs.append(
+                        None
+                    )  # or handle error case as needed

         return outputs
 
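Note: `talk_to_multiple_agents` switches from a sequential loop to a `ThreadPoolExecutor` fan-out, collecting results in submission order and substituting `None` for failed calls. The same pattern in isolation, with a placeholder callable instead of real agents:

```python
from concurrent.futures import ThreadPoolExecutor

# Placeholder for Agent.talk_to; any callable taking (agent, task) works here.
def talk_to(agent, task):
    return f"{agent} -> {task}"


agents = ["agent-a", "agent-b", "agent-c"]
task = "Summarize the quarterly report"

with ThreadPoolExecutor() as executor:
    futures = [executor.submit(talk_to, agent, task) for agent in agents]
    outputs = []
    for future in futures:
        try:
            outputs.append(future.result())
        except Exception as exc:
            print(f"Agent call failed: {exc}")
            outputs.append(None)  # keep positional alignment with the agents list

print(outputs)
```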
@@ -2664,9 +2501,88 @@ class Agent:
         """
         return self.role

-    # def __getstate__(self):
-    #     state = self.__dict__.copy()
-    #     # Remove or replace unpicklable attributes.
-    #     if '_queue' in state:
-    #         del state['_queue']
-    #     return state
+    def pretty_print(self, response: str, loop_count: int):
+        if self.no_print is False:
+            if self.streaming_on is True:
+                # self.stream_response(response)
+                formatter.print_panel_token_by_token(
+                    f"{self.agent_name}: {response}",
+                    title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
+                )
+            else:
+                # logger.info(f"Response: {response}")
+                formatter.print_panel(
+                    f"{self.agent_name}: {response}",
+                    f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
+                )
+
+    def parse_llm_output(self, response: Any) -> str:
+        """Parse and standardize the output from the LLM.
+
+        Args:
+            response (Any): The response from the LLM in any format
+
+        Returns:
+            str: Standardized string output
+
+        Raises:
+            ValueError: If the response format is unexpected and can't be handled
+        """
+        try:
+            # Handle dictionary responses
+            if isinstance(response, dict):
+                if "choices" in response:
+                    return response["choices"][0]["message"][
+                        "content"
+                    ]
+                return json.dumps(
+                    response
+                )  # Convert other dicts to string
+
+            # Handle string responses
+            elif isinstance(response, str):
+                return response
+
+            # Handle list responses (from check_llm_outputs)
+            elif isinstance(response, list):
+                return "\n".join(response)
+
+            # Handle any other type by converting to string
+            else:
+                return str(response)
+
+        except Exception as e:
+            logger.error(f"Error parsing LLM output: {e}")
+            raise ValueError(
+                f"Failed to parse LLM output: {type(response)}"
+            )
+
+    def sentiment_and_evaluator(self, response: str):
+        if self.evaluator:
+            logger.info("Evaluating response...")
+
+            evaluated_response = self.evaluator(response)
+            print("Evaluated Response:" f" {evaluated_response}")
+            self.short_memory.add(
+                role="Evaluator",
+                content=evaluated_response,
+            )
+
+        # Sentiment analysis
+        if self.sentiment_analyzer:
+            logger.info("Analyzing sentiment...")
+            self.sentiment_analysis_handler(response)
+
+    def output_cleaner_op(self, response: str):
+        # Apply the cleaner function to the response
+        if self.output_cleaner is not None:
+            logger.info("Applying output cleaner to response.")
+
+            response = self.output_cleaner(response)
+
+            logger.info(f"Response after output cleaner: {response}")
+
+            self.short_memory.add(
+                role="Output Cleaner",
+                content=response,
+            )
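Note: the new `parse_llm_output` helper normalizes dict, string, and list responses into one string and raises on failure, where the removed `llm_output_parser` silently fell back to `str(response)`. A small standalone usage sketch against a chat-completion-style dict (the shapes shown are typical, not guaranteed by any particular provider):

```python
import json

# Standalone copy of the normalization logic shown in the hunk above.
def parse_llm_output(response):
    if isinstance(response, dict):
        if "choices" in response:
            return response["choices"][0]["message"]["content"]
        return json.dumps(response)
    if isinstance(response, str):
        return response
    if isinstance(response, list):
        return "\n".join(response)
    return str(response)


chat_style = {"choices": [{"message": {"content": "42"}}]}
print(parse_llm_output(chat_style))  # -> "42"
print(parse_llm_output(["a", "b"]))  # -> "a\nb"
```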
swarms/structs/base_swarm.py CHANGED
@@ -89,7 +89,6 @@ class BaseSwarm(ABC):
         stopping_function: Optional[Callable] = None,
         stopping_condition: Optional[str] = "stop",
         stopping_condition_args: Optional[Dict] = None,
-        agentops_on: Optional[bool] = False,
         speaker_selection_func: Optional[Callable] = None,
         rules: Optional[str] = None,
         collective_memory_system: Optional[Any] = False,
@@ -112,7 +111,6 @@ class BaseSwarm(ABC):
         self.stopping_function = stopping_function
         self.stopping_condition = stopping_condition
         self.stopping_condition_args = stopping_condition_args
-        self.agentops_on = agentops_on
         self.speaker_selection_func = speaker_selection_func
         self.rules = rules
         self.collective_memory_system = collective_memory_system
@@ -167,11 +165,6 @@ class BaseSwarm(ABC):
         self.stopping_condition_args = stopping_condition_args
         self.stopping_condition = stopping_condition

-        # If agentops is enabled, try to import agentops
-        if agentops_on is True:
-            for agent in self.agents:
-                agent.agent_ops_on = True
-
         # Handle speaker selection function
         if speaker_selection_func is not None:
             if not callable(speaker_selection_func):
swarms/structs/concurrent_workflow.py CHANGED
@@ -12,7 +12,7 @@ from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.loguru_logger import initialize_logger
 from swarms.structs.conversation import Conversation
 from swarms.structs.swarm_id_generator import generate_swarm_id
-from swarms.structs.output_type import OutputType
+from swarms.structs.output_types import OutputType

 logger = initialize_logger(log_folder="concurrent_workflow")

@@ -262,7 +262,7 @@ class ConcurrentWorkflow(BaseSwarm):
     ) -> AgentOutputSchema:
         start_time = datetime.now()
         try:
-            output = agent.run(task=task, img=img)
+            output = agent.run(task=task)

             self.conversation.add(
                 agent.agent_name,