swarms 7.6.2__py3-none-any.whl → 7.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/structs/agent.py CHANGED
@@ -6,7 +6,6 @@ import random
 import threading
 import time
 import uuid
-
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
 from typing import (
@@ -39,10 +38,8 @@ from swarms.schemas.base_schemas import (
     ChatCompletionResponseChoice,
     ChatMessageResponse,
 )
-from swarms.structs.concat import concat_strings
+from swarms.structs.agent_roles import agent_roles
 from swarms.structs.conversation import Conversation
-
-# from swarms.structs.multi_agent_exec import run_agents_concurrently
 from swarms.structs.safe_loading import (
     SafeLoaderUtils,
     SafeStateManager,
@@ -54,9 +51,11 @@ from swarms.utils.any_to_str import any_to_str
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.formatter import formatter
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
 from swarms.utils.litellm_tokenizer import count_tokens
 from swarms.utils.pdf_to_text import pdf_to_text
-from swarms.structs.agent_roles import agent_roles
 from swarms.utils.str_to_dict import str_to_dict


@@ -473,12 +472,21 @@ class Agent:
         self.no_print = no_print
         self.tools_list_dictionary = tools_list_dictionary

+        if (
+            self.agent_name is not None
+            or self.agent_description is not None
+        ):
+            prompt = f"Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {system_prompt}"
+        else:
+            prompt = system_prompt
+
         # Initialize the short term memory
         self.short_memory = Conversation(
-            system_prompt=system_prompt,
+            system_prompt=prompt,
             time_enabled=False,
             user=user_name,
             rules=rules,
+            token_count=False,
             *args,
             **kwargs,
         )
@@ -504,24 +512,10 @@ class Agent:
             tool_system_prompt=tool_system_prompt,
         )

-        # The max_loops will be set dynamically if the dynamic_loop
-        if self.dynamic_loops is True:
-            logger.info("Dynamic loops enabled")
-            self.max_loops = "auto"
-
-        # If multimodal = yes then set the sop to the multimodal sop
-        if self.multi_modal is True:
-            self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
-
-        # If the preset stopping token is enabled then set the stopping token to the preset stopping token
-        if preset_stopping_token is not None:
-            self.stopping_token = "<DONE>"
-
-        # If the docs exist then ingest the docs
-        # if exists(self.docs):
-        #     threading.Thread(
-        #         target=self.ingest_docs, args=(self.docs)
-        #     ).start()
+        # Some common configuration settings
+        threading.Thread(
+            target=self.setup_config, daemon=True
+        ).start()

         # If docs folder exists then get the docs from docs folder
         if exists(self.docs_folder):
@@ -567,10 +561,6 @@ class Agent:
         if exists(self.sop) or exists(self.sop_list):
             threading.Thread(target=self.handle_sop_ops()).start()

-        # If agent_ops is on => activate agentops
-        if agent_ops_on is True:
-            threading.Thread(target=self.activate_agentops()).start()
-
         # Many steps
         self.agent_output = ManySteps(
             agent_id=agent_id,
@@ -634,12 +624,27 @@ class Agent:
                 temperature=self.temperature,
                 max_tokens=self.max_tokens,
                 system_prompt=self.system_prompt,
+                stream=self.streaming_on,
             )
             return llm
         except Exception as e:
             logger.error(f"Error in llm_handling: {e}")
             return None

+    def setup_config(self):
+        # The max_loops will be set dynamically if the dynamic_loop
+        if self.dynamic_loops is True:
+            logger.info("Dynamic loops enabled")
+            self.max_loops = "auto"
+
+        # If multimodal = yes then set the sop to the multimodal sop
+        if self.multi_modal is True:
+            self.sop = MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1
+
+        # If the preset stopping token is enabled then set the stopping token to the preset stopping token
+        if self.preset_stopping_token is not None:
+            self.stopping_token = "<DONE>"
+
     def prepare_tools_list_dictionary(self):
         import json

@@ -775,18 +780,6 @@ class Agent:
                 """,
             )

-    def loop_count_print(
-        self, loop_count: int, max_loops: int
-    ) -> None:
-        """loop_count_print summary
-
-        Args:
-            loop_count (_type_): _description_
-            max_loops (_type_): _description_
-        """
-        logger.info(f"\nLoop {loop_count} of {max_loops}")
-        print("\n")
-
     # Check parameters
     def check_parameters(self):
         if self.llm is None:
@@ -837,8 +830,6 @@ class Agent:
         try:
             self.check_if_no_prompt_then_autogenerate(task)

-            self.agent_output.task = task
-
             # Add task to memory
             self.short_memory.add(role=self.user_name, content=task)

@@ -848,17 +839,17 @@ class Agent:

             # Set the loop count
             loop_count = 0
+
             # Clear the short memory
             response = None
-            all_responses = []

             # Query the long term memory first for the context
             if self.long_term_memory is not None:
                 self.memory_query(task)

-            # Print the user's request
-
+            # Autosave
             if self.autosave:
+                log_agent_data(self.to_dict())
                 self.save()

             # Print the request
@@ -873,8 +864,11 @@ class Agent:
                 or loop_count < self.max_loops
             ):
                 loop_count += 1
-                self.loop_count_print(loop_count, self.max_loops)
-                print("\n")
+
+                # self.short_memory.add(
+                #     role=f"{self.agent_name}",
+                #     content=f"Internal Reasoning Loop: {loop_count} of {self.max_loops}",
+                # )

                 # Dynamic temperature
                 if self.dynamic_temperature_enabled is True:
@@ -905,48 +899,24 @@ class Agent:
                             if img is None
                             else (task_prompt, img, *args)
                         )
+
+                        # Call the LLM
                         response = self.call_llm(
                             *response_args, **kwargs
                         )

                         # Convert to a str if the response is not a str
-                        response = self.llm_output_parser(response)
+                        response = self.parse_llm_output(response)

-                        # if correct_answer is not None:
-                        #     if correct_answer not in response:
-                        #         logger.info("Correct answer found in response")
-                        #         # break
+                        self.short_memory.add(
+                            role=self.agent_name, content=response
+                        )

                         # Print
-                        if self.no_print is False:
-                            if self.streaming_on is True:
-                                # self.stream_response(response)
-                                formatter.print_panel_token_by_token(
-                                    f"{self.agent_name}: {response}",
-                                    title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
-                                )
-                            else:
-                                # logger.info(f"Response: {response}")
-                                formatter.print_panel(
-                                    f"{self.agent_name}: {response}",
-                                    f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
-                                )
-
-                        # Check if response is a dictionary and has 'choices' key
-                        if (
-                            isinstance(response, dict)
-                            and "choices" in response
-                        ):
-                            response = response["choices"][0][
-                                "message"
-                            ]["content"]
-                        elif isinstance(response, str):
-                            # If response is already a string, use it as is
-                            pass
-                        else:
-                            raise ValueError(
-                                f"Unexpected response format: {type(response)}"
-                            )
+                        self.pretty_print(response, loop_count)
+
+                        # Output Cleaner
+                        self.output_cleaner_op(response)

                         # Check and execute tools
                         if self.tools is not None:
@@ -978,34 +948,7 @@ class Agent:
                                 role=self.agent_name, content=out
                             )

-                        # Add the response to the memory
-                        self.short_memory.add(
-                            role=self.agent_name, content=response
-                        )
-
-                        # Add to all responses
-                        all_responses.append(response)
-
-                        # # TODO: Implement reliability check
-
-                        if self.evaluator:
-                            logger.info("Evaluating response...")
-                            evaluated_response = self.evaluator(
-                                response
-                            )
-                            print(
-                                "Evaluated Response:"
-                                f" {evaluated_response}"
-                            )
-                            self.short_memory.add(
-                                role="Evaluator",
-                                content=evaluated_response,
-                            )
-
-                        # Sentiment analysis
-                        if self.sentiment_analyzer:
-                            logger.info("Analyzing sentiment...")
-                            self.sentiment_analysis_handler(response)
+                        self.sentiment_and_evaluator(response)

                         success = True  # Mark as successful to exit the retry loop

@@ -1062,7 +1005,7 @@ class Agent:
                         break

                     self.short_memory.add(
-                        role=self.user_name, content=user_input
+                        role="User", content=user_input
                     )

                 if self.loop_interval:
@@ -1077,91 +1020,14 @@ class Agent:
             if self.autosave is True:
                 self.save()

-            # Apply the cleaner function to the response
-            if self.output_cleaner is not None:
-                logger.info("Applying output cleaner to response.")
-                response = self.output_cleaner(response)
-                logger.info(
-                    f"Response after output cleaner: {response}"
-                )
-                self.short_memory.add(
-                    role="Output Cleaner",
-                    content=response,
-                )
-
-            if self.agent_ops_on is True and is_last is True:
-                self.check_end_session_agentops()
-
-            # Merge all responses
-            all_responses = [
-                response
-                for response in all_responses
-                if response is not None
-            ]
-
-            self.agent_output.steps = self.short_memory.to_dict()
-            self.agent_output.full_history = (
-                self.short_memory.get_str()
-            )
-            self.agent_output.total_tokens = count_tokens(
-                self.short_memory.get_str()
-            )
-
-            # # Handle artifacts
-            # if self.artifacts_on is True:
-            #     self.handle_artifacts(
-            #         concat_strings(all_responses),
-            #         self.artifacts_output_path,
-            #         self.artifacts_file_extension,
-            #     )
-
             log_agent_data(self.to_dict())

             if self.autosave is True:
                 self.save()

-            # More flexible output types
-            if (
-                self.output_type == "string"
-                or self.output_type == "str"
-            ):
-                return concat_strings(all_responses)
-            elif self.output_type == "list":
-                return all_responses
-            elif (
-                self.output_type == "json"
-                or self.return_step_meta is True
-            ):
-                return self.agent_output.model_dump_json(indent=4)
-            elif self.output_type == "csv":
-                return self.dict_to_csv(
-                    self.agent_output.model_dump()
-                )
-            elif self.output_type == "dict":
-                return self.agent_output.model_dump()
-            elif self.output_type == "yaml":
-                return yaml.safe_dump(
-                    self.agent_output.model_dump(), sort_keys=False
-                )
-
-            elif self.output_type == "memory-list":
-                return self.short_memory.return_messages_as_list()
-
-            elif self.output_type == "memory-dict":
-                return (
-                    self.short_memory.return_messages_as_dictionary()
-                )
-            elif self.return_history is True:
-                history = self.short_memory.get_str()
-
-                formatter.print_panel(
-                    history, title=f"{self.agent_name} History"
-                )
-                return history
-            else:
-                raise ValueError(
-                    f"Invalid output type: {self.output_type}"
-                )
+            return history_output_formatter(
+                self.short_memory, type=self.output_type
+            )

         except Exception as error:
             self._handle_run_error(error)
@@ -1937,7 +1803,7 @@ class Agent:
         """Send a message to the agent"""
         try:
             logger.info(f"Sending agent message: {message}")
-            message = f"{agent_name}: {message}"
+            message = f"To: {agent_name}: {message}"
             return self.run(message, *args, **kwargs)
         except Exception as error:
             logger.info(f"Error sending agent message: {error}")
@@ -2014,20 +1880,6 @@ class Agent:
             )
             raise error

-    def check_end_session_agentops(self):
-        if self.agent_ops_on is True:
-            try:
-                from swarms.utils.agent_ops_check import (
-                    end_session_agentops,
-                )
-
-                # Try ending the session
-                return end_session_agentops()
-            except ImportError:
-                logger.error(
-                    "Could not import agentops, try installing agentops: $ pip3 install agentops"
-                )
-
     def memory_query(self, task: str = None, *args, **kwargs) -> None:
         try:
             # Query the long term memory
@@ -2154,50 +2006,6 @@ class Agent:

         return out

-    def activate_agentops(self):
-        if self.agent_ops_on is True:
-            try:
-                from swarms.utils.agent_ops_check import (
-                    try_import_agentops,
-                )
-
-                # Try importing agent ops
-                logger.info(
-                    "Agent Ops Initializing, ensure that you have the agentops API key and the pip package installed."
-                )
-                try_import_agentops()
-                self.agent_ops_agent_name = self.agent_name
-
-                logger.info("Agentops successfully activated!")
-            except ImportError:
-                logger.error(
-                    "Could not import agentops, try installing agentops: $ pip3 install agentops"
-                )
-
-    def llm_output_parser(self, response: Any) -> str:
-        """Parse the output from the LLM"""
-        try:
-            if isinstance(response, dict):
-                if "choices" in response:
-                    return response["choices"][0]["message"][
-                        "content"
-                    ]
-                else:
-                    return json.dumps(
-                        response
-                    )  # Convert dict to string
-            elif isinstance(response, str):
-                return response
-            else:
-                return str(
-                    response
-                )  # Convert any other type to string
-        except Exception as e:
-            logger.error(f"Error parsing LLM output: {e}")
-            return str(
-                response
-            )  # Return string representation as fallback
-
     def log_step_metadata(
         self, loop: int, task: str, response: str
     ) -> Step:
@@ -2693,9 +2501,88 @@ class Agent:
         """
         return self.role

-    # def __getstate__(self):
-    #     state = self.__dict__.copy()
-    #     # Remove or replace unpicklable attributes.
-    #     if '_queue' in state:
-    #         del state['_queue']
-    #     return state
+    def pretty_print(self, response: str, loop_count: int):
+        if self.no_print is False:
+            if self.streaming_on is True:
+                # self.stream_response(response)
+                formatter.print_panel_token_by_token(
+                    f"{self.agent_name}: {response}",
+                    title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
+                )
+            else:
+                # logger.info(f"Response: {response}")
+                formatter.print_panel(
+                    f"{self.agent_name}: {response}",
+                    f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
+                )
+
+    def parse_llm_output(self, response: Any) -> str:
+        """Parse and standardize the output from the LLM.
+
+        Args:
+            response (Any): The response from the LLM in any format
+
+        Returns:
+            str: Standardized string output
+
+        Raises:
+            ValueError: If the response format is unexpected and can't be handled
+        """
+        try:
+            # Handle dictionary responses
+            if isinstance(response, dict):
+                if "choices" in response:
+                    return response["choices"][0]["message"][
+                        "content"
+                    ]
+                return json.dumps(
+                    response
+                )  # Convert other dicts to string
+
+            # Handle string responses
+            elif isinstance(response, str):
+                return response
+
+            # Handle list responses (from check_llm_outputs)
+            elif isinstance(response, list):
+                return "\n".join(response)
+
+            # Handle any other type by converting to string
+            else:
+                return str(response)
+
+        except Exception as e:
+            logger.error(f"Error parsing LLM output: {e}")
+            raise ValueError(
+                f"Failed to parse LLM output: {type(response)}"
+            )
+
+    def sentiment_and_evaluator(self, response: str):
+        if self.evaluator:
+            logger.info("Evaluating response...")
+
+            evaluated_response = self.evaluator(response)
+            print("Evaluated Response:" f" {evaluated_response}")
+            self.short_memory.add(
+                role="Evaluator",
+                content=evaluated_response,
+            )
+
+        # Sentiment analysis
+        if self.sentiment_analyzer:
+            logger.info("Analyzing sentiment...")
+            self.sentiment_analysis_handler(response)
+
+    def output_cleaner_op(self, response: str):
+        # Apply the cleaner function to the response
+        if self.output_cleaner is not None:
+            logger.info("Applying output cleaner to response.")
+
+            response = self.output_cleaner(response)
+
+            logger.info(f"Response after output cleaner: {response}")
+
+            self.short_memory.add(
+                role="Output Cleaner",
+                content=response,
+            )
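For context on the agent.py changes above: the per-run all_responses accumulation and the long output_type if/elif chain are replaced by a single history_output_formatter(self.short_memory, type=self.output_type) call, and raw LLM responses are normalized through the new parse_llm_output helper. A minimal usage sketch of the new output flow follows; the agent name, description, task, and constructor arguments are illustrative assumptions (a configured model and API key are also assumed), not values taken from this diff.

from swarms.structs.agent import Agent

# Hypothetical example: as of 7.6.4, Agent.run() returns the short-term
# memory (a Conversation) rendered by history_output_formatter according
# to output_type, instead of a manually concatenated list of responses.
agent = Agent(
    agent_name="Example-Agent",      # assumed name; prepended to the system prompt per the new __init__ logic
    agent_description="Demo agent",  # assumed description
    max_loops=1,
    output_type="dict",              # e.g. "str", "list", "dict", ...
)

result = agent.run("Give a one-line status update.")
print(result)  # formatted view of the conversation history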
@@ -89,7 +89,6 @@ class BaseSwarm(ABC):
         stopping_function: Optional[Callable] = None,
         stopping_condition: Optional[str] = "stop",
         stopping_condition_args: Optional[Dict] = None,
-        agentops_on: Optional[bool] = False,
         speaker_selection_func: Optional[Callable] = None,
         rules: Optional[str] = None,
         collective_memory_system: Optional[Any] = False,
@@ -112,7 +111,6 @@ class BaseSwarm(ABC):
         self.stopping_function = stopping_function
         self.stopping_condition = stopping_condition
         self.stopping_condition_args = stopping_condition_args
-        self.agentops_on = agentops_on
         self.speaker_selection_func = speaker_selection_func
         self.rules = rules
         self.collective_memory_system = collective_memory_system
@@ -167,11 +165,6 @@ class BaseSwarm(ABC):
             self.stopping_condition_args = stopping_condition_args
             self.stopping_condition = stopping_condition

-        # If agentops is enabled, try to import agentops
-        if agentops_on is True:
-            for agent in self.agents:
-                agent.agent_ops_on = True
-
         # Handle speaker selection function
         if speaker_selection_func is not None:
             if not callable(speaker_selection_func):
@@ -262,7 +262,7 @@ class ConcurrentWorkflow(BaseSwarm):
     ) -> AgentOutputSchema:
         start_time = datetime.now()
         try:
-            output = agent.run(task=task, img=img)
+            output = agent.run(task=task)

             self.conversation.add(
                 agent.agent_name,
@@ -119,7 +119,7 @@ class Conversation(BaseStructure):
             content (Union[str, dict, list]): The content of the message to be added.
         """
         now = datetime.datetime.now()
-        timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
+        now.strftime("%Y-%m-%d %H:%M:%S")

         # Base message with role
         message = {
@@ -129,8 +129,12 @@ class Conversation(BaseStructure):
         # Handle different content types
         if isinstance(content, dict) or isinstance(content, list):
             message["content"] = content
+        elif self.time_enabled:
+            message["content"] = (
+                f"Time: {now.strftime('%Y-%m-%d %H:%M:%S')} \n {content}"
+            )
         else:
-            message["content"] = f"Time: {timestamp} \n {content}"
+            message["content"] = content

         # Add the message to history immediately without waiting for token count
         self.conversation_history.append(message)
@@ -510,6 +514,16 @@ class Conversation(BaseStructure):
         """
         return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}"

+    def get_final_message_content(self):
+        """Return the content of the final message from the conversation history.
+
+        Returns:
+            str: The content of the final message.
+        """
+        output = self.conversation_history[-1]["content"]
+        # print(output)
+        return output
+

 # # Example usage
 # # conversation = Conversation()
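The Conversation changes above make timestamps opt-in: add() now prepends a "Time: ..." prefix only when time_enabled=True, and the new get_final_message_content() returns just the last message's content. A short sketch of the changed behavior, assuming the constructor and method signatures shown in this diff:

from swarms.structs.conversation import Conversation

# With time_enabled=False (as Agent passes in its __init__), content is stored as-is.
convo = Conversation(time_enabled=False)
convo.add(role="user", content="hello")
print(convo.get_final_message_content())  # -> "hello"

# With time_enabled=True, the timestamp prefix is added at write time.
timed = Conversation(time_enabled=True)
timed.add(role="user", content="hello")
print(timed.get_final_message_content())  # -> "Time: 2025-... \n hello"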
@@ -4,7 +4,8 @@ from loguru import logger
 from swarms.structs.agent import Agent

 # Prompt templates for different agent roles
-GENERATOR_PROMPT = """You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.
+GENERATOR_PROMPT = """
+You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.

 Your responsibilities:
 1. Provide accurate information based on your training data
@@ -22,7 +23,8 @@ When responding to queries:
 Remember, it's better to acknowledge ignorance than to provide incorrect information.
 """

-CRITIC_PROMPT = """You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.
+CRITIC_PROMPT = """
+You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.

 Your responsibilities:
 1. Carefully analyze the provided text for factual errors
@@ -47,7 +49,8 @@ Focus particularly on:
 Be thorough and specific in your critique. Provide actionable feedback for improvement.
 """

-REFINER_PROMPT = """You are a refinement specialist tasked with improving text based on critical feedback.
+REFINER_PROMPT = """
+You are a refinement specialist tasked with improving text based on critical feedback.

 Your responsibilities:
 1. Carefully review the original text and the critical feedback
@@ -67,7 +70,8 @@ Guidelines for refinement:
 The refined text should be helpful and informative while being scrupulously accurate.
 """

-VALIDATOR_PROMPT = """You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.
+VALIDATOR_PROMPT = """
+You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.

 Your responsibilities:
 1. Verify that all critical issues from previous feedback have been properly addressed