swarms 7.6.0__py3-none-any.whl → 7.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,581 @@
+ from typing import List, Dict, Any, Union
+ import time
+
+ from swarms.structs.agent import Agent
+ from swarms.structs.conversation import Conversation
+
+ from loguru import logger
+
+
+ class KnowledgeGenerator:
+     """
+     A component that generates relevant knowledge for a given input query.
+
+     The knowledge generator creates detailed contextual information that can be used
+     to enhance the reasoning capabilities of the main agent when responding to queries.
+
+     Attributes:
+         agent_name (str): Name of the knowledge generator agent
+         model_name (str): Model to use for knowledge generation
+         num_knowledge_items (int): Number of knowledge items to generate per query
+     """
+
+     def __init__(
+         self,
+         agent_name: str = "knowledge-generator",
+         model_name: str = "openai/o1",
+         num_knowledge_items: int = 2,
+     ) -> None:
+         """
+         Initialize the knowledge generator component.
+
+         Args:
+             agent_name (str): Name identifier for the knowledge generator agent
+             model_name (str): LLM model to use for knowledge generation
+             num_knowledge_items (int): Number of knowledge snippets to generate for each query
+         """
+         self.agent_name = agent_name
+         self.model_name = model_name
+         self.num_knowledge_items = num_knowledge_items
+
+         # Create the knowledge generator agent
+         knowledge_system_prompt = (
+             self._create_knowledge_system_prompt()
+         )
+         self.agent = Agent(
+             agent_name=agent_name,
+             agent_description="Generates factual, relevant knowledge to assist with answering queries",
+             system_prompt=knowledge_system_prompt,
+             model_name=model_name,
+             max_loops=1,
+         )
+
+         logger.info(
+             f"Initialized {self.agent_name} with model {self.model_name}"
+         )
+
+     def _create_knowledge_system_prompt(self) -> str:
+         """
+         Create the system prompt for the knowledge generator.
+
+         Returns:
+             str: System prompt with examples and instructions
+         """
+         examples_text = ""
+
+         system_prompt = f"""You are a specialized knowledge generator that provides factually accurate, detailed information relevant to a given input query. Your role is to generate precise knowledge that can help answer the query correctly.
+
+ When provided with an input query, generate {self.num_knowledge_items} separate, independent knowledge statements that are directly relevant to the query and provide context that would help answer it accurately.
+
+ Each knowledge statement should be:
+ 1. Factually accurate and verifiable
+ 2. Detailed and specific (not general statements)
+ 3. Directly relevant to addressing the query
+ 4. Neutral and objective, providing context rather than opinions
+ 5. Independent from other knowledge statements (provide different perspectives)
+
+ Here are examples of good knowledge generation:
+
+ {examples_text}
+
+ For each input, provide knowledge statements formatted as:
+ "Knowledge 1: [factual, detailed information relevant to the query]"
+ "Knowledge 2: [alternative factual, detailed information relevant to the query]"
+ etc.
+
+ Focus on providing knowledge that would help someone arrive at the correct answer to the query, particularly for questions that require commonsense reasoning or factual information.
+ """
+
+         return system_prompt
+
+     def generate_knowledge(self, query: str) -> List[str]:
+         """
+         Generate relevant knowledge for the input query.
+
+         Args:
+             query (str): The input query to generate knowledge for
+
+         Returns:
+             List[str]: List of generated knowledge statements
+         """
+         prompt = f"Input: {query}\nKnowledge:"
+
+         logger.debug(f"Generating knowledge for query: {query}")
+         start_time = time.time()
+
+         response = self.agent.run(task=prompt)
+
+         end_time = time.time()
+         logger.debug(
+             f"Knowledge generation completed in {end_time - start_time:.2f}s"
+         )
+
+         # Parse the generated knowledge into separate statements
+         knowledge_items = []
+
+         # Handle different response formats
+         if "Knowledge 1:" in response:
+             # Extract numbered knowledge items
+             for i in range(1, self.num_knowledge_items + 1):
+                 marker = f"Knowledge {i}:"
+                 next_marker = (
+                     f"Knowledge {i+1}:"
+                     if i < self.num_knowledge_items
+                     else None
+                 )
+
+                 if marker in response:
+                     start_idx = response.find(marker) + len(marker)
+                     end_idx = (
+                         response.find(next_marker)
+                         if next_marker and next_marker in response
+                         else None
+                     )
+
+                     knowledge = (
+                         response[start_idx:end_idx].strip()
+                         if end_idx
+                         else response[start_idx:].strip()
+                     )
+                     knowledge_items.append(knowledge)
+         else:
+             # If not properly formatted with numbers, split by paragraphs
+             paragraphs = [
+                 p.strip() for p in response.split("\n\n") if p.strip()
+             ]
+             for p in paragraphs[: self.num_knowledge_items]:
+                 if p.startswith("Knowledge:"):
+                     p = p[len("Knowledge:") :].strip()
+                 knowledge_items.append(p)
+
+         # Ensure we have the requested number of knowledge items
+         while len(knowledge_items) < self.num_knowledge_items:
+             logger.warning(
+                 f"Only generated {len(knowledge_items)} knowledge items, expected {self.num_knowledge_items}"
+             )
+             knowledge_items.append(
+                 ""
+             )  # Add empty string as placeholder
+
+         # Truncate if we have too many
+         knowledge_items = knowledge_items[: self.num_knowledge_items]
+
+         logger.info(
+             f"Generated {len(knowledge_items)} knowledge items"
+         )
+         return knowledge_items
+
+
+ class Reasoner:
+     """
+     Component that uses generated knowledge to reason about and answer queries.
+
+     This reasoner takes knowledge generated by the KnowledgeGenerator and uses it
+     to make more informed decisions when answering questions.
+
+     Attributes:
+         agent_name (str): Name of the reasoner agent
+         model_name (str): Model to use for reasoning
+     """
+
+     def __init__(
+         self,
+         agent_name: str = "knowledge-reasoner",
+         model_name: str = "openai/o1",
+     ) -> None:
+         """
+         Initialize the reasoner component.
+
+         Args:
+             agent_name (str): Name identifier for the reasoner agent
+             model_name (str): LLM model to use for reasoning
+         """
+         self.agent_name = agent_name
+         self.model_name = model_name
+
+         # Create the reasoning agent
+         reasoning_system_prompt = (
+             self._create_reasoning_system_prompt()
+         )
+         self.agent = Agent(
+             agent_name=agent_name,
+             agent_description="Reasons about queries using provided knowledge to generate accurate answers",
+             system_prompt=reasoning_system_prompt,
+             model_name=model_name,
+             max_loops=1,
+         )
+
+         logger.info(
+             f"Initialized {self.agent_name} with model {self.model_name}"
+         )
+
+     def _create_reasoning_system_prompt(self) -> str:
+         """
+         Create the system prompt for the reasoner.
+
+         Returns:
+             str: System prompt with instructions
+         """
+         system_prompt = """
+ You are a specialized reasoning agent that answers questions based on provided knowledge. Your role is to carefully analyze the given knowledge and use it to answer the question accurately.
+
+ For each question:
+ 1. Carefully read the provided knowledge
+ 2. Analyze how the knowledge relates to the question
+ 3. Use the knowledge to form a well-reasoned answer
+ 4. Provide your answer along with an explanation of your reasoning
+ 5. Include a confidence assessment (very high, high, medium, low, very low)
+
+ Your response should follow this format:
+ "Explanation: [Your detailed reasoning based on the knowledge]
+ Confidence: [Your confidence level]
+ Answer: [Your final answer]"
+
+ Be objective and precise. If the knowledge contradicts itself or is insufficient to answer the question, acknowledge this in your response and provide your best judgment given the available information.
+
+ Focus on using the provided knowledge rather than your pre-existing information, though you may use your general understanding to interpret the knowledge appropriately.
+ """
+
+         return system_prompt
+
+     def reason_and_answer(
+         self, query: str, knowledge: str
+     ) -> Dict[str, str]:
+         """
+         Reason about the query using the provided knowledge and generate an answer.
+
+         Args:
+             query (str): The input query to answer
+             knowledge (str): Knowledge to use for reasoning
+
+         Returns:
+             Dict[str, str]: Dictionary containing explanation, confidence and answer
+         """
+         # Format the prompt
+         prompt = f"Question: {query}\nKnowledge: {knowledge}\nExplain and Answer:"
+
+         logger.debug(f"Reasoning about query: {query}")
+         start_time = time.time()
+
+         response = self.agent.run(task=prompt)
+
+         end_time = time.time()
+         logger.debug(
+             f"Reasoning completed in {end_time - start_time:.2f}s"
+         )
+
+         # Parse the response
+         result = {"explanation": "", "confidence": "", "answer": ""}
+
+         if "Explanation:" in response and "Answer:" in response:
+             # Get explanation
+             explanation_start = response.find("Explanation:") + len(
+                 "Explanation:"
+             )
+
+             # Find the end of explanation (which is either Confidence: or Answer:)
+             confidence_pos = response.find("Confidence:")
+             answer_pos = response.find("Answer:")
+
+             explanation_end = min(
+                 pos for pos in [confidence_pos, answer_pos] if pos > 0
+             )
+             result["explanation"] = response[
+                 explanation_start:explanation_end
+             ].strip()
+
+             # Get confidence if present
+             if confidence_pos > 0:
+                 confidence_start = confidence_pos + len("Confidence:")
+                 confidence_end = (
+                     answer_pos
+                     if answer_pos > confidence_pos
+                     else len(response)
+                 )
+                 result["confidence"] = response[
+                     confidence_start:confidence_end
+                 ].strip()
+
+             # Get answer
+             if answer_pos > 0:
+                 answer_start = answer_pos + len("Answer:")
+                 result["answer"] = response[answer_start:].strip()
+         else:
+             # Fallback parsing if not properly formatted
+             result["answer"] = response.strip()
+
+         return result
+
+
+ class GKPAgent:
+     """
+     Generated Knowledge Prompting (GKP) Agent that enhances reasoning by generating
+     relevant knowledge before answering queries.
+
+     This agent implements the approach described in Liu et al. 2022, generating knowledge
+     to improve performance on tasks requiring commonsense reasoning and factual information.
+
+     Attributes:
+         agent_name (str): Name of the GKP agent
+         model_name (str): Model to use for all components
+         num_knowledge_items (int): Number of knowledge items to generate per query
+         knowledge_generator (KnowledgeGenerator): Component for generating knowledge
+         reasoner (Reasoner): Component for reasoning using the generated knowledge
+         conversation (Conversation): Conversation history manager
+     """
+
+     def __init__(
+         self,
+         agent_name: str = "gkp-agent",
+         model_name: str = "openai/o1",
+         num_knowledge_items: int = 6,
+     ) -> None:
+         """
+         Initialize the GKP Agent with its components.
+
+         Args:
+             agent_name (str): Name identifier for the agent
+             model_name (str): LLM model to use for all components
+             num_knowledge_items (int): Number of knowledge snippets to generate for each query
+         """
+         self.agent_name = agent_name
+         self.model_name = model_name
+         self.num_knowledge_items = num_knowledge_items
+         self.conversation = Conversation(time_enabled=True)
+
+         # Initialize components
+         self.knowledge_generator = KnowledgeGenerator(
+             agent_name=f"{agent_name}-knowledge-generator",
+             model_name=model_name,
+             num_knowledge_items=num_knowledge_items,
+         )
+
+         self.reasoner = Reasoner(
+             agent_name=f"{agent_name}-reasoner",
+             model_name=model_name,
+         )
+
+         # Create the final response coordinator agent
+         coordinator_system_prompt = (
+             self._create_coordinator_system_prompt()
+         )
+         self.coordinator = Agent(
+             agent_name=f"{agent_name}-coordinator",
+             agent_description="Coordinates multiple reasoning paths to provide the best final answer",
+             system_prompt=coordinator_system_prompt,
+             model_name=model_name,
+             max_loops=1,
+         )
+
+         logger.info(
+             f"Initialized {self.agent_name} with model {self.model_name}"
+         )
+
+     def _create_coordinator_system_prompt(self) -> str:
+         """
+         Create the system prompt for the response coordinator.
+
+         Returns:
+             str: System prompt with instructions
+         """
+         system_prompt = """
+ You are a specialized coordination agent that analyzes multiple reasoning paths and answers to determine the most accurate final response.
+
+ For each query, you will receive:
+ 1. The original question
+ 2. Multiple reasoning paths, each with:
+    - Generated knowledge used for reasoning
+    - An explanation of the reasoning process
+    - A confidence assessment
+    - An answer derived from that reasoning path
+
+ Your task is to:
+ 1. Analyze all reasoning paths
+ 2. Determine which path(s) have the most accurate and reliable reasoning
+ 3. Assess the confidence levels provided
+ 4. Resolve any contradictions between different answers
+ 5. Provide a final, definitive answer that represents the most accurate conclusion
+
+ Structure your response as follows:
+ "Analysis: [Brief analysis of the different reasoning paths]
+ Final Answer: [Clear, definitive answer to the original question]
+ Explanation: [Explanation supporting your final answer, drawing from the best elements of the reasoning paths]"
+
+ Be objective and precise. Your goal is to determine the most accurate answer based on the quality of reasoning and knowledge provided in each path.
+ """
+
+         return system_prompt
+
+     def process(self, query: str) -> Dict[str, Any]:
+         """
+         Process a query using the GKP approach.
+
+         Args:
+             query (str): The query to process
+
+         Returns:
+             Dict[str, Any]: Dictionary containing the full processing results
+         """
+         start_time = time.time()
+         logger.info(f"Processing query: {query}")
+
+         # 1. Generate knowledge
+         knowledge_items = self.knowledge_generator.generate_knowledge(
+             query
+         )
+
+         # 2. Use each knowledge item to reason about the query
+         reasoning_results = []
+         for i, knowledge in enumerate(knowledge_items):
+             logger.debug(f"Reasoning with knowledge item {i+1}")
+             reasoning_result = self.reasoner.reason_and_answer(
+                 query, knowledge
+             )
+             reasoning_result["knowledge"] = knowledge
+             reasoning_results.append(reasoning_result)
+
+         # 3. Coordinate the different reasoning paths to produce final answer
+         final_answer = self._coordinate_answers(
+             query, reasoning_results
+         )
+
+         # 4. Record in conversation history
+         self.conversation.add("user", query)
+         self.conversation.add("assistant", final_answer["response"])
+
+         end_time = time.time()
+         process_time = end_time - start_time
+         logger.info(f"Query processed in {process_time:.2f}s")
+
+         # Return complete results
+         return {
+             "query": query,
+             "knowledge_items": knowledge_items,
+             "reasoning_results": reasoning_results,
+             "final_answer": final_answer,
+             "process_time": process_time,
+         }
+
+     def _coordinate_answers(
+         self, query: str, reasoning_results: List[Dict[str, str]]
+     ) -> Dict[str, str]:
+         """
+         Coordinate multiple reasoning paths to produce the final answer.
+
+         Args:
+             query (str): The original query
+             reasoning_results (List[Dict[str, str]]): Results from multiple reasoning paths
+
+         Returns:
+             Dict[str, str]: The final coordinated answer
+         """
+         # Format the prompt for the coordinator
+         prompt_parts = [f"Question: {query}\n"]
+
+         for i, result in enumerate(reasoning_results):
+             prompt_parts.append(f"Reasoning Path {i+1}:")
+             prompt_parts.append(f"Knowledge: {result['knowledge']}")
+             prompt_parts.append(
+                 f"Explanation: {result['explanation']}"
+             )
+             prompt_parts.append(f"Confidence: {result['confidence']}")
+             prompt_parts.append(f"Answer: {result['answer']}\n")
+
+         prompt_parts.append(
+             "Based on these reasoning paths, provide your final answer."
+         )
+         prompt = "\n".join(prompt_parts)
+
+         logger.debug("Coordinating multiple reasoning paths")
+         response = self.coordinator.run(task=prompt)
+
+         # Parse the coordinated response
+         result = {"analysis": "", "response": "", "explanation": ""}
+
+         if "Analysis:" in response and "Final Answer:" in response:
+             # Extract analysis
+             analysis_start = response.find("Analysis:") + len(
+                 "Analysis:"
+             )
+             analysis_end = response.find("Final Answer:")
+             result["analysis"] = response[
+                 analysis_start:analysis_end
+             ].strip()
+
+             # Extract final answer
+             answer_start = response.find("Final Answer:") + len(
+                 "Final Answer:"
+             )
+
+             if "Explanation:" in response:
+                 answer_end = response.find("Explanation:")
+                 explanation_start = answer_end + len("Explanation:")
+
+                 result["response"] = response[
+                     answer_start:answer_end
+                 ].strip()
+                 result["explanation"] = response[
+                     explanation_start:
+                 ].strip()
+             else:
+                 result["response"] = response[answer_start:].strip()
+         else:
+             # Fallback if not properly formatted
+             result["response"] = response.strip()
+
+         return result
+
+     def run(
+         self, queries: List[str], detailed_output: bool = False
+     ) -> Union[List[str], List[Dict[str, Any]]]:
+         """
+         Run the GKP agent on a list of queries.
+
+         Args:
+             queries (List[str]): List of queries to process
+             detailed_output (bool): Whether to return detailed processing results
+
+         Returns:
+             Union[List[str], List[Dict[str, Any]]]: List of answers or detailed results
+         """
+         results = []
+
+         for i, query in enumerate(queries):
+             logger.info(f"Processing query {i+1}/{len(queries)}")
+             process_result = self.process(query)
+
+             if detailed_output:
+                 results.append(process_result)
+             else:
+                 results.append(
+                     process_result["final_answer"]["response"]
+                 )
+
+         return results
+
+
+ # # Example usage
+ # if __name__ == "__main__":
+ #     # Initialize the GKP Agent
+ #     agent = GKPAgent(
+ #         agent_name="gkp-agent",
+ #         model_name="gpt-4o-mini",  # Using OpenAI's model
+ #         num_knowledge_items=10,  # Generate 10 knowledge items per query
+ #     )
+
+ #     # Example queries
+ #     queries = [
+ #         "Create an entirely new construct of mathematics unifying physics and traditional physics never seen",
+ #     ]
+
+ #     # Run the agent
+ #     results = agent.run(queries)
+
+ #     print(results)
+
+ #     # Print results
+ #     for i, result in enumerate(results):
+ #         print(f"\n\nQUERY {i+1}:")
+ #         print(f"{queries[i]}\n")
+ #         print("FINAL ANSWER:")
+ #         print(f"{result}")
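For orientation, here is a minimal usage sketch of the GKPAgent added above (illustrative only, not part of the packaged file); it relies solely on the constructor, run() signature, and result keys shown in this diff, and the model name is an arbitrary example:

from swarms.agents.gkp_agent import GKPAgent

# Sketch: constructor arguments mirror the ones introduced in this release.
agent = GKPAgent(
    agent_name="gkp-agent",
    model_name="gpt-4o-mini",  # illustrative model choice
    num_knowledge_items=2,
)

# detailed_output=True returns one dict per query with the keys assembled in
# GKPAgent.process(): "query", "knowledge_items", "reasoning_results",
# "final_answer", and "process_time".
results = agent.run(
    ["Why does ice float on liquid water?"], detailed_output=True
)
for r in results:
    print(r["final_answer"]["response"])
    print(f"Processed in {r['process_time']:.2f}s")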
@@ -1,6 +1,8 @@
  from typing import List, Literal
 
  from swarms.agents.consistency_agent import SelfConsistencyAgent
+ from swarms.agents.flexion_agent import ReflexionAgent
+ from swarms.agents.gkp_agent import GKPAgent
  from swarms.agents.i_agent import (
      IterativeReflectiveExpansion as IREAgent,
  )
@@ -14,6 +16,8 @@ agent_types = Literal[
      "reasoning-agent",
      "consistency-agent",
      "ire-agent",
+     "ReflexionAgent",
+     "GKPAgent",
  ]
 
 
@@ -42,6 +46,8 @@ class ReasoningAgentRouter:
          swarm_type: agent_types = "reasoning_duo",
          num_samples: int = 1,
          output_type: OutputType = "dict",
+         num_knowledge_items: int = 6,
+         memory_capacity: int = 6,
      ):
          self.agent_name = agent_name
          self.description = description
@@ -51,6 +57,8 @@ class ReasoningAgentRouter:
          self.swarm_type = swarm_type
          self.num_samples = num_samples
          self.output_type = output_type
+         self.num_knowledge_items = num_knowledge_items
+         self.memory_capacity = memory_capacity
 
      def select_swarm(self):
          """
@@ -98,6 +106,20 @@ class ReasoningAgentRouter:
                  output_type=self.output_type,
              )
 
+         elif self.swarm_type == "ReflexionAgent":
+             return ReflexionAgent(
+                 agent_name=self.agent_name,
+                 system_prompt=self.system_prompt,
+                 model_name=self.model_name,
+                 max_loops=self.max_loops,
+             )
+
+         elif self.swarm_type == "GKPAgent":
+             return GKPAgent(
+                 agent_name=self.agent_name,
+                 model_name=self.model_name,
+                 num_knowledge_items=self.num_knowledge_items,
+             )
          else:
              raise ValueError(f"Invalid swarm type: {self.swarm_type}")
 
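The routing changes above can be exercised as sketched below; this is illustrative only, and both the import path and the pre-existing constructor arguments (agent_name, model_name) are assumed from earlier releases rather than shown in this hunk:

# Assumed import path; the diff does not name the module defining ReasoningAgentRouter.
from swarms.agents.reasoning_agents import ReasoningAgentRouter

router = ReasoningAgentRouter(
    agent_name="reasoning-router",
    model_name="openai/o1",
    swarm_type="GKPAgent",        # new option in this release, alongside "ReflexionAgent"
    num_knowledge_items=6,        # forwarded to GKPAgent by the new branch above
)
gkp = router.select_swarm()       # returns a configured GKPAgent instance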
@@ -0,0 +1,38 @@
+ AGENT_JUDGE_PROMPT = """
+ # Adaptive Output Evaluator - Role and Protocol
+
+ Your role is to critically evaluate outputs across diverse domains by first understanding the context, then applying domain-appropriate evaluation criteria to provide a well-reasoned assessment.
+
+ ## Core Responsibilities
+
+ 1. **Context Assessment**
+    - Begin by identifying the domain and specific context of the evaluation (technical, creative, analytical, etc.)
+    - Determine the appropriate evaluation framework based on domain requirements
+    - Adjust evaluation criteria and standards to match domain-specific best practices
+    - If domain is unclear, request clarification with: DOMAIN CLARIFICATION NEEDED: *specific_question*
+
+ 2. **Input Validation**
+    - Ensure all necessary information is present for a comprehensive evaluation
+    - Identify gaps in provided materials that would impact assessment quality
+    - Request additional context when needed with: ADDITIONAL CONTEXT NEEDED: *specific_information*
+    - Consider implicit domain knowledge that may influence proper evaluation
+
+ 3. **Evidence-Based Analysis**
+    - Apply domain-specific criteria to evaluate accuracy, effectiveness, and appropriateness
+    - Distinguish between factual claims, reasoned arguments, and subjective opinions
+    - Flag assumptions or claims lacking sufficient support within domain standards
+    - Evaluate internal consistency and alignment with established principles in the field
+    - For technical domains, verify logical and methodological soundness
+
+ 4. **Comparative Assessment**
+    - When multiple solutions or approaches are presented, compare relative strengths
+    - Identify trade-offs between different approaches within domain constraints
+    - Consider alternative interpretations or solutions not explicitly mentioned
+    - Balance competing priorities based on domain-specific values and standards
+
+ 5. **Final Assessment Declaration**
+    - Present your final assessment with: **EVALUATION_COMPLETE \\boxed{_assessment_summary_}**
+    - Follow with a concise justification referencing domain-specific standards
+    - Include constructive feedback for improvement where appropriate
+    - When appropriate, suggest alternative approaches that align with domain best practices
+ """
@@ -84,6 +84,7 @@ from swarms.structs.swarms_api import (
  )
 
  from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
+ from swarms.structs.deep_research_swarm import DeepResearchSwarm
 
  __all__ = [
      "Agent",
@@ -159,4 +160,5 @@ __all__ = [
      "AgentsBuilder",
      "MALT",
      "DeHallucinationSwarm",
+     "DeepResearchSwarm",
  ]