swarms 7.7.1__py3-none-any.whl → 7.7.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1,729 +0,0 @@
- """
- TalkHier: A hierarchical multi-agent framework for content generation and refinement.
- Implements structured communication and evaluation protocols.
- """
-
- import json
- import logging
- from dataclasses import dataclass, asdict, field
- from datetime import datetime
- from enum import Enum
- from pathlib import Path
- from typing import Any, Dict, List, Optional, Union
- from concurrent.futures import ThreadPoolExecutor
-
- from swarms import Agent
- from swarms.structs.conversation import Conversation
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
-
- class AgentRole(Enum):
-     """Defines the possible roles for agents in the system."""
-
-     SUPERVISOR = "supervisor"
-     GENERATOR = "generator"
-     EVALUATOR = "evaluator"
-     REVISOR = "revisor"
-
-
- @dataclass
- class CommunicationEvent:
-     """Represents a structured communication event between agents."""
-
-     message: str
-     background: Optional[str] = None
-     intermediate_output: Optional[Dict[str, Any]] = None
-     sender: str = ""
-     receiver: str = ""
-     # default_factory so each event gets its own creation timestamp
-     timestamp: str = field(default_factory=lambda: str(datetime.now()))
-
-
- class TalkHier:
-     """
-     A hierarchical multi-agent system for content generation and refinement.
-
-     Implements the TalkHier framework with structured communication protocols
-     and hierarchical refinement processes.
-
-     Attributes:
-         max_iterations: Maximum number of refinement iterations
-         quality_threshold: Minimum score required for content acceptance
-         model_name: Name of the LLM model to use
-         base_path: Path for saving agent states
-     """
-
-     def __init__(
-         self,
-         max_iterations: int = 3,
-         quality_threshold: float = 0.8,
-         model_name: str = "gpt-4",
-         base_path: Optional[str] = None,
-         return_string: bool = False,
-     ):
-         """Initialize the TalkHier system."""
-         self.max_iterations = max_iterations
-         self.quality_threshold = quality_threshold
-         self.model_name = model_name
-         self.return_string = return_string
-         self.base_path = (
-             Path(base_path) if base_path else Path("./agent_states")
-         )
-         self.base_path.mkdir(exist_ok=True)
-
-         # Initialize agents
-         self._init_agents()
-
-         # Create conversation
-         self.conversation = Conversation()
-
-     def _safely_parse_json(self, json_str: str) -> Dict[str, Any]:
-         """
-         Safely parse JSON string, handling various formats and potential errors.
-
-         Args:
-             json_str: String containing JSON data
-
-         Returns:
-             Parsed dictionary
-         """
-         try:
-             # Try direct JSON parsing
-             return json.loads(json_str)
-         except json.JSONDecodeError:
-             try:
-                 # Try extracting JSON from potential text wrapper
-                 import re
-
-                 json_match = re.search(r"\{.*\}", json_str, re.DOTALL)
-                 if json_match:
-                     return json.loads(json_match.group())
-                 # Try extracting from markdown code blocks
-                 code_block_match = re.search(
-                     r"```(?:json)?\s*(\{.*?\})\s*```",
-                     json_str,
-                     re.DOTALL,
-                 )
-                 if code_block_match:
-                     return json.loads(code_block_match.group(1))
-             except Exception as e:
-                 logger.warning(f"Failed to extract JSON: {str(e)}")
-
-         # Fallback: create structured dict from text
-         return {
-             "content": json_str,
-             "metadata": {
-                 "parsed": False,
-                 "timestamp": str(datetime.now()),
-             },
-         }
-
-     def _get_criteria_generator_prompt(self) -> str:
-         """Get the prompt for the criteria generator agent."""
-         return """You are a Criteria Generator agent responsible for creating task-specific evaluation criteria.
- Analyze the task and generate appropriate evaluation criteria based on:
- - Task type and complexity
- - Required domain knowledge
- - Target audience expectations
- - Quality requirements
-
- Output all responses in strict JSON format:
- {
-     "criteria": {
-         "criterion_name": {
-             "description": "Detailed description of what this criterion measures",
-             "importance": "Weight from 0.0-1.0 indicating importance",
-             "evaluation_guide": "Guidelines for how to evaluate this criterion"
-         }
-     },
-     "metadata": {
-         "task_type": "Classification of the task type",
-         "complexity_level": "Assessment of task complexity",
-         "domain_focus": "Primary domain or field of the task"
-     }
- }"""
-
-     def _init_agents(self) -> None:
-         """Initialize all agents with their specific roles and prompts."""
-         # Main supervisor agent
-         self.main_supervisor = Agent(
-             agent_name="Main-Supervisor",
-             system_prompt=self._get_supervisor_prompt(),
-             model_name=self.model_name,
-             max_loops=1,
-             saved_state_path=str(
-                 self.base_path / "main_supervisor.json"
-             ),
-             verbose=True,
-         )
-
-         # Generator agent
-         self.generator = Agent(
-             agent_name="Content-Generator",
-             system_prompt=self._get_generator_prompt(),
-             model_name=self.model_name,
-             max_loops=1,
-             saved_state_path=str(self.base_path / "generator.json"),
-             verbose=True,
-         )
-
-         # Criteria Generator agent
-         self.criteria_generator = Agent(
-             agent_name="Criteria-Generator",
-             system_prompt=self._get_criteria_generator_prompt(),
-             model_name=self.model_name,
-             max_loops=1,
-             saved_state_path=str(
-                 self.base_path / "criteria_generator.json"
-             ),
-             verbose=True,
-         )
-
-         # Evaluators without criteria (will be set during run)
-         self.evaluators = []
-         for i in range(3):
-             self.evaluators.append(
-                 Agent(
-                     agent_name=f"Evaluator-{i}",
-                     system_prompt=self._get_evaluator_prompt(i),
-                     model_name=self.model_name,
-                     max_loops=1,
-                     saved_state_path=str(
-                         self.base_path / f"evaluator_{i}.json"
-                     ),
-                     verbose=True,
-                 )
-             )
-
-         # Revisor agent
-         self.revisor = Agent(
-             agent_name="Content-Revisor",
-             system_prompt=self._get_revisor_prompt(),
-             model_name=self.model_name,
-             max_loops=1,
-             saved_state_path=str(self.base_path / "revisor.json"),
-             verbose=True,
-         )
-
-     def _generate_dynamic_criteria(self, task: str) -> Dict[str, str]:
-         """
-         Generate dynamic evaluation criteria based on the task.
-
-         Args:
-             task: Content generation task description
-
-         Returns:
-             Dictionary containing dynamic evaluation criteria
-         """
-         # Example dynamic criteria generation logic
-         if "technical" in task.lower():
-             return {
-                 "accuracy": "Technical correctness and source reliability",
-                 "clarity": "Readability and logical structure",
-                 "depth": "Comprehensive coverage of technical details",
-                 "engagement": "Interest level and relevance to the audience",
-                 "technical_quality": "Grammar, spelling, and formatting",
-             }
-         else:
-             return {
-                 "accuracy": "Factual correctness and source reliability",
-                 "clarity": "Readability and logical structure",
-                 "coherence": "Logical consistency and argument structure",
-                 "engagement": "Interest level and relevance to the audience",
-                 "completeness": "Coverage of the topic and depth",
-                 "technical_quality": "Grammar, spelling, and formatting",
-             }
-
-     def _get_supervisor_prompt(self) -> str:
-         """Get the prompt for the supervisor agent."""
-         return """You are a Supervisor agent responsible for orchestrating the content generation process and selecting the best evaluation criteria.
-
- You must:
- 1. Analyze tasks and develop strategies
- 2. Review multiple evaluator feedback
- 3. Select the most appropriate evaluation based on:
-    - Completeness of criteria
-    - Relevance to task
-    - Quality of feedback
- 4. Provide clear instructions for content revision
-
- Output all responses in strict JSON format:
- {
-     "thoughts": {
-         "task_analysis": "Analysis of requirements, audience, scope",
-         "strategy": "Step-by-step plan and success metrics",
-         "evaluation_selection": {
-             "chosen_evaluator": "ID of selected evaluator",
-             "reasoning": "Why this evaluation was chosen",
-             "key_criteria": ["List of most important criteria"]
-         }
-     },
-     "next_action": {
-         "agent": "Next agent to engage",
-         "instruction": "Detailed instructions with context"
-     }
- }"""
-
-     def _get_generator_prompt(self) -> str:
-         """Get the prompt for the generator agent."""
-         return """You are a Generator agent responsible for creating high-quality, original content. Your role is to produce content that is engaging, informative, and tailored to the target audience.
-
- When generating content:
- - Thoroughly research and fact-check all information
- - Structure content logically with clear flow
- - Use appropriate tone and language for the target audience
- - Include relevant examples and explanations
- - Ensure content is original and plagiarism-free
- - Consider SEO best practices where applicable
-
- Output all responses in strict JSON format:
- {
-     "content": {
-         "main_body": "The complete generated content with proper formatting and structure",
-         "metadata": {
-             "word_count": "Accurate word count of main body",
-             "target_audience": "Detailed audience description",
-             "key_points": ["List of main points covered"],
-             "sources": ["List of reference sources if applicable"],
-             "readability_level": "Estimated reading level",
-             "tone": "Description of content tone"
-         }
-     }
- }"""
-
-     def _get_evaluator_prompt(self, evaluator_id: int) -> str:
-         """Get the base prompt for an evaluator agent."""
-         return f"""You are Evaluator {evaluator_id}, responsible for critically assessing content quality. Your evaluation must be thorough, objective, and constructive.
-
- When receiving content to evaluate:
- 1. First analyze the task description to determine appropriate evaluation criteria
- 2. Generate specific criteria based on task requirements
- 3. Evaluate content against these criteria
- 4. Provide detailed feedback for each criterion
-
- Output all responses in strict JSON format:
- {{
-     "generated_criteria": {{
-         "criteria_name": "description of what this criterion measures",
-         // Add more criteria based on task analysis
-     }},
-     "scores": {{
-         "overall": "0.0-1.0 composite score",
-         "categories": {{
-             // Scores for each generated criterion
-             "criterion_name": "0.0-1.0 score with evidence"
-         }}
-     }},
-     "feedback": [
-         "Specific, actionable improvement suggestions per criterion"
-     ],
-     "strengths": ["Notable positive aspects"],
-     "weaknesses": ["Areas needing improvement"]
- }}"""
-
-     def _get_revisor_prompt(self) -> str:
-         """Get the prompt for the revisor agent."""
-         return """You are a Revisor agent responsible for improving content based on evaluator feedback. Your role is to enhance content while maintaining its core message and purpose.
-
- When revising content:
- - Address all evaluator feedback systematically
- - Maintain consistency in tone and style
- - Preserve accurate information
- - Enhance clarity and flow
- - Fix technical issues
- - Optimize for target audience
- - Track all changes made
-
- Output all responses in strict JSON format:
- {
-     "revised_content": {
-         "main_body": "Complete revised content incorporating all improvements",
-         "metadata": {
-             "word_count": "Updated word count",
-             "changes_made": [
-                 "Detailed list of specific changes and improvements",
-                 "Reasoning for each major revision",
-                 "Feedback points addressed"
-             ],
-             "improvement_summary": "Overview of main enhancements",
-             "preserved_elements": ["Key elements maintained from original"],
-             "revision_approach": "Strategy used for revisions"
-         }
-     }
- }"""
-
-     def _generate_criteria_for_task(
-         self, task: str
-     ) -> Dict[str, Any]:
-         """Generate evaluation criteria for the given task."""
-         try:
-             criteria_input = {
-                 "task": task,
-                 "instruction": "Generate specific evaluation criteria for this task.",
-             }
-
-             criteria_response = self.criteria_generator.run(
-                 json.dumps(criteria_input)
-             )
-             self.conversation.add(
-                 role="Criteria-Generator", content=criteria_response
-             )
-
-             return self._safely_parse_json(criteria_response)
-         except Exception as e:
-             logger.error(f"Error generating criteria: {str(e)}")
-             return {"criteria": {}}
-
-     def _create_comm_event(
-         self, sender: Agent, receiver: Agent, response: Dict
-     ) -> CommunicationEvent:
-         """Create a structured communication event between agents."""
-         return CommunicationEvent(
-             message=response.get("message", ""),
-             background=response.get("background", ""),
-             intermediate_output=response.get(
-                 "intermediate_output", {}
-             ),
-             sender=sender.agent_name,
-             receiver=receiver.agent_name,
-         )
-
-     def _evaluate_content(
-         self, content: Union[str, Dict], task: str
-     ) -> Dict[str, Any]:
-         """Coordinate evaluation process with parallel evaluator execution."""
-         try:
-             content_dict = (
-                 self._safely_parse_json(content)
-                 if isinstance(content, str)
-                 else content
-             )
-             criteria_data = self._generate_criteria_for_task(task)
-
-             def run_evaluator(evaluator, eval_input):
-                 response = evaluator.run(json.dumps(eval_input))
-                 return {
-                     "evaluator_id": evaluator.agent_name,
-                     "evaluation": self._safely_parse_json(response),
-                 }
-
-             eval_inputs = [
-                 {
-                     "task": task,
-                     "content": content_dict,
-                     "criteria": criteria_data.get("criteria", {}),
-                 }
-                 for _ in self.evaluators
-             ]
-
-             with ThreadPoolExecutor() as executor:
-                 evaluations = list(
-                     executor.map(
-                         lambda x: run_evaluator(*x),
-                         zip(self.evaluators, eval_inputs),
-                     )
-                 )
-
-             supervisor_input = {
-                 "evaluations": evaluations,
-                 "task": task,
-                 "instruction": "Synthesize feedback",
-             }
-             supervisor_response = self.main_supervisor.run(
-                 json.dumps(supervisor_input)
-             )
-             aggregated_eval = self._safely_parse_json(
-                 supervisor_response
-             )
-
-             # Track communication
-             comm_event = self._create_comm_event(
-                 self.main_supervisor, self.revisor, aggregated_eval
-             )
-             self.conversation.add(
-                 role="Communication",
-                 content=json.dumps(asdict(comm_event)),
-             )
-
-             return aggregated_eval
-
-         except Exception as e:
-             logger.error(f"Evaluation error: {str(e)}")
-             return self._get_fallback_evaluation()
-
-     def _get_fallback_evaluation(self) -> Dict[str, Any]:
-         """Get a safe fallback evaluation result."""
-         return {
-             "scores": {
-                 "overall": 0.5,
-                 "categories": {
-                     "accuracy": 0.5,
-                     "clarity": 0.5,
-                     "coherence": 0.5,
-                 },
-             },
-             "feedback": ["Evaluation failed"],
-             "metadata": {
-                 "timestamp": str(datetime.now()),
-                 "is_fallback": True,
-             },
-         }
-
-     def _aggregate_evaluations(
-         self, evaluations: List[Dict[str, Any]]
-     ) -> Dict[str, Any]:
-         """Aggregate multiple evaluation results into a single evaluation."""
-         try:
-             # Collect all unique criteria from evaluators
-             all_criteria = set()
-             for eval_data in evaluations:
-                 categories = eval_data.get("scores", {}).get(
-                     "categories", {}
-                 )
-                 all_criteria.update(categories.keys())
-
-             # Initialize score aggregation
-             aggregated_scores = {
-                 criterion: [] for criterion in all_criteria
-             }
-             overall_scores = []
-             all_feedback = []
-
-             # Collect scores and feedback
-             for eval_data in evaluations:
-                 scores = eval_data.get("scores", {})
-                 overall_scores.append(scores.get("overall", 0.5))
-
-                 categories = scores.get("categories", {})
-                 for criterion in all_criteria:
-                     if criterion in categories:
-                         aggregated_scores[criterion].append(
-                             categories.get(criterion, 0.5)
-                         )
-
-                 all_feedback.extend(eval_data.get("feedback", []))
-
-             # Calculate means
-             def safe_mean(scores: List[float]) -> float:
-                 return sum(scores) / len(scores) if scores else 0.5
-
-             return {
-                 "scores": {
-                     "overall": safe_mean(overall_scores),
-                     "categories": {
-                         criterion: safe_mean(scores)
-                         for criterion, scores in aggregated_scores.items()
-                     },
-                 },
-                 "feedback": list(set(all_feedback)),
-                 "metadata": {
-                     "evaluator_count": len(evaluations),
-                     "criteria_used": list(all_criteria),
-                     "timestamp": str(datetime.now()),
-                 },
-             }
-
-         except Exception as e:
-             logger.error(f"Error in evaluation aggregation: {str(e)}")
-             return self._get_fallback_evaluation()
-
-     def _evaluate_and_revise(
-         self, content: Union[str, Dict], task: str
-     ) -> Dict[str, Any]:
-         """Coordinate evaluation and revision process."""
-         try:
-             # Get evaluations and supervisor selection
-             evaluation_result = self._evaluate_content(content, task)
-
-             # Extract selected evaluation and supervisor reasoning
-             selected_evaluation = evaluation_result.get(
-                 "selected_evaluation", {}
-             )
-             supervisor_reasoning = evaluation_result.get(
-                 "supervisor_reasoning", {}
-             )
-
-             # Prepare revision input with selected evaluation
-             revision_input = {
-                 "content": content,
-                 "evaluation": selected_evaluation,
-                 "supervisor_feedback": supervisor_reasoning,
-                 "instruction": "Revise the content based on the selected evaluation feedback",
-             }
-
-             # Get revision from content generator
-             revision_response = self.generator.run(
-                 json.dumps(revision_input)
-             )
-             revised_content = self._safely_parse_json(
-                 revision_response
-             )
-
-             return {
-                 "content": revised_content,
-                 "evaluation": evaluation_result,
-             }
-         except Exception as e:
-             logger.error(f"Evaluation and revision error: {str(e)}")
-             return {
-                 "content": content,
-                 "evaluation": self._get_fallback_evaluation(),
-             }
-
-     def run(self, task: str) -> Dict[str, Any]:
-         """
-         Generate and iteratively refine content based on the given task.
-
-         Args:
-             task: Content generation task description
-
-         Returns:
-             Dictionary containing final content and metadata
-         """
-         logger.info(f"Starting content generation for task: {task}")
-
-         try:
-             # Get initial direction from supervisor
-             supervisor_response = self.main_supervisor.run(task)
-
-             self.conversation.add(
-                 role=self.main_supervisor.agent_name,
-                 content=supervisor_response,
-             )
-
-             supervisor_data = self._safely_parse_json(
-                 supervisor_response
-             )
-
-             # Generate initial content
-             generator_response = self.generator.run(
-                 json.dumps(supervisor_data.get("next_action", {}))
-             )
-
-             self.conversation.add(
-                 role=self.generator.agent_name,
-                 content=generator_response,
-             )
-
-             current_content = self._safely_parse_json(
-                 generator_response
-             )
-
-             for iteration in range(self.max_iterations):
-                 logger.info(f"Starting iteration {iteration + 1}")
-
-                 # Evaluate and revise content
-                 result = self._evaluate_and_revise(
-                     current_content, task
-                 )
-                 evaluation = result["evaluation"]
-                 current_content = result["content"]
-
-                 # Check if quality threshold is met
-                 selected_eval = evaluation.get(
-                     "selected_evaluation", {}
-                 )
-                 overall_score = selected_eval.get("scores", {}).get(
-                     "overall", 0.0
-                 )
-
-                 if overall_score >= self.quality_threshold:
-                     logger.info(
-                         "Quality threshold met, returning content"
-                     )
-                     return {
-                         "content": current_content.get(
-                             "content", {}
-                         ).get("main_body", ""),
-                         "final_score": overall_score,
-                         "iterations": iteration + 1,
-                         "metadata": {
-                             "content_metadata": current_content.get(
-                                 "content", {}
-                             ).get("metadata", {}),
-                             "evaluation": evaluation,
-                         },
-                     }
-
-                 # Add to conversation history
-                 self.conversation.add(
-                     role=self.generator.agent_name,
-                     content=json.dumps(current_content),
-                 )
-
-             logger.warning(
-                 "Max iterations reached without meeting quality threshold"
-             )
-
-         except Exception as e:
-             logger.error(f"Error in generate_and_refine: {str(e)}")
-             current_content = {
-                 "content": {"main_body": f"Error: {str(e)}"}
-             }
-             evaluation = self._get_fallback_evaluation()
-
-         if self.return_string:
-             return self.conversation.return_history_as_string()
-         else:
-             return {
-                 "content": current_content.get("content", {}).get(
-                     "main_body", ""
-                 ),
-                 "final_score": evaluation["scores"]["overall"],
-                 "iterations": self.max_iterations,
-                 "metadata": {
-                     "content_metadata": current_content.get(
-                         "content", {}
-                     ).get("metadata", {}),
-                     "evaluation": evaluation,
-                     "error": "Max iterations reached",
-                 },
-             }
-
-     def save_state(self) -> None:
-         """Save the current state of all agents."""
-         for agent in [
-             self.main_supervisor,
-             self.generator,
-             *self.evaluators,
-             self.revisor,
-         ]:
-             try:
-                 agent.save_state()
-             except Exception as e:
-                 logger.error(
-                     f"Error saving state for {agent.agent_name}: {str(e)}"
-                 )
-
-     def load_state(self) -> None:
-         """Load the saved state of all agents."""
-         for agent in [
-             self.main_supervisor,
-             self.generator,
-             *self.evaluators,
-             self.revisor,
-         ]:
-             try:
-                 agent.load_state()
-             except Exception as e:
-                 logger.error(
-                     f"Error loading state for {agent.agent_name}: {str(e)}"
-                 )
-
-
- # if __name__ == "__main__":
- #     try:
- #         talkhier = TalkHier(
- #             max_iterations=1,
- #             quality_threshold=0.8,
- #             model_name="gpt-4o",
- #             return_string=False,
- #         )
-
- #         # Ask for user input
- #         task = input("Enter the content generation task description: ")
- #         result = talkhier.run(task)
-
- #     except Exception as e:
- #         logger.error(f"Error in main execution: {str(e)}")
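
For context, the commented-out entry point at the end of the removed module shows how `TalkHier` was meant to be driven. Below is a minimal usage sketch mirroring that example; the import path is an assumption for illustration only, since this diff does not show where the module lived inside the 7.7.1 wheel (it is absent from 7.7.2 onward).

```python
# Usage sketch only: mirrors the commented-out __main__ block in the removed file.
# The import path is hypothetical; adjust it to wherever the module was packaged
# in swarms 7.7.1.
from swarms.structs.talk_hier import TalkHier  # hypothetical path

talkhier = TalkHier(
    max_iterations=1,
    quality_threshold=0.8,
    model_name="gpt-4o",
    return_string=False,  # return a dict instead of the conversation history string
)

result = talkhier.run("Write a short technical overview of hierarchical multi-agent systems.")
print(result["content"])
```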