swarms-7.7.1-py3-none-any.whl → swarms-7.7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,818 +0,0 @@
- import asyncio
- import json
- import logging
- import os
- import threading
- import uuid
- from contextlib import asynccontextmanager
- from dataclasses import asdict, dataclass
- from datetime import datetime
- from enum import Enum
- from logging.handlers import RotatingFileHandler
- from typing import Any, Dict, List, Optional
-
- from pydantic import BaseModel, Field
-
- from swarms.structs.agent import Agent
- from swarms.structs.base_workflow import BaseWorkflow
- from swarms.utils.loguru_logger import initialize_logger
-
- # Base logger initialization
- logger = initialize_logger("async_workflow")
-
-
- # Pydantic models for structured data
- class AgentOutput(BaseModel):
-     agent_id: str
-     agent_name: str
-     task_id: str
-     input: str
-     output: Any
-     start_time: datetime
-     end_time: datetime
-     status: str
-     error: Optional[str] = None
-
-
- class WorkflowOutput(BaseModel):
-     workflow_id: str
-     workflow_name: str
-     start_time: datetime
-     end_time: datetime
-     total_agents: int
-     successful_tasks: int
-     failed_tasks: int
-     agent_outputs: List[AgentOutput]
-     metadata: Dict[str, Any] = Field(default_factory=dict)
-
-
- class SpeakerRole(str, Enum):
-     COORDINATOR = "coordinator"
-     CRITIC = "critic"
-     EXECUTOR = "executor"
-     VALIDATOR = "validator"
-     DEFAULT = "default"
-
-
- class SpeakerMessage(BaseModel):
-     role: SpeakerRole
-     content: Any
-     timestamp: datetime
-     agent_name: str
-     metadata: Dict[str, Any] = Field(default_factory=dict)
-
-
- class GroupChatConfig(BaseModel):
-     max_loops: int = 10
-     timeout_per_turn: float = 30.0
-     require_all_speakers: bool = False
-     allow_concurrent: bool = True
-     save_history: bool = True
-
-
- @dataclass
- class SharedMemoryItem:
-     key: str
-     value: Any
-     timestamp: datetime
-     author: str
-     metadata: Dict[str, Any] = None
-
-
- @dataclass
- class SpeakerConfig:
-     role: SpeakerRole
-     agent: Any
-     priority: int = 0
-     concurrent: bool = True
-     timeout: float = 30.0
-     required: bool = False
-
-
- class SharedMemory:
-     """Thread-safe shared memory implementation with persistence"""
-
-     def __init__(self, persistence_path: Optional[str] = None):
-         self._memory = {}
-         self._lock = threading.Lock()
-         self._persistence_path = persistence_path
-         self._load_from_disk()
-
-     def set(
-         self,
-         key: str,
-         value: Any,
-         author: str,
-         metadata: Dict[str, Any] = None,
-     ) -> None:
-         with self._lock:
-             item = SharedMemoryItem(
-                 key=key,
-                 value=value,
-                 timestamp=datetime.utcnow(),
-                 author=author,
-                 metadata=metadata or {},
-             )
-             self._memory[key] = item
-             self._persist_to_disk()
-
-     def get(self, key: str) -> Optional[Any]:
-         with self._lock:
-             item = self._memory.get(key)
-             return item.value if item else None
-
-     def get_with_metadata(
-         self, key: str
-     ) -> Optional[SharedMemoryItem]:
-         with self._lock:
-             return self._memory.get(key)
-
-     def _persist_to_disk(self) -> None:
-         if self._persistence_path:
-             with open(self._persistence_path, "w") as f:
-                 json.dump(
-                     {k: asdict(v) for k, v in self._memory.items()}, f
-                 )
-
-     def _load_from_disk(self) -> None:
-         if self._persistence_path and os.path.exists(
-             self._persistence_path
-         ):
-             with open(self._persistence_path, "r") as f:
-                 data = json.load(f)
-                 self._memory = {
-                     k: SharedMemoryItem(**v) for k, v in data.items()
-                 }
-
-
- class SpeakerSystem:
-     """Manages speaker interactions and group chat functionality"""
-
-     def __init__(self, default_timeout: float = 30.0):
-         self.speakers: Dict[SpeakerRole, SpeakerConfig] = {}
-         self.message_history: List[SpeakerMessage] = []
-         self.default_timeout = default_timeout
-         self._lock = threading.Lock()
-
-     def add_speaker(self, config: SpeakerConfig) -> None:
-         with self._lock:
-             self.speakers[config.role] = config
-
-     def remove_speaker(self, role: SpeakerRole) -> None:
-         with self._lock:
-             self.speakers.pop(role, None)
-
-     async def _execute_speaker(
-         self,
-         config: SpeakerConfig,
-         input_data: Any,
-         context: Dict[str, Any] = None,
-     ) -> SpeakerMessage:
-         try:
-             result = await asyncio.wait_for(
-                 config.agent.arun(input_data), timeout=config.timeout
-             )
-
-             return SpeakerMessage(
-                 role=config.role,
-                 content=result,
-                 timestamp=datetime.utcnow(),
-                 agent_name=config.agent.agent_name,
-                 metadata={"context": context or {}},
-             )
-         except asyncio.TimeoutError:
-             return SpeakerMessage(
-                 role=config.role,
-                 content=None,
-                 timestamp=datetime.utcnow(),
-                 agent_name=config.agent.agent_name,
-                 metadata={"error": "Timeout"},
-             )
-         except Exception as e:
-             return SpeakerMessage(
-                 role=config.role,
-                 content=None,
-                 timestamp=datetime.utcnow(),
-                 agent_name=config.agent.agent_name,
-                 metadata={"error": str(e)},
-             )
-
-
- class AsyncWorkflow(BaseWorkflow):
-     """Enhanced asynchronous workflow with advanced speaker system"""
-
-     def __init__(
-         self,
-         name: str = "AsyncWorkflow",
-         agents: List[Agent] = None,
-         max_workers: int = 5,
-         dashboard: bool = False,
-         autosave: bool = False,
-         verbose: bool = False,
-         log_path: str = "workflow.log",
-         shared_memory_path: Optional[str] = "shared_memory.json",
-         enable_group_chat: bool = False,
-         group_chat_config: Optional[GroupChatConfig] = None,
-         **kwargs,
-     ):
-         super().__init__(agents=agents, **kwargs)
-         self.workflow_id = str(uuid.uuid4())
-         self.name = name
-         self.agents = agents or []
-         self.max_workers = max_workers
-         self.dashboard = dashboard
-         self.autosave = autosave
-         self.verbose = verbose
-         self.task_pool = []
-         self.results = []
-         self.shared_memory = SharedMemory(shared_memory_path)
-         self.speaker_system = SpeakerSystem()
-         self.enable_group_chat = enable_group_chat
-         self.group_chat_config = (
-             group_chat_config or GroupChatConfig()
-         )
-         self._setup_logging(log_path)
-         self.metadata = {}
-
-     def _setup_logging(self, log_path: str) -> None:
-         """Configure rotating file logger"""
-         self.logger = logging.getLogger(
-             f"workflow_{self.workflow_id}"
-         )
-         self.logger.setLevel(
-             logging.DEBUG if self.verbose else logging.INFO
-         )
-
-         handler = RotatingFileHandler(
-             log_path, maxBytes=10 * 1024 * 1024, backupCount=5
-         )
-         formatter = logging.Formatter(
-             "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-         )
-         handler.setFormatter(formatter)
-         self.logger.addHandler(handler)
-
-     def add_default_speakers(self) -> None:
-         """Add all agents as default concurrent speakers"""
-         for agent in self.agents:
-             config = SpeakerConfig(
-                 role=SpeakerRole.DEFAULT,
-                 agent=agent,
-                 concurrent=True,
-                 timeout=30.0,
-                 required=False,
-             )
-             self.speaker_system.add_speaker(config)
-
-     async def run_concurrent_speakers(
-         self, task: str, context: Dict[str, Any] = None
-     ) -> List[SpeakerMessage]:
-         """Run all concurrent speakers in parallel"""
-         concurrent_tasks = [
-             self.speaker_system._execute_speaker(
-                 config, task, context
-             )
-             for config in self.speaker_system.speakers.values()
-             if config.concurrent
-         ]
-
-         results = await asyncio.gather(
-             *concurrent_tasks, return_exceptions=True
-         )
-         return [r for r in results if isinstance(r, SpeakerMessage)]
-
-     async def run_sequential_speakers(
-         self, task: str, context: Dict[str, Any] = None
-     ) -> List[SpeakerMessage]:
-         """Run non-concurrent speakers in sequence"""
-         results = []
-         for config in sorted(
-             self.speaker_system.speakers.values(),
-             key=lambda x: x.priority,
-         ):
-             if not config.concurrent:
-                 result = await self.speaker_system._execute_speaker(
-                     config, task, context
-                 )
-                 results.append(result)
-         return results
-
-     async def run_group_chat(
-         self, initial_message: str, context: Dict[str, Any] = None
-     ) -> List[SpeakerMessage]:
-         """Run a group chat discussion among speakers"""
-         if not self.enable_group_chat:
-             raise ValueError(
-                 "Group chat is not enabled for this workflow"
-             )
-
-         messages: List[SpeakerMessage] = []
-         current_turn = 0
-
-         while current_turn < self.group_chat_config.max_loops:
-             turn_context = {
-                 "turn": current_turn,
-                 "history": messages,
-                 **(context or {}),
-             }
-
-             if self.group_chat_config.allow_concurrent:
-                 turn_messages = await self.run_concurrent_speakers(
-                     (
-                         initial_message
-                         if current_turn == 0
-                         else messages[-1].content
-                     ),
-                     turn_context,
-                 )
-             else:
-                 turn_messages = await self.run_sequential_speakers(
-                     (
-                         initial_message
-                         if current_turn == 0
-                         else messages[-1].content
-                     ),
-                     turn_context,
-                 )
-
-             messages.extend(turn_messages)
-
-             # Check if we should continue the conversation
-             if self._should_end_group_chat(messages):
-                 break
-
-             current_turn += 1
-
-         if self.group_chat_config.save_history:
-             self.speaker_system.message_history.extend(messages)
-
-         return messages
-
-     def _should_end_group_chat(
-         self, messages: List[SpeakerMessage]
-     ) -> bool:
-         """Determine if group chat should end based on messages"""
-         if not messages:
-             return True
-
-         # Check if all required speakers have participated
-         if self.group_chat_config.require_all_speakers:
-             participating_roles = {msg.role for msg in messages}
-             required_roles = {
-                 role
-                 for role, config in self.speaker_system.speakers.items()
-                 if config.required
-             }
-             if not required_roles.issubset(participating_roles):
-                 return False
-
-         return False
-
-     @asynccontextmanager
-     async def task_context(self):
-         """Context manager for task execution with proper cleanup"""
-         start_time = datetime.utcnow()
-         try:
-             yield
-         finally:
-             end_time = datetime.utcnow()
-             if self.autosave:
-                 await self._save_results(start_time, end_time)
-
-     async def _execute_agent_task(
-         self, agent: Agent, task: str
-     ) -> AgentOutput:
-         """Execute a single agent task with enhanced error handling and monitoring"""
-         start_time = datetime.utcnow()
-         task_id = str(uuid.uuid4())
-
-         try:
-             self.logger.info(
-                 f"Agent {agent.agent_name} starting task {task_id}: {task}"
-             )
-
-             result = await agent.arun(task)
-
-             end_time = datetime.utcnow()
-             self.logger.info(
-                 f"Agent {agent.agent_name} completed task {task_id}"
-             )
-
-             return AgentOutput(
-                 agent_id=str(id(agent)),
-                 agent_name=agent.agent_name,
-                 task_id=task_id,
-                 input=task,
-                 output=result,
-                 start_time=start_time,
-                 end_time=end_time,
-                 status="success",
-             )
-
-         except Exception as e:
-             end_time = datetime.utcnow()
-             self.logger.error(
-                 f"Error in agent {agent.agent_name} task {task_id}: {str(e)}",
-                 exc_info=True,
-             )
-
-             return AgentOutput(
-                 agent_id=str(id(agent)),
-                 agent_name=agent.agent_name,
-                 task_id=task_id,
-                 input=task,
-                 output=None,
-                 start_time=start_time,
-                 end_time=end_time,
-                 status="error",
-                 error=str(e),
-             )
-
-     async def run(self, task: str) -> WorkflowOutput:
-         """Enhanced workflow execution with speaker system integration"""
-         if not self.agents:
-             raise ValueError("No agents provided to the workflow")
-
-         async with self.task_context():
-             start_time = datetime.utcnow()
-
-             try:
-                 # Run speakers first if enabled
-                 speaker_outputs = []
-                 if self.enable_group_chat:
-                     speaker_outputs = await self.run_group_chat(task)
-                 else:
-                     concurrent_outputs = (
-                         await self.run_concurrent_speakers(task)
-                     )
-                     sequential_outputs = (
-                         await self.run_sequential_speakers(task)
-                     )
-                     speaker_outputs = (
-                         concurrent_outputs + sequential_outputs
-                     )
-
-                 # Store speaker outputs in shared memory
-                 self.shared_memory.set(
-                     "speaker_outputs",
-                     [msg.dict() for msg in speaker_outputs],
-                     "workflow",
-                 )
-
-                 # Create tasks for all agents
-                 tasks = [
-                     self._execute_agent_task(agent, task)
-                     for agent in self.agents
-                 ]
-
-                 # Execute all tasks concurrently
-                 agent_outputs = await asyncio.gather(
-                     *tasks, return_exceptions=True
-                 )
-
-                 end_time = datetime.utcnow()
-
-                 # Calculate success/failure counts
-                 successful_tasks = sum(
-                     1
-                     for output in agent_outputs
-                     if isinstance(output, AgentOutput)
-                     and output.status == "success"
-                 )
-                 failed_tasks = len(agent_outputs) - successful_tasks
-
-                 return WorkflowOutput(
-                     workflow_id=self.workflow_id,
-                     workflow_name=self.name,
-                     start_time=start_time,
-                     end_time=end_time,
-                     total_agents=len(self.agents),
-                     successful_tasks=successful_tasks,
-                     failed_tasks=failed_tasks,
-                     agent_outputs=[
-                         output
-                         for output in agent_outputs
-                         if isinstance(output, AgentOutput)
-                     ],
-                     metadata={
-                         "max_workers": self.max_workers,
-                         "shared_memory_keys": list(
-                             self.shared_memory._memory.keys()
-                         ),
-                         "group_chat_enabled": self.enable_group_chat,
-                         "total_speaker_messages": len(
-                             speaker_outputs
-                         ),
-                         "speaker_outputs": [
-                             msg.dict() for msg in speaker_outputs
-                         ],
-                     },
-                 )
-
-             except Exception as e:
-                 self.logger.error(
-                     f"Critical workflow error: {str(e)}",
-                     exc_info=True,
-                 )
-                 raise
-
-     async def _save_results(
-         self, start_time: datetime, end_time: datetime
-     ) -> None:
-         """Save workflow results to disk"""
-         if not self.autosave:
-             return
-
-         output_dir = "workflow_outputs"
-         os.makedirs(output_dir, exist_ok=True)
-
-         filename = f"{output_dir}/workflow_{self.workflow_id}_{end_time.strftime('%Y%m%d_%H%M%S')}.json"
-
-         try:
-             with open(filename, "w") as f:
-                 json.dump(
-                     {
-                         "workflow_id": self.workflow_id,
-                         "start_time": start_time.isoformat(),
-                         "end_time": end_time.isoformat(),
-                         "results": [
-                             (
-                                 asdict(result)
-                                 if hasattr(result, "__dict__")
-                                 else (
-                                     result.dict()
-                                     if hasattr(result, "dict")
-                                     else str(result)
-                                 )
-                             )
-                             for result in self.results
-                         ],
-                         "speaker_history": [
-                             msg.dict()
-                             for msg in self.speaker_system.message_history
-                         ],
-                         "metadata": self.metadata,
-                     },
-                     f,
-                     default=str,
-                     indent=2,
-                 )
-
-             self.logger.info(f"Workflow results saved to {filename}")
-         except Exception as e:
-             self.logger.error(
-                 f"Error saving workflow results: {str(e)}"
-             )
-
-     def _validate_config(self) -> None:
-         """Validate workflow configuration"""
-         if self.max_workers < 1:
-             raise ValueError("max_workers must be at least 1")
-
-         if (
-             self.enable_group_chat
-             and not self.speaker_system.speakers
-         ):
-             raise ValueError(
-                 "Group chat enabled but no speakers configured"
-             )
-
-         for config in self.speaker_system.speakers.values():
-             if config.timeout <= 0:
-                 raise ValueError(
-                     f"Invalid timeout for speaker {config.role}"
-                 )
-
-     async def cleanup(self) -> None:
-         """Cleanup workflow resources"""
-         try:
-             # Close any open file handlers
-             for handler in self.logger.handlers[:]:
-                 handler.close()
-                 self.logger.removeHandler(handler)
-
-             # Persist final state
-             if self.autosave:
-                 end_time = datetime.utcnow()
-                 await self._save_results(
-                     (
-                         self.results[0].start_time
-                         if self.results
-                         else end_time
-                     ),
-                     end_time,
-                 )
-
-             # Clear shared memory if configured
-             self.shared_memory._memory.clear()
-
-         except Exception as e:
-             self.logger.error(f"Error during cleanup: {str(e)}")
-             raise
-
-
- # Utility functions for the workflow
- def create_default_workflow(
-     agents: List[Agent],
-     name: str = "DefaultWorkflow",
-     enable_group_chat: bool = False,
- ) -> AsyncWorkflow:
-     """Create a workflow with default configuration"""
-     workflow = AsyncWorkflow(
-         name=name,
-         agents=agents,
-         max_workers=len(agents),
-         dashboard=True,
-         autosave=True,
-         verbose=True,
-         enable_group_chat=enable_group_chat,
-         group_chat_config=GroupChatConfig(
-             max_loops=5,
-             allow_concurrent=True,
-             require_all_speakers=False,
-         ),
-     )
-
-     workflow.add_default_speakers()
-     return workflow
-
-
- async def run_workflow_with_retry(
-     workflow: AsyncWorkflow,
-     task: str,
-     max_retries: int = 3,
-     retry_delay: float = 1.0,
- ) -> WorkflowOutput:
-     """Run workflow with retry logic"""
-     for attempt in range(max_retries):
-         try:
-             return await workflow.run(task)
-         except Exception as e:
-             if attempt == max_retries - 1:
-                 raise
-             workflow.logger.warning(
-                 f"Attempt {attempt + 1} failed, retrying in {retry_delay} seconds: {str(e)}"
-             )
-             await asyncio.sleep(retry_delay)
-             retry_delay *= 2  # Exponential backoff
-
-
- # async def create_specialized_agents() -> List[Agent]:
- #     """Create a set of specialized agents for financial analysis"""
-
- #     # Base model configuration
- #     model = OpenAIChat(model_name="gpt-4o")
-
- #     # Financial Analysis Agent
- #     financial_agent = Agent(
- #         agent_name="Financial-Analysis-Agent",
- #         agent_description="Personal finance advisor agent",
- #         system_prompt=FINANCIAL_AGENT_SYS_PROMPT +
- #         "Output the <DONE> token when you're done creating a portfolio of etfs, index, funds, and more for AI",
- #         max_loops=1,
- #         llm=model,
- #         dynamic_temperature_enabled=True,
- #         user_name="Kye",
- #         retry_attempts=3,
- #         context_length=8192,
- #         return_step_meta=False,
- #         output_type="str",
- #         auto_generate_prompt=False,
- #         max_tokens=4000,
- #         stopping_token="<DONE>",
- #         saved_state_path="financial_agent.json",
- #         interactive=False,
- #     )
-
- #     # Risk Assessment Agent
- #     risk_agent = Agent(
- #         agent_name="Risk-Assessment-Agent",
- #         agent_description="Investment risk analysis specialist",
- #         system_prompt="Analyze investment risks and provide risk scores. Output <DONE> when analysis is complete.",
- #         max_loops=1,
- #         llm=model,
- #         dynamic_temperature_enabled=True,
- #         user_name="Kye",
- #         retry_attempts=3,
- #         context_length=8192,
- #         output_type="str",
- #         max_tokens=4000,
- #         stopping_token="<DONE>",
- #         saved_state_path="risk_agent.json",
- #         interactive=False,
- #     )
-
- #     # Market Research Agent
- #     research_agent = Agent(
- #         agent_name="Market-Research-Agent",
- #         agent_description="AI and tech market research specialist",
- #         system_prompt="Research AI market trends and growth opportunities. Output <DONE> when research is complete.",
- #         max_loops=1,
- #         llm=model,
- #         dynamic_temperature_enabled=True,
- #         user_name="Kye",
- #         retry_attempts=3,
- #         context_length=8192,
- #         output_type="str",
- #         max_tokens=4000,
- #         stopping_token="<DONE>",
- #         saved_state_path="research_agent.json",
- #         interactive=False,
- #     )
-
- #     return [financial_agent, risk_agent, research_agent]
-
- # async def main():
- #     # Create specialized agents
- #     agents = await create_specialized_agents()
-
- #     # Create workflow with group chat enabled
- #     workflow = create_default_workflow(
- #         agents=agents,
- #         name="AI-Investment-Analysis-Workflow",
- #         enable_group_chat=True
- #     )
-
- #     # Configure speaker roles
- #     workflow.speaker_system.add_speaker(
- #         SpeakerConfig(
- #             role=SpeakerRole.COORDINATOR,
- #             agent=agents[0], # Financial agent as coordinator
- #             priority=1,
- #             concurrent=False,
- #             required=True
- #         )
- #     )
-
- #     workflow.speaker_system.add_speaker(
- #         SpeakerConfig(
- #             role=SpeakerRole.CRITIC,
- #             agent=agents[1], # Risk agent as critic
- #             priority=2,
- #             concurrent=True
- #         )
- #     )
-
- #     workflow.speaker_system.add_speaker(
- #         SpeakerConfig(
- #             role=SpeakerRole.EXECUTOR,
- #             agent=agents[2], # Research agent as executor
- #             priority=2,
- #             concurrent=True
- #         )
- #     )
-
- #     # Investment analysis task
- #     investment_task = """
- #     Create a comprehensive investment analysis for a $40k portfolio focused on AI growth opportunities:
- #     1. Identify high-growth AI ETFs and index funds
- #     2. Analyze risks and potential returns
- #     3. Create a diversified portfolio allocation
- #     4. Provide market trend analysis
- #     Present the results in a structured markdown format.
- #     """
-
- #     try:
- #         # Run workflow with retry
- #         result = await run_workflow_with_retry(
- #             workflow=workflow,
- #             task=investment_task,
- #             max_retries=3
- #         )
-
- #         print("\nWorkflow Results:")
- #         print("================")
-
- #         # Process and display agent outputs
- #         for output in result.agent_outputs:
- #             print(f"\nAgent: {output.agent_name}")
- #             print("-" * (len(output.agent_name) + 8))
- #             print(output.output)
-
- #         # Display group chat history if enabled
- #         if workflow.enable_group_chat:
- #             print("\nGroup Chat Discussion:")
- #             print("=====================")
- #             for msg in workflow.speaker_system.message_history:
- #                 print(f"\n{msg.role} ({msg.agent_name}):")
- #                 print(msg.content)
-
- #         # Save detailed results
- #         if result.metadata.get("shared_memory_keys"):
- #             print("\nShared Insights:")
- #             print("===============")
- #             for key in result.metadata["shared_memory_keys"]:
- #                 value = workflow.shared_memory.get(key)
- #                 if value:
- #                     print(f"\n{key}:")
- #                     print(value)
-
- #     except Exception as e:
- #         print(f"Workflow failed: {str(e)}")
-
- #     finally:
- #         await workflow.cleanup()
-
- # if __name__ == "__main__":
- #     # Run the example
- #     asyncio.run(main())