swarms-7.9.9-py3-none-any.whl → swarms-8.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/agents/agent_judge.py +350 -61
- swarms/agents/reasoning_agents.py +62 -72
- swarms/agents/reasoning_duo.py +77 -24
- swarms/structs/__init__.py +2 -0
- swarms/structs/agent.py +21 -15
- swarms/structs/election_swarm.py +270 -0
- swarms/structs/heavy_swarm.py +1701 -0
- swarms/structs/qa_swarm.py +253 -0
- swarms/structs/swarm_router.py +61 -21
- swarms/telemetry/log_executions.py +257 -8
- swarms/utils/agent_cache.py +675 -0
- swarms/utils/concurrent_wrapper.py +520 -0
- {swarms-7.9.9.dist-info → swarms-8.0.0.dist-info}/METADATA +20 -16
- {swarms-7.9.9.dist-info → swarms-8.0.0.dist-info}/RECORD +17 -12
- {swarms-7.9.9.dist-info → swarms-8.0.0.dist-info}/LICENSE +0 -0
- {swarms-7.9.9.dist-info → swarms-8.0.0.dist-info}/WHEEL +0 -0
- {swarms-7.9.9.dist-info → swarms-8.0.0.dist-info}/entry_points.txt +0 -0
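
The headline addition in 8.0.0 is the HeavySwarm orchestrator in the new swarms/structs/heavy_swarm.py, diffed below. For orientation, here is a minimal usage sketch assembled from the class docstring in that file; the import path is inferred from the module location, and the model names and example task are copied from the docstring example rather than separately verified:

from swarms.structs.heavy_swarm import HeavySwarm

# Configuration mirrors the Example block in the HeavySwarm docstring.
swarm = HeavySwarm(
    name="AnalysisSwarm",
    description="Market analysis swarm",
    question_agent_model_name="gpt-4o-mini",  # model used to generate the 4 specialized questions
    worker_model_name="gpt-4o-mini",  # model used by the Research/Analysis/Alternatives/Verification agents
    show_dashboard=True,  # optional rich progress dashboard
)

# run() decomposes the task into specialized questions, executes the four
# worker agents in parallel, then synthesizes a final report. The return
# shape depends on output_type (default "dict-all-except-first").
result = swarm.run("Analyze the current cryptocurrency market trends")
print(result)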
swarms/structs/heavy_swarm.py (new file)
@@ -0,0 +1,1701 @@
|
|
1
|
+
import concurrent.futures
|
2
|
+
import json
|
3
|
+
import os
|
4
|
+
import time
|
5
|
+
import traceback
|
6
|
+
from functools import lru_cache
|
7
|
+
from typing import Dict, List, Optional
|
8
|
+
|
9
|
+
from loguru import logger
|
10
|
+
from rich.console import Console
|
11
|
+
from rich.panel import Panel
|
12
|
+
from rich.progress import (
|
13
|
+
Progress,
|
14
|
+
SpinnerColumn,
|
15
|
+
TextColumn,
|
16
|
+
TimeElapsedColumn,
|
17
|
+
)
|
18
|
+
from rich.table import Table
|
19
|
+
|
20
|
+
from swarms.structs.agent import Agent
|
21
|
+
from swarms.structs.conversation import Conversation
|
22
|
+
from swarms.utils.formatter import formatter
|
23
|
+
from swarms.utils.history_output_formatter import (
|
24
|
+
history_output_formatter,
|
25
|
+
)
|
26
|
+
from swarms.utils.litellm_wrapper import LiteLLM
|
27
|
+
|
28
|
+
RESEARCH_AGENT_PROMPT = """
|
29
|
+
You are an expert Research Agent with exceptional capabilities in:
|
30
|
+
|
31
|
+
CORE EXPERTISE:
|
32
|
+
- Comprehensive information gathering and synthesis
|
33
|
+
- Primary and secondary research methodologies
|
34
|
+
- Data collection, validation, and verification
|
35
|
+
- Market research and competitive analysis
|
36
|
+
- Academic and industry report analysis
|
37
|
+
- Statistical data interpretation
|
38
|
+
- Trend identification and pattern recognition
|
39
|
+
- Source credibility assessment
|
40
|
+
|
41
|
+
RESEARCH METHODOLOGIES:
|
42
|
+
- Systematic literature reviews
|
43
|
+
- Market surveys and analysis
|
44
|
+
- Competitive intelligence gathering
|
45
|
+
- Industry benchmarking studies
|
46
|
+
- Consumer behavior research
|
47
|
+
- Technical specification analysis
|
48
|
+
- Historical data compilation
|
49
|
+
- Cross-referencing multiple sources
|
50
|
+
|
51
|
+
ANALYTICAL CAPABILITIES:
|
52
|
+
- Data quality assessment
|
53
|
+
- Information gap identification
|
54
|
+
- Research bias detection
|
55
|
+
- Methodology evaluation
|
56
|
+
- Source triangulation
|
57
|
+
- Evidence hierarchy establishment
|
58
|
+
- Research limitation identification
|
59
|
+
- Reliability scoring
|
60
|
+
|
61
|
+
DELIVERABLES:
|
62
|
+
- Comprehensive research reports
|
63
|
+
- Executive summaries with key findings
|
64
|
+
- Data visualization recommendations
|
65
|
+
- Source documentation and citations
|
66
|
+
- Research methodology explanations
|
67
|
+
- Confidence intervals and uncertainty ranges
|
68
|
+
- Recommendations for further research
|
69
|
+
- Action items based on findings
|
70
|
+
|
71
|
+
You approach every research task with:
|
72
|
+
- Systematic methodology
|
73
|
+
- Critical thinking
|
74
|
+
- Attention to detail
|
75
|
+
- Objective analysis
|
76
|
+
- Comprehensive coverage
|
77
|
+
- Quality assurance
|
78
|
+
- Ethical research practices
|
79
|
+
|
80
|
+
Provide thorough, well-sourced, and actionable research insights."""
|
81
|
+
|
82
|
+
|
83
|
+
ANALYSIS_AGENT_PROMPT = """
|
84
|
+
You are an expert Analysis Agent with advanced capabilities in:
|
85
|
+
|
86
|
+
ANALYTICAL EXPERTISE:
|
87
|
+
- Advanced statistical analysis and modeling
|
88
|
+
- Pattern recognition and trend analysis
|
89
|
+
- Causal relationship identification
|
90
|
+
- Predictive modeling and forecasting
|
91
|
+
- Risk assessment and scenario analysis
|
92
|
+
- Performance metrics development
|
93
|
+
- Comparative analysis frameworks
|
94
|
+
- Root cause analysis methodologies
|
95
|
+
|
96
|
+
ANALYTICAL TECHNIQUES:
|
97
|
+
- Regression analysis and correlation studies
|
98
|
+
- Time series analysis and forecasting
|
99
|
+
- Cluster analysis and segmentation
|
100
|
+
- Factor analysis and dimensionality reduction
|
101
|
+
- Sensitivity analysis and stress testing
|
102
|
+
- Monte Carlo simulations
|
103
|
+
- Decision tree analysis
|
104
|
+
- Optimization modeling
|
105
|
+
|
106
|
+
DATA INTERPRETATION:
|
107
|
+
- Statistical significance testing
|
108
|
+
- Confidence interval calculation
|
109
|
+
- Variance analysis and decomposition
|
110
|
+
- Outlier detection and handling
|
111
|
+
- Missing data treatment
|
112
|
+
- Bias identification and correction
|
113
|
+
- Data transformation techniques
|
114
|
+
- Quality metrics establishment
|
115
|
+
|
116
|
+
INSIGHT GENERATION:
|
117
|
+
- Key finding identification
|
118
|
+
- Implication analysis
|
119
|
+
- Strategic recommendation development
|
120
|
+
- Performance gap analysis
|
121
|
+
- Opportunity identification
|
122
|
+
- Threat assessment
|
123
|
+
- Success factor determination
|
124
|
+
- Critical path analysis
|
125
|
+
|
126
|
+
DELIVERABLES:
|
127
|
+
- Detailed analytical reports
|
128
|
+
- Statistical summaries and interpretations
|
129
|
+
- Predictive models and forecasts
|
130
|
+
- Risk assessment matrices
|
131
|
+
- Performance dashboards
|
132
|
+
- Recommendation frameworks
|
133
|
+
- Implementation roadmaps
|
134
|
+
- Success measurement criteria
|
135
|
+
|
136
|
+
You approach analysis with:
|
137
|
+
- Mathematical rigor
|
138
|
+
- Statistical validity
|
139
|
+
- Logical reasoning
|
140
|
+
- Systematic methodology
|
141
|
+
- Evidence-based conclusions
|
142
|
+
- Actionable insights
|
143
|
+
- Clear communication
|
144
|
+
|
145
|
+
Provide precise, data-driven analysis with clear implications and recommendations."""
|
146
|
+
|
147
|
+
ALTERNATIVES_AGENT_PROMPT = """
|
148
|
+
You are an expert Alternatives Agent with exceptional capabilities in:
|
149
|
+
|
150
|
+
STRATEGIC THINKING:
|
151
|
+
- Alternative strategy development
|
152
|
+
- Creative problem-solving approaches
|
153
|
+
- Innovation and ideation techniques
|
154
|
+
- Strategic option evaluation
|
155
|
+
- Scenario planning and modeling
|
156
|
+
- Blue ocean strategy identification
|
157
|
+
- Disruptive innovation assessment
|
158
|
+
- Strategic pivot recommendations
|
159
|
+
|
160
|
+
SOLUTION FRAMEWORKS:
|
161
|
+
- Multiple pathway generation
|
162
|
+
- Trade-off analysis matrices
|
163
|
+
- Cost-benefit evaluation models
|
164
|
+
- Risk-reward assessment tools
|
165
|
+
- Implementation complexity scoring
|
166
|
+
- Resource requirement analysis
|
167
|
+
- Timeline and milestone planning
|
168
|
+
- Success probability estimation
|
169
|
+
|
170
|
+
CREATIVE METHODOLOGIES:
|
171
|
+
- Design thinking processes
|
172
|
+
- Brainstorming and ideation sessions
|
173
|
+
- Lateral thinking techniques
|
174
|
+
- Analogical reasoning approaches
|
175
|
+
- Constraint removal exercises
|
176
|
+
- Assumption challenging methods
|
177
|
+
- Reverse engineering solutions
|
178
|
+
- Cross-industry benchmarking
|
179
|
+
|
180
|
+
OPTION EVALUATION:
|
181
|
+
- Multi-criteria decision analysis
|
182
|
+
- Weighted scoring models
|
183
|
+
- Pareto analysis applications
|
184
|
+
- Real options valuation
|
185
|
+
- Strategic fit assessment
|
186
|
+
- Competitive advantage evaluation
|
187
|
+
- Scalability potential analysis
|
188
|
+
- Market acceptance probability
|
189
|
+
|
190
|
+
STRATEGIC ALTERNATIVES:
|
191
|
+
- Build vs. buy vs. partner decisions
|
192
|
+
- Organic vs. inorganic growth options
|
193
|
+
- Technology platform choices
|
194
|
+
- Market entry strategies
|
195
|
+
- Business model innovations
|
196
|
+
- Operational approach variations
|
197
|
+
- Financial structure alternatives
|
198
|
+
- Partnership and alliance options
|
199
|
+
|
200
|
+
DELIVERABLES:
|
201
|
+
- Alternative strategy portfolios
|
202
|
+
- Option evaluation matrices
|
203
|
+
- Implementation roadmaps
|
204
|
+
- Risk mitigation plans
|
205
|
+
- Resource allocation models
|
206
|
+
- Timeline and milestone charts
|
207
|
+
- Success measurement frameworks
|
208
|
+
- Contingency planning guides
|
209
|
+
|
210
|
+
You approach alternatives generation with:
|
211
|
+
- Creative thinking
|
212
|
+
- Strategic insight
|
213
|
+
- Practical feasibility
|
214
|
+
- Innovation mindset
|
215
|
+
- Risk awareness
|
216
|
+
- Implementation focus
|
217
|
+
- Value optimization
|
218
|
+
|
219
|
+
Provide innovative, practical, and well-evaluated alternative approaches and solutions.
|
220
|
+
"""
|
221
|
+
|
222
|
+
|
223
|
+
VERIFICATION_AGENT_PROMPT = """
|
224
|
+
You are an expert Verification Agent with comprehensive capabilities in:
|
225
|
+
|
226
|
+
VALIDATION EXPERTISE:
|
227
|
+
- Fact-checking and source verification
|
228
|
+
- Data accuracy and integrity assessment
|
229
|
+
- Methodology validation and review
|
230
|
+
- Assumption testing and challenge
|
231
|
+
- Logic and reasoning verification
|
232
|
+
- Completeness and gap analysis
|
233
|
+
- Consistency checking across sources
|
234
|
+
- Evidence quality evaluation
|
235
|
+
|
236
|
+
FEASIBILITY ASSESSMENT:
|
237
|
+
- Technical feasibility evaluation
|
238
|
+
- Economic viability analysis
|
239
|
+
- Operational capability assessment
|
240
|
+
- Resource availability verification
|
241
|
+
- Timeline realism evaluation
|
242
|
+
- Risk factor identification
|
243
|
+
- Constraint and limitation analysis
|
244
|
+
- Implementation barrier assessment
|
245
|
+
|
246
|
+
QUALITY ASSURANCE:
|
247
|
+
- Information reliability scoring
|
248
|
+
- Source credibility evaluation
|
249
|
+
- Bias detection and mitigation
|
250
|
+
- Error identification and correction
|
251
|
+
- Standard compliance verification
|
252
|
+
- Best practice alignment check
|
253
|
+
- Performance criteria validation
|
254
|
+
- Success measurement verification
|
255
|
+
|
256
|
+
VERIFICATION METHODOLOGIES:
|
257
|
+
- Independent source triangulation
|
258
|
+
- Peer review and expert validation
|
259
|
+
- Benchmarking against standards
|
260
|
+
- Historical precedent analysis
|
261
|
+
- Stress testing and scenario modeling
|
262
|
+
- Sensitivity analysis performance
|
263
|
+
- Cross-functional review processes
|
264
|
+
- Stakeholder feedback integration
|
265
|
+
|
266
|
+
RISK ASSESSMENT:
|
267
|
+
- Implementation risk evaluation
|
268
|
+
- Market acceptance risk analysis
|
269
|
+
- Technical risk identification
|
270
|
+
- Financial risk assessment
|
271
|
+
- Operational risk evaluation
|
272
|
+
- Regulatory compliance verification
|
273
|
+
- Competitive response assessment
|
274
|
+
- Timeline and delivery risk analysis
|
275
|
+
|
276
|
+
COMPLIANCE VERIFICATION:
|
277
|
+
- Regulatory requirement checking
|
278
|
+
- Industry standard compliance
|
279
|
+
- Legal framework alignment
|
280
|
+
- Ethical guideline adherence
|
281
|
+
- Safety standard verification
|
282
|
+
- Quality management compliance
|
283
|
+
- Environmental impact assessment
|
284
|
+
- Social responsibility validation
|
285
|
+
|
286
|
+
DELIVERABLES:
|
287
|
+
- Verification and validation reports
|
288
|
+
- Feasibility assessment summaries
|
289
|
+
- Risk evaluation matrices
|
290
|
+
- Compliance checklists
|
291
|
+
- Quality assurance scorecards
|
292
|
+
- Recommendation refinements
|
293
|
+
- Implementation guardrails
|
294
|
+
- Success probability assessments
|
295
|
+
|
296
|
+
You approach verification with:
|
297
|
+
- Rigorous methodology
|
298
|
+
- Critical evaluation
|
299
|
+
- Attention to detail
|
300
|
+
- Objective assessment
|
301
|
+
- Risk awareness
|
302
|
+
- Quality focus
|
303
|
+
- Practical realism
|
304
|
+
|
305
|
+
Provide thorough, objective verification with clear feasibility assessments and risk evaluations."""
|
306
|
+
|
307
|
+
SYNTHESIS_AGENT_PROMPT = """
|
308
|
+
You are an expert Synthesis Agent with advanced capabilities in:
|
309
|
+
|
310
|
+
INTEGRATION EXPERTISE:
|
311
|
+
- Multi-perspective synthesis and integration
|
312
|
+
- Cross-functional analysis and coordination
|
313
|
+
- Holistic view development and presentation
|
314
|
+
- Complex information consolidation
|
315
|
+
- Stakeholder perspective integration
|
316
|
+
- Strategic alignment and coherence
|
317
|
+
- Comprehensive solution development
|
318
|
+
- Executive summary creation
|
319
|
+
|
320
|
+
SYNTHESIS METHODOLOGIES:
|
321
|
+
- Information architecture development
|
322
|
+
- Priority matrix creation and application
|
323
|
+
- Weighted factor analysis
|
324
|
+
- Multi-criteria decision frameworks
|
325
|
+
- Consensus building techniques
|
326
|
+
- Conflict resolution approaches
|
327
|
+
- Trade-off optimization strategies
|
328
|
+
- Value proposition development
|
329
|
+
|
330
|
+
COMPREHENSIVE ANALYSIS:
|
331
|
+
- End-to-end solution evaluation
|
332
|
+
- Impact assessment across dimensions
|
333
|
+
- Cost-benefit comprehensive analysis
|
334
|
+
- Risk-reward optimization models
|
335
|
+
- Implementation roadmap development
|
336
|
+
- Success factor identification
|
337
|
+
- Critical path analysis
|
338
|
+
- Milestone and deliverable planning
|
339
|
+
|
340
|
+
STRATEGIC INTEGRATION:
|
341
|
+
- Vision and mission alignment
|
342
|
+
- Strategic objective integration
|
343
|
+
- Resource optimization across initiatives
|
344
|
+
- Timeline synchronization and coordination
|
345
|
+
- Stakeholder impact assessment
|
346
|
+
- Change management consideration
|
347
|
+
- Performance measurement integration
|
348
|
+
- Continuous improvement frameworks
|
349
|
+
|
350
|
+
DELIVERABLE CREATION:
|
351
|
+
- Executive summary development
|
352
|
+
- Strategic recommendation reports
|
353
|
+
- Implementation action plans
|
354
|
+
- Risk mitigation strategies
|
355
|
+
- Performance measurement frameworks
|
356
|
+
- Communication and rollout plans
|
357
|
+
- Success criteria and metrics
|
358
|
+
- Follow-up and review schedules
|
359
|
+
|
360
|
+
COMMUNICATION EXCELLENCE:
|
361
|
+
- Clear and concise reporting
|
362
|
+
- Executive-level presentation skills
|
363
|
+
- Technical detail appropriate scaling
|
364
|
+
- Visual and narrative integration
|
365
|
+
- Stakeholder-specific customization
|
366
|
+
- Action-oriented recommendations
|
367
|
+
- Decision-support optimization
|
368
|
+
- Implementation-focused guidance
|
369
|
+
|
370
|
+
You approach synthesis with:
|
371
|
+
- Holistic thinking
|
372
|
+
- Strategic perspective
|
373
|
+
- Integration mindset
|
374
|
+
- Communication clarity
|
375
|
+
- Action orientation
|
376
|
+
- Value optimization
|
377
|
+
- Implementation focus
|
378
|
+
|
379
|
+
Provide comprehensive, integrated analysis with clear, actionable recommendations and detailed implementation guidance."""
|
380
|
+
|
381
|
+
schema = {
|
382
|
+
"type": "function",
|
383
|
+
"function": {
|
384
|
+
"name": "generate_specialized_questions",
|
385
|
+
"description": "Generate 4 specialized questions for different agent roles to comprehensively analyze a given task",
|
386
|
+
"parameters": {
|
387
|
+
"type": "object",
|
388
|
+
"properties": {
|
389
|
+
"thinking": {
|
390
|
+
"type": "string",
|
391
|
+
"description": "Your reasoning process for how to break down this task into 4 specialized questions for different agent roles",
|
392
|
+
},
|
393
|
+
"research_question": {
|
394
|
+
"type": "string",
|
395
|
+
"description": "A detailed research question for the Research Agent to gather comprehensive background information and data",
|
396
|
+
},
|
397
|
+
"analysis_question": {
|
398
|
+
"type": "string",
|
399
|
+
"description": "An analytical question for the Analysis Agent to examine patterns, trends, and insights",
|
400
|
+
},
|
401
|
+
"alternatives_question": {
|
402
|
+
"type": "string",
|
403
|
+
"description": "A strategic question for the Alternatives Agent to explore different approaches, options, and solutions",
|
404
|
+
},
|
405
|
+
"verification_question": {
|
406
|
+
"type": "string",
|
407
|
+
"description": "A verification question for the Verification Agent to validate findings, check accuracy, and assess feasibility",
|
408
|
+
},
|
409
|
+
},
|
410
|
+
"required": [
|
411
|
+
"thinking",
|
412
|
+
"research_question",
|
413
|
+
"analysis_question",
|
414
|
+
"alternatives_question",
|
415
|
+
"verification_question",
|
416
|
+
],
|
417
|
+
},
|
418
|
+
},
|
419
|
+
}
|
420
|
+
|
421
|
+
schema = [schema]
|
422
|
+
|
423
|
+
|
424
|
+
class HeavySwarm:
|
425
|
+
"""
|
426
|
+
HeavySwarm is a sophisticated multi-agent orchestration system that decomposes complex tasks
|
427
|
+
into specialized questions and executes them using four specialized agents: Research, Analysis,
|
428
|
+
Alternatives, and Verification. The results are then synthesized into a comprehensive response.
|
429
|
+
|
430
|
+
This swarm architecture provides robust task analysis through:
|
431
|
+
- Intelligent question generation for specialized agent roles
|
432
|
+
- Parallel execution of specialized agents for efficiency
|
433
|
+
- Comprehensive synthesis of multi-perspective results
|
434
|
+
- Real-time progress monitoring with rich dashboard displays
|
435
|
+
- Reliability checks and validation systems
|
436
|
+
|
437
|
+
The HeavySwarm follows a structured workflow:
|
438
|
+
1. Task decomposition into specialized questions
|
439
|
+
2. Parallel execution by specialized agents
|
440
|
+
3. Result synthesis and integration
|
441
|
+
4. Comprehensive final report generation
|
442
|
+
|
443
|
+
Attributes:
|
444
|
+
name (str): Name identifier for the swarm instance
|
445
|
+
description (str): Description of the swarm's purpose
|
446
|
+
agents (List[Agent]): List of agent instances (currently unused, agents are created internally)
|
447
|
+
timeout (int): Maximum execution time per agent in seconds
|
448
|
+
aggregation_strategy (str): Strategy for result aggregation (currently 'synthesis')
|
449
|
+
loops_per_agent (int): Number of execution loops per agent
|
450
|
+
question_agent_model_name (str): Model name for question generation
|
451
|
+
worker_model_name (str): Model name for specialized worker agents
|
452
|
+
verbose (bool): Enable detailed logging output
|
453
|
+
max_workers (int): Maximum number of concurrent worker threads
|
454
|
+
show_dashboard (bool): Enable rich dashboard with progress visualization
|
455
|
+
agent_prints_on (bool): Enable individual agent output printing
|
456
|
+
conversation (Conversation): Conversation history tracker
|
457
|
+
console (Console): Rich console for dashboard output
|
458
|
+
|
459
|
+
Example:
|
460
|
+
>>> swarm = HeavySwarm(
|
461
|
+
... name="AnalysisSwarm",
|
462
|
+
... description="Market analysis swarm",
|
463
|
+
... question_agent_model_name="gpt-4o-mini",
|
464
|
+
... worker_model_name="gpt-4o-mini",
|
465
|
+
... show_dashboard=True
|
466
|
+
... )
|
467
|
+
>>> result = swarm.run("Analyze the current cryptocurrency market trends")
|
468
|
+
"""
|
469
|
+
|
470
|
+
def __init__(
|
471
|
+
self,
|
472
|
+
name: str = "HeavySwarm",
|
473
|
+
description: str = "A swarm of agents that can analyze a task and generate specialized questions for each agent role",
|
474
|
+
agents: List[Agent] = None,
|
475
|
+
timeout: int = 300,
|
476
|
+
aggregation_strategy: str = "synthesis",
|
477
|
+
loops_per_agent: int = 1,
|
478
|
+
question_agent_model_name: str = "gpt-4o-mini",
|
479
|
+
worker_model_name: str = "gpt-4o-mini",
|
480
|
+
verbose: bool = False,
|
481
|
+
max_workers: int = int(os.cpu_count() * 0.9),
|
482
|
+
show_dashboard: bool = False,
|
483
|
+
agent_prints_on: bool = False,
|
484
|
+
output_type: str = "dict-all-except-first",
|
485
|
+
):
|
486
|
+
"""
|
487
|
+
Initialize the HeavySwarm with configuration parameters.
|
488
|
+
|
489
|
+
Args:
|
490
|
+
name (str, optional): Identifier name for the swarm instance. Defaults to "HeavySwarm".
|
491
|
+
description (str, optional): Description of the swarm's purpose and capabilities.
|
492
|
+
Defaults to standard description.
|
493
|
+
agents (List[Agent], optional): Pre-configured agent list (currently unused as agents
|
494
|
+
are created internally). Defaults to None.
|
495
|
+
timeout (int, optional): Maximum execution time per agent in seconds. Defaults to 300.
|
496
|
+
aggregation_strategy (str, optional): Strategy for aggregating results. Currently only
|
497
|
+
'synthesis' is supported. Defaults to "synthesis".
|
498
|
+
loops_per_agent (int, optional): Number of execution loops each agent should perform.
|
499
|
+
Must be greater than 0. Defaults to 1.
|
500
|
+
question_agent_model_name (str, optional): Language model for question generation.
|
501
|
+
Defaults to "gpt-4o-mini".
|
502
|
+
worker_model_name (str, optional): Language model for specialized worker agents.
|
503
|
+
Defaults to "gpt-4o-mini".
|
504
|
+
verbose (bool, optional): Enable detailed logging and debug output. Defaults to False.
|
505
|
+
max_workers (int, optional): Maximum concurrent workers for parallel execution.
|
506
|
+
Defaults to 90% of CPU count.
|
507
|
+
show_dashboard (bool, optional): Enable rich dashboard with progress visualization.
|
508
|
+
Defaults to False.
|
509
|
+
agent_prints_on (bool, optional): Enable individual agent output printing.
|
510
|
+
Defaults to False.
|
511
|
+
|
512
|
+
Raises:
|
513
|
+
ValueError: If loops_per_agent is 0 or negative
|
514
|
+
ValueError: If required model names are None
|
515
|
+
|
516
|
+
Note:
|
517
|
+
The swarm automatically performs reliability checks during initialization
|
518
|
+
to ensure all required parameters are properly configured.
|
519
|
+
"""
|
520
|
+
self.name = name
|
521
|
+
self.description = description
|
522
|
+
self.agents = agents
|
523
|
+
self.timeout = timeout
|
524
|
+
self.aggregation_strategy = aggregation_strategy
|
525
|
+
self.loops_per_agent = loops_per_agent
|
526
|
+
self.question_agent_model_name = question_agent_model_name
|
527
|
+
self.worker_model_name = worker_model_name
|
528
|
+
self.verbose = verbose
|
529
|
+
self.max_workers = max_workers
|
530
|
+
self.show_dashboard = show_dashboard
|
531
|
+
self.agent_prints_on = agent_prints_on
|
532
|
+
self.output_type = output_type
|
533
|
+
|
534
|
+
self.conversation = Conversation()
|
535
|
+
self.console = Console()
|
536
|
+
|
537
|
+
if self.show_dashboard:
|
538
|
+
self.show_swarm_info()
|
539
|
+
|
540
|
+
self.reliability_check()
|
541
|
+
|
542
|
+
def show_swarm_info(self):
|
543
|
+
"""
|
544
|
+
Display comprehensive swarm configuration information in a rich dashboard format.
|
545
|
+
|
546
|
+
This method creates and displays a professionally styled information table containing
|
547
|
+
all key swarm configuration parameters including models, timeouts, and operational
|
548
|
+
settings. The display uses Arasaka-inspired styling with red headers and borders.
|
549
|
+
|
550
|
+
The dashboard includes:
|
551
|
+
- Swarm identification (name, description)
|
552
|
+
- Execution parameters (timeout, loops per agent)
|
553
|
+
- Model configurations (question and worker models)
|
554
|
+
- Performance settings (max workers, aggregation strategy)
|
555
|
+
|
556
|
+
Note:
|
557
|
+
This method only displays output when show_dashboard is enabled. If show_dashboard
|
558
|
+
is False, the method returns immediately without any output.
|
559
|
+
|
560
|
+
Returns:
|
561
|
+
None: This method only displays output and has no return value.
|
562
|
+
"""
|
563
|
+
if not self.show_dashboard:
|
564
|
+
return
|
565
|
+
|
566
|
+
# Create swarm info table with Arasaka styling
|
567
|
+
info_table = Table(
|
568
|
+
title="⚡ HEAVYSWARM CONFIGURATION",
|
569
|
+
show_header=True,
|
570
|
+
header_style="bold red",
|
571
|
+
)
|
572
|
+
info_table.add_column("Parameter", style="white", width=25)
|
573
|
+
info_table.add_column("Value", style="bright_white", width=40)
|
574
|
+
|
575
|
+
info_table.add_row("Swarm Name", self.name)
|
576
|
+
info_table.add_row("Description", self.description)
|
577
|
+
info_table.add_row("Timeout", f"{self.timeout}s")
|
578
|
+
info_table.add_row(
|
579
|
+
"Loops per Agent", str(self.loops_per_agent)
|
580
|
+
)
|
581
|
+
info_table.add_row(
|
582
|
+
"Question Model", self.question_agent_model_name
|
583
|
+
)
|
584
|
+
info_table.add_row("Worker Model", self.worker_model_name)
|
585
|
+
info_table.add_row("Max Workers", str(self.max_workers))
|
586
|
+
info_table.add_row(
|
587
|
+
"Aggregation Strategy", self.aggregation_strategy
|
588
|
+
)
|
589
|
+
|
590
|
+
# Display dashboard with professional Arasaka styling
|
591
|
+
self.console.print(
|
592
|
+
Panel(
|
593
|
+
info_table,
|
594
|
+
title="[bold red]HEAVYSWARM SYSTEM[/bold red]",
|
595
|
+
border_style="red",
|
596
|
+
)
|
597
|
+
)
|
598
|
+
self.console.print()
|
599
|
+
|
600
|
+
def reliability_check(self):
|
601
|
+
"""
|
602
|
+
Perform comprehensive reliability and configuration validation checks.
|
603
|
+
|
604
|
+
This method validates all critical swarm configuration parameters to ensure
|
605
|
+
the system is properly configured for operation. It checks for common
|
606
|
+
configuration errors and provides clear error messages for any issues found.
|
607
|
+
|
608
|
+
Validation checks include:
|
609
|
+
- loops_per_agent: Must be greater than 0 to ensure agents execute
|
610
|
+
- worker_model_name: Must be set for agent execution
|
611
|
+
- question_agent_model_name: Must be set for question generation
|
612
|
+
|
613
|
+
The method provides different user experiences based on the show_dashboard setting:
|
614
|
+
- With dashboard: Shows animated progress bars with professional styling
|
615
|
+
- Without dashboard: Provides basic console output with completion confirmation
|
616
|
+
|
617
|
+
Raises:
|
618
|
+
ValueError: If loops_per_agent is 0 or negative (agents won't execute)
|
619
|
+
ValueError: If worker_model_name is None (agents can't be created)
|
620
|
+
ValueError: If question_agent_model_name is None (questions can't be generated)
|
621
|
+
|
622
|
+
Note:
|
623
|
+
This method is automatically called during __init__ to ensure the swarm
|
624
|
+
is properly configured before any operations begin.
|
625
|
+
"""
|
626
|
+
if self.show_dashboard:
|
627
|
+
with Progress(
|
628
|
+
SpinnerColumn(),
|
629
|
+
TextColumn(
|
630
|
+
"[progress.description]{task.description}"
|
631
|
+
),
|
632
|
+
transient=True,
|
633
|
+
console=self.console,
|
634
|
+
) as progress:
|
635
|
+
task = progress.add_task(
|
636
|
+
"[red]RUNNING RELIABILITY CHECKS...", total=4
|
637
|
+
)
|
638
|
+
|
639
|
+
# Check loops_per_agent
|
640
|
+
time.sleep(0.5)
|
641
|
+
if self.loops_per_agent == 0:
|
642
|
+
raise ValueError(
|
643
|
+
"loops_per_agent must be greater than 0. This parameter is used to determine how many times each agent will run. If it is 0, the agent will not run at all."
|
644
|
+
)
|
645
|
+
progress.update(
|
646
|
+
task,
|
647
|
+
advance=1,
|
648
|
+
description="[white]✓ LOOPS PER AGENT VALIDATED",
|
649
|
+
)
|
650
|
+
|
651
|
+
# Check worker_model_name
|
652
|
+
time.sleep(0.5)
|
653
|
+
if self.worker_model_name is None:
|
654
|
+
raise ValueError(
|
655
|
+
"worker_model_name must be set. This parameter is used to determine the model that will be used to execute the agents."
|
656
|
+
)
|
657
|
+
progress.update(
|
658
|
+
task,
|
659
|
+
advance=1,
|
660
|
+
description="[white]✓ WORKER MODEL VALIDATED",
|
661
|
+
)
|
662
|
+
|
663
|
+
# Check question_agent_model_name
|
664
|
+
time.sleep(0.5)
|
665
|
+
if self.question_agent_model_name is None:
|
666
|
+
raise ValueError(
|
667
|
+
"question_agent_model_name must be set. This parameter is used to determine the model that will be used to generate the questions."
|
668
|
+
)
|
669
|
+
progress.update(
|
670
|
+
task,
|
671
|
+
advance=1,
|
672
|
+
description="[white]✓ QUESTION MODEL VALIDATED",
|
673
|
+
)
|
674
|
+
|
675
|
+
# Final validation
|
676
|
+
time.sleep(0.5)
|
677
|
+
progress.update(
|
678
|
+
task,
|
679
|
+
advance=1,
|
680
|
+
description="[bold white]✓ ALL RELIABILITY CHECKS PASSED!",
|
681
|
+
)
|
682
|
+
time.sleep(0.8) # Let user see the final message
|
683
|
+
|
684
|
+
self.console.print(
|
685
|
+
Panel(
|
686
|
+
"[bold red]✅ HEAVYSWARM RELIABILITY CHECK COMPLETE[/bold red]\n"
|
687
|
+
"[white]All systems validated and ready for operation[/white]",
|
688
|
+
title="[bold red]SYSTEM STATUS[/bold red]",
|
689
|
+
border_style="red",
|
690
|
+
)
|
691
|
+
)
|
692
|
+
self.console.print()
|
693
|
+
else:
|
694
|
+
# Original non-dashboard behavior
|
695
|
+
if self.loops_per_agent == 0:
|
696
|
+
raise ValueError(
|
697
|
+
"loops_per_agent must be greater than 0. This parameter is used to determine how many times each agent will run. If it is 0, the agent will not run at all."
|
698
|
+
)
|
699
|
+
|
700
|
+
if self.worker_model_name is None:
|
701
|
+
raise ValueError(
|
702
|
+
"worker_model_name must be set. This parameter is used to determine the model that will be used to execute the agents."
|
703
|
+
)
|
704
|
+
|
705
|
+
if self.question_agent_model_name is None:
|
706
|
+
raise ValueError(
|
707
|
+
"question_agent_model_name must be set. This parameter is used to determine the model that will be used to generate the questions."
|
708
|
+
)
|
709
|
+
|
710
|
+
formatter.print_panel(
|
711
|
+
content="Reliability check passed",
|
712
|
+
title="Reliability Check",
|
713
|
+
)
|
714
|
+
|
715
|
+
def run(self, task: str, img: str = None):
|
716
|
+
"""
|
717
|
+
Execute the complete HeavySwarm orchestration flow.
|
718
|
+
|
719
|
+
Args:
|
720
|
+
task (str): The main task to analyze
|
721
|
+
img (str, optional): Image input if needed
|
722
|
+
|
723
|
+
Returns:
|
724
|
+
str: Comprehensive final answer from synthesis agent
|
725
|
+
"""
|
726
|
+
if self.show_dashboard:
|
727
|
+
self.console.print(
|
728
|
+
Panel(
|
729
|
+
f"[bold red]⚡ Completing Task[/bold red]\n"
|
730
|
+
f"[white]Task: {task}[/white]",
|
731
|
+
title="[bold red]Initializing HeavySwarm[/bold red]",
|
732
|
+
border_style="red",
|
733
|
+
)
|
734
|
+
)
|
735
|
+
self.console.print()
|
736
|
+
|
737
|
+
self.conversation.add(
|
738
|
+
role="User",
|
739
|
+
content=task,
|
740
|
+
category="input",
|
741
|
+
)
|
742
|
+
|
743
|
+
# Question generation with dashboard
|
744
|
+
if self.show_dashboard:
|
745
|
+
with Progress(
|
746
|
+
SpinnerColumn(),
|
747
|
+
TextColumn(
|
748
|
+
"[progress.description]{task.description}"
|
749
|
+
),
|
750
|
+
transient=True,
|
751
|
+
console=self.console,
|
752
|
+
) as progress:
|
753
|
+
task_gen = progress.add_task(
|
754
|
+
"[red]⚡ GENERATING SPECIALIZED QUESTIONS...",
|
755
|
+
total=100,
|
756
|
+
)
|
757
|
+
progress.update(task_gen, advance=30)
|
758
|
+
questions = self.execute_question_generation(task)
|
759
|
+
progress.update(
|
760
|
+
task_gen,
|
761
|
+
advance=70,
|
762
|
+
description="[white]✓ QUESTIONS GENERATED SUCCESSFULLY!",
|
763
|
+
)
|
764
|
+
time.sleep(0.5)
|
765
|
+
else:
|
766
|
+
questions = self.execute_question_generation(task)
|
767
|
+
|
768
|
+
# if self.show_dashboard:
|
769
|
+
# # Create questions table
|
770
|
+
# questions_table = Table(
|
771
|
+
# title="⚡ GENERATED QUESTIONS FOR SPECIALIZED AGENTS",
|
772
|
+
# show_header=True,
|
773
|
+
# header_style="bold red",
|
774
|
+
# )
|
775
|
+
# questions_table.add_column(
|
776
|
+
# "Agent", style="white", width=20
|
777
|
+
# )
|
778
|
+
# questions_table.add_column(
|
779
|
+
# "Specialized Question", style="bright_white", width=60
|
780
|
+
# )
|
781
|
+
|
782
|
+
# questions_table.add_row(
|
783
|
+
# "Agent 1",
|
784
|
+
# questions.get("research_question", "N/A"),
|
785
|
+
# )
|
786
|
+
# questions_table.add_row(
|
787
|
+
# "Agent 2",
|
788
|
+
# questions.get("analysis_question", "N/A"),
|
789
|
+
# )
|
790
|
+
# questions_table.add_row(
|
791
|
+
# "Agent 3",
|
792
|
+
# questions.get("alternatives_question", "N/A"),
|
793
|
+
# )
|
794
|
+
# questions_table.add_row(
|
795
|
+
# "Agent 4",
|
796
|
+
# questions.get("verification_question", "N/A"),
|
797
|
+
# )
|
798
|
+
|
799
|
+
# self.console.print(
|
800
|
+
# Panel(
|
801
|
+
# questions_table,
|
802
|
+
# title="[bold red]QUESTION GENERATION COMPLETE[/bold red]",
|
803
|
+
# border_style="red",
|
804
|
+
# )
|
805
|
+
# )
|
806
|
+
# self.console.print()
|
807
|
+
# else:
|
808
|
+
# formatter.print_panel(
|
809
|
+
# content=json.dumps(questions, indent=4),
|
810
|
+
# title="Questions",
|
811
|
+
# )
|
812
|
+
|
813
|
+
self.conversation.add(
|
814
|
+
role="Question Generator Agent",
|
815
|
+
content=questions,
|
816
|
+
category="output",
|
817
|
+
)
|
818
|
+
|
819
|
+
if "error" in questions:
|
820
|
+
return (
|
821
|
+
f"Error in question generation: {questions['error']}"
|
822
|
+
)
|
823
|
+
|
824
|
+
if self.show_dashboard:
|
825
|
+
self.console.print(
|
826
|
+
Panel(
|
827
|
+
"[bold red]⚡ LAUNCHING SPECIALIZED AGENTS[/bold red]\n"
|
828
|
+
"[white]Executing 4 agents in parallel for comprehensive analysis[/white]",
|
829
|
+
title="[bold red]AGENT EXECUTION PHASE[/bold red]",
|
830
|
+
border_style="red",
|
831
|
+
)
|
832
|
+
)
|
833
|
+
|
834
|
+
agents = self.create_agents()
|
835
|
+
|
836
|
+
agent_results = self._execute_agents_parallel(
|
837
|
+
questions=questions, agents=agents, img=img
|
838
|
+
)
|
839
|
+
|
840
|
+
# Synthesis with dashboard
|
841
|
+
if self.show_dashboard:
|
842
|
+
with Progress(
|
843
|
+
SpinnerColumn(),
|
844
|
+
TextColumn(
|
845
|
+
"[progress.description]{task.description}"
|
846
|
+
),
|
847
|
+
TimeElapsedColumn(),
|
848
|
+
console=self.console,
|
849
|
+
) as progress:
|
850
|
+
synthesis_task = progress.add_task(
|
851
|
+
"[red]Agent 5: SYNTHESIZING COMPREHENSIVE ANALYSIS ••••••••••••••••••••••••••••••••",
|
852
|
+
total=None,
|
853
|
+
)
|
854
|
+
|
855
|
+
progress.update(
|
856
|
+
synthesis_task,
|
857
|
+
description="[red]Agent 5: INTEGRATING AGENT RESULTS ••••••••••••••••••••••••••••••••",
|
858
|
+
)
|
859
|
+
time.sleep(0.5)
|
860
|
+
|
861
|
+
progress.update(
|
862
|
+
synthesis_task,
|
863
|
+
description="[red]Agent 5: Summarizing Results ••••••••••••••••••••••••••••••••",
|
864
|
+
)
|
865
|
+
|
866
|
+
final_result = self._synthesize_results(
|
867
|
+
original_task=task,
|
868
|
+
questions=questions,
|
869
|
+
agent_results=agent_results,
|
870
|
+
)
|
871
|
+
|
872
|
+
progress.update(
|
873
|
+
synthesis_task,
|
874
|
+
description="[white]Agent 5: GENERATING FINAL REPORT ••••••••••••••••••••••••••••••••",
|
875
|
+
)
|
876
|
+
time.sleep(0.3)
|
877
|
+
|
878
|
+
progress.update(
|
879
|
+
synthesis_task,
|
880
|
+
description="[bold white]Agent 5: ✅ COMPLETE! ••••••••••••••••••••••••••••••••",
|
881
|
+
)
|
882
|
+
time.sleep(0.5)
|
883
|
+
|
884
|
+
self.console.print(
|
885
|
+
Panel(
|
886
|
+
"[bold red]⚡ HEAVYSWARM ANALYSIS COMPLETE![/bold red]\n"
|
887
|
+
"[white]Comprehensive multi-agent analysis delivered successfully[/white]",
|
888
|
+
title="[bold red]MISSION ACCOMPLISHED[/bold red]",
|
889
|
+
border_style="red",
|
890
|
+
)
|
891
|
+
)
|
892
|
+
self.console.print()
|
893
|
+
else:
|
894
|
+
final_result = self._synthesize_results(
|
895
|
+
original_task=task,
|
896
|
+
questions=questions,
|
897
|
+
agent_results=agent_results,
|
898
|
+
)
|
899
|
+
|
900
|
+
self.conversation.add(
|
901
|
+
role="Synthesis Agent",
|
902
|
+
content=final_result,
|
903
|
+
category="output",
|
904
|
+
)
|
905
|
+
|
906
|
+
return history_output_formatter(
|
907
|
+
conversation=self.conversation,
|
908
|
+
type=self.output_type,
|
909
|
+
)
|
910
|
+
|
911
|
+
@lru_cache(maxsize=1)
|
912
|
+
def create_agents(self):
|
913
|
+
"""
|
914
|
+
Create and cache the 4 specialized agents with detailed role-specific prompts.
|
915
|
+
|
916
|
+
This method creates a complete set of specialized agents optimized for different
|
917
|
+
aspects of task analysis. Each agent is configured with expert-level system prompts
|
918
|
+
and optimal settings for their specific role. The agents are cached using LRU cache
|
919
|
+
to avoid recreation overhead on subsequent calls.
|
920
|
+
|
921
|
+
The specialized agents created:
|
922
|
+
|
923
|
+
1. **Research Agent**: Expert in comprehensive information gathering, data collection,
|
924
|
+
market research, and source verification. Specializes in systematic literature
|
925
|
+
reviews, competitive intelligence, and statistical data interpretation.
|
926
|
+
|
927
|
+
2. **Analysis Agent**: Expert in advanced statistical analysis, pattern recognition,
|
928
|
+
predictive modeling, and causal relationship identification. Specializes in
|
929
|
+
regression analysis, forecasting, and performance metrics development.
|
930
|
+
|
931
|
+
3. **Alternatives Agent**: Expert in strategic thinking, creative problem-solving,
|
932
|
+
innovation ideation, and strategic option evaluation. Specializes in design
|
933
|
+
thinking, scenario planning, and blue ocean strategy identification.
|
934
|
+
|
935
|
+
4. **Verification Agent**: Expert in validation, feasibility assessment, fact-checking,
|
936
|
+
and quality assurance. Specializes in risk assessment, compliance verification,
|
937
|
+
and implementation barrier analysis.
|
938
|
+
|
939
|
+
5. **Synthesis Agent**: Expert in multi-perspective integration, comprehensive analysis,
|
940
|
+
and executive summary creation. Specializes in strategic alignment, conflict
|
941
|
+
resolution, and holistic solution development.
|
942
|
+
|
943
|
+
Agent Configuration:
|
944
|
+
- All agents use the configured worker_model_name
|
945
|
+
- Loops are set based on loops_per_agent parameter
|
946
|
+
- Dynamic temperature is enabled for creative responses
|
947
|
+
- Streaming is disabled for complete responses
|
948
|
+
- Verbose mode follows class configuration
|
949
|
+
|
950
|
+
Returns:
|
951
|
+
Dict[str, Agent]: Dictionary containing all 5 specialized agents with keys:
|
952
|
+
- 'research': Research Agent instance
|
953
|
+
- 'analysis': Analysis Agent instance
|
954
|
+
- 'alternatives': Alternatives Agent instance
|
955
|
+
- 'verification': Verification Agent instance
|
956
|
+
- 'synthesis': Synthesis Agent instance
|
957
|
+
|
958
|
+
Note:
|
959
|
+
This method uses @lru_cache(maxsize=1) to ensure agents are only created once
|
960
|
+
per HeavySwarm instance, improving performance for multiple task executions.
|
961
|
+
"""
|
962
|
+
if self.verbose:
|
963
|
+
logger.info("🏗️ Creating specialized agents...")
|
964
|
+
|
965
|
+
# Research Agent - Deep information gathering and data collection
|
966
|
+
research_agent = Agent(
|
967
|
+
agent_name="Research-Agent",
|
968
|
+
agent_description="Expert research agent specializing in comprehensive information gathering and data collection",
|
969
|
+
system_prompt=RESEARCH_AGENT_PROMPT,
|
970
|
+
max_loops=self.loops_per_agent,
|
971
|
+
model_name=self.worker_model_name,
|
972
|
+
streaming_on=False,
|
973
|
+
verbose=False,
|
974
|
+
dynamic_temperature_enabled=True,
|
975
|
+
print_on=self.agent_prints_on,
|
976
|
+
)
|
977
|
+
|
978
|
+
# Analysis Agent - Pattern recognition and deep analytical insights
|
979
|
+
analysis_agent = Agent(
|
980
|
+
agent_name="Analysis-Agent",
|
981
|
+
agent_description="Expert analytical agent specializing in pattern recognition, data analysis, and insight generation",
|
982
|
+
system_prompt=ANALYSIS_AGENT_PROMPT,
|
983
|
+
max_loops=self.loops_per_agent,
|
984
|
+
model_name=self.worker_model_name,
|
985
|
+
streaming_on=False,
|
986
|
+
verbose=False,
|
987
|
+
dynamic_temperature_enabled=True,
|
988
|
+
print_on=self.agent_prints_on,
|
989
|
+
)
|
990
|
+
|
991
|
+
# Alternatives Agent - Strategic options and creative solutions
|
992
|
+
alternatives_agent = Agent(
|
993
|
+
agent_name="Alternatives-Agent",
|
994
|
+
agent_description="Expert strategic agent specializing in alternative approaches, creative solutions, and option generation",
|
995
|
+
system_prompt=ALTERNATIVES_AGENT_PROMPT,
|
996
|
+
max_loops=self.loops_per_agent,
|
997
|
+
model_name=self.worker_model_name,
|
998
|
+
streaming_on=False,
|
999
|
+
verbose=False,
|
1000
|
+
dynamic_temperature_enabled=True,
|
1001
|
+
print_on=self.agent_prints_on,
|
1002
|
+
)
|
1003
|
+
|
1004
|
+
# Verification Agent - Validation, feasibility assessment, and quality assurance
|
1005
|
+
verification_agent = Agent(
|
1006
|
+
agent_name="Verification-Agent",
|
1007
|
+
agent_description="Expert verification agent specializing in validation, feasibility assessment, and quality assurance",
|
1008
|
+
system_prompt=VERIFICATION_AGENT_PROMPT,
|
1009
|
+
max_loops=self.loops_per_agent,
|
1010
|
+
model_name=self.worker_model_name,
|
1011
|
+
streaming_on=False,
|
1012
|
+
verbose=False,
|
1013
|
+
dynamic_temperature_enabled=True,
|
1014
|
+
print_on=self.agent_prints_on,
|
1015
|
+
)
|
1016
|
+
|
1017
|
+
# Synthesis Agent - Integration and comprehensive analysis
|
1018
|
+
synthesis_agent = Agent(
|
1019
|
+
agent_name="Synthesis-Agent",
|
1020
|
+
agent_description="Expert synthesis agent specializing in integration, comprehensive analysis, and final recommendations",
|
1021
|
+
system_prompt=SYNTHESIS_AGENT_PROMPT,
|
1022
|
+
max_loops=1,
|
1023
|
+
model_name=self.worker_model_name,
|
1024
|
+
streaming_on=False,
|
1025
|
+
verbose=False,
|
1026
|
+
dynamic_temperature_enabled=True,
|
1027
|
+
)
|
1028
|
+
|
1029
|
+
agents = {
|
1030
|
+
"research": research_agent,
|
1031
|
+
"analysis": analysis_agent,
|
1032
|
+
"alternatives": alternatives_agent,
|
1033
|
+
"verification": verification_agent,
|
1034
|
+
"synthesis": synthesis_agent,
|
1035
|
+
}
|
1036
|
+
return agents
|
1037
|
+
|
1038
|
+
def _execute_agents_parallel(
|
1039
|
+
self, questions: Dict, agents: Dict, img: Optional[str] = None
|
1040
|
+
) -> Dict[str, str]:
|
1041
|
+
"""
|
1042
|
+
Execute the 4 specialized agents in TRUE parallel using concurrent.futures.
|
1043
|
+
|
1044
|
+
Args:
|
1045
|
+
questions (Dict): Generated questions for each agent
|
1046
|
+
agents (Dict): Dictionary of specialized agents
|
1047
|
+
img (str, optional): Image input if needed
|
1048
|
+
|
1049
|
+
Returns:
|
1050
|
+
Dict[str, str]: Results from each agent
|
1051
|
+
"""
|
1052
|
+
|
1053
|
+
if self.show_dashboard:
|
1054
|
+
return self._execute_agents_with_dashboard(
|
1055
|
+
questions, agents, img
|
1056
|
+
)
|
1057
|
+
else:
|
1058
|
+
return self._execute_agents_basic(questions, agents, img)
|
1059
|
+
|
1060
|
+
def _execute_agents_basic(
|
1061
|
+
self, questions: Dict, agents: Dict, img: Optional[str] = None
|
1062
|
+
) -> Dict[str, str]:
|
1063
|
+
"""
|
1064
|
+
Execute specialized agents in parallel without dashboard visualization.
|
1065
|
+
|
1066
|
+
This method provides the core agent execution functionality using concurrent.futures
|
1067
|
+
for true parallel processing. It executes the four specialized agents simultaneously
|
1068
|
+
to maximize efficiency while providing basic error handling and timeout management.
|
1069
|
+
|
1070
|
+
The execution process:
|
1071
|
+
1. Prepare agent tasks with their respective specialized questions
|
1072
|
+
2. Submit all tasks to ThreadPoolExecutor for parallel execution
|
1073
|
+
3. Collect results as agents complete their work
|
1074
|
+
4. Handle timeouts and exceptions gracefully
|
1075
|
+
5. Log results to conversation history
|
1076
|
+
|
1077
|
+
Args:
|
1078
|
+
questions (Dict): Generated questions containing keys:
|
1079
|
+
- research_question: Question for Research Agent
|
1080
|
+
- analysis_question: Question for Analysis Agent
|
1081
|
+
- alternatives_question: Question for Alternatives Agent
|
1082
|
+
- verification_question: Question for Verification Agent
|
1083
|
+
agents (Dict): Dictionary of specialized agent instances from create_agents()
|
1084
|
+
img (str, optional): Image input for agents that support visual analysis.
|
1085
|
+
Defaults to None.
|
1086
|
+
|
1087
|
+
Returns:
|
1088
|
+
Dict[str, str]: Results from each agent execution with keys:
|
1089
|
+
- 'research': Research Agent output
|
1090
|
+
- 'analysis': Analysis Agent output
|
1091
|
+
- 'alternatives': Alternatives Agent output
|
1092
|
+
- 'verification': Verification Agent output
|
1093
|
+
|
1094
|
+
Note:
|
1095
|
+
This method uses ThreadPoolExecutor with max_workers limit for parallel execution.
|
1096
|
+
Each agent runs independently and results are collected as they complete.
|
1097
|
+
Timeout and exception handling ensure robustness even if individual agents fail.
|
1098
|
+
"""
|
1099
|
+
|
1100
|
+
# Define agent execution tasks
|
1101
|
+
def execute_agent(agent_info):
|
1102
|
+
agent_type, agent, question = agent_info
|
1103
|
+
try:
|
1104
|
+
result = agent.run(question)
|
1105
|
+
|
1106
|
+
self.conversation.add(
|
1107
|
+
role=agent.agent_name,
|
1108
|
+
content=result,
|
1109
|
+
category="output",
|
1110
|
+
)
|
1111
|
+
return agent_type, result
|
1112
|
+
except Exception as e:
|
1113
|
+
logger.error(
|
1114
|
+
f"❌ Error in {agent_type} Agent: {str(e)} Traceback: {traceback.format_exc()}"
|
1115
|
+
)
|
1116
|
+
return agent_type, f"Error: {str(e)}"
|
1117
|
+
|
1118
|
+
# Prepare agent tasks
|
1119
|
+
agent_tasks = [
|
1120
|
+
(
|
1121
|
+
"Research",
|
1122
|
+
agents["research"],
|
1123
|
+
questions.get("research_question", ""),
|
1124
|
+
),
|
1125
|
+
(
|
1126
|
+
"Analysis",
|
1127
|
+
agents["analysis"],
|
1128
|
+
questions.get("analysis_question", ""),
|
1129
|
+
),
|
1130
|
+
(
|
1131
|
+
"Alternatives",
|
1132
|
+
agents["alternatives"],
|
1133
|
+
questions.get("alternatives_question", ""),
|
1134
|
+
),
|
1135
|
+
(
|
1136
|
+
"Verification",
|
1137
|
+
agents["verification"],
|
1138
|
+
questions.get("verification_question", ""),
|
1139
|
+
),
|
1140
|
+
]
|
1141
|
+
|
1142
|
+
# Execute agents in parallel using ThreadPoolExecutor
|
1143
|
+
results = {}
|
1144
|
+
with concurrent.futures.ThreadPoolExecutor(
|
1145
|
+
max_workers=self.max_workers
|
1146
|
+
) as executor:
|
1147
|
+
# Submit all agent tasks
|
1148
|
+
future_to_agent = {
|
1149
|
+
executor.submit(execute_agent, task): task[0]
|
1150
|
+
for task in agent_tasks
|
1151
|
+
}
|
1152
|
+
|
1153
|
+
# Collect results as they complete
|
1154
|
+
for future in concurrent.futures.as_completed(
|
1155
|
+
future_to_agent
|
1156
|
+
):
|
1157
|
+
agent_type = future_to_agent[future]
|
1158
|
+
try:
|
1159
|
+
agent_name, result = future.result(
|
1160
|
+
timeout=self.timeout
|
1161
|
+
)
|
1162
|
+
results[agent_name.lower()] = result
|
1163
|
+
except concurrent.futures.TimeoutError:
|
1164
|
+
logger.error(
|
1165
|
+
f"⏰ Timeout for {agent_type} Agent after {self.timeout}s"
|
1166
|
+
)
|
1167
|
+
results[agent_type.lower()] = (
|
1168
|
+
f"Timeout after {self.timeout} seconds"
|
1169
|
+
)
|
1170
|
+
except Exception as e:
|
1171
|
+
logger.error(
|
1172
|
+
f"❌ Exception in {agent_type} Agent: {str(e)}"
|
1173
|
+
)
|
1174
|
+
results[agent_type.lower()] = (
|
1175
|
+
f"Exception: {str(e)}"
|
1176
|
+
)
|
1177
|
+
|
1178
|
+
return results
|
1179
|
+
|
1180
|
+
def _execute_agents_with_dashboard(
|
1181
|
+
self, questions: Dict, agents: Dict, img: Optional[str] = None
|
1182
|
+
) -> Dict[str, str]:
|
1183
|
+
"""
|
1184
|
+
Execute specialized agents in parallel with rich dashboard visualization and progress tracking.
|
1185
|
+
|
1186
|
+
This method provides an enhanced user experience by displaying real-time progress bars
|
1187
|
+
and status updates for each agent execution. It combines the efficiency of parallel
|
1188
|
+
processing with professional dashboard visualization using Rich console styling.
|
1189
|
+
|
1190
|
+
Dashboard Features:
|
1191
|
+
- Individual progress bars for each of the 4 specialized agents
|
1192
|
+
- Real-time status updates with professional Arasaka-inspired styling
|
1193
|
+
- Animated dots and progress indicators for visual engagement
|
1194
|
+
- Color-coded status messages (red for processing, white for completion)
|
1195
|
+
- Completion summary with mission accomplished messaging
|
1196
|
+
|
1197
|
+
Progress Phases for Each Agent:
|
1198
|
+
1. INITIALIZING: Agent setup and preparation
|
1199
|
+
2. PROCESSING QUERY: Question analysis and processing
|
1200
|
+
3. EXECUTING: Core agent execution with animated indicators
|
1201
|
+
4. GENERATING RESPONSE: Response formulation and completion
|
1202
|
+
5. COMPLETE: Successful execution confirmation
|
1203
|
+
|
1204
|
+
Args:
|
1205
|
+
questions (Dict): Generated specialized questions containing:
|
1206
|
+
- research_question: Comprehensive information gathering query
|
1207
|
+
- analysis_question: Pattern recognition and insight analysis query
|
1208
|
+
- alternatives_question: Creative solutions and options exploration query
|
1209
|
+
- verification_question: Validation and feasibility assessment query
|
1210
|
+
agents (Dict): Dictionary of specialized agent instances with keys:
|
1211
|
+
- research, analysis, alternatives, verification
|
1212
|
+
img (str, optional): Image input for agents supporting visual analysis.
|
1213
|
+
Defaults to None.
|
1214
|
+
|
1215
|
+
Returns:
|
1216
|
+
Dict[str, str]: Comprehensive results from agent execution:
|
1217
|
+
- Keys correspond to agent types (research, analysis, alternatives, verification)
|
1218
|
+
- Values contain detailed agent outputs and analysis
|
1219
|
+
|
1220
|
+
Note:
|
1221
|
+
This method requires show_dashboard=True in the HeavySwarm configuration.
|
1222
|
+
It provides the same parallel execution as _execute_agents_basic but with
|
1223
|
+
enhanced visual feedback and professional presentation.
|
1224
|
+
"""
|
1225
|
+
|
1226
|
+
# Agent configurations with professional styling
|
1227
|
+
agent_configs = [
|
1228
|
+
(
|
1229
|
+
"Agent 1",
|
1230
|
+
"research",
|
1231
|
+
"white",
|
1232
|
+
"Gathering comprehensive research data",
|
1233
|
+
),
|
1234
|
+
(
|
1235
|
+
"Agent 2",
|
1236
|
+
"analysis",
|
1237
|
+
"white",
|
1238
|
+
"Analyzing patterns and generating insights",
|
1239
|
+
),
|
1240
|
+
(
|
1241
|
+
"Agent 3",
|
1242
|
+
"alternatives",
|
1243
|
+
"white",
|
1244
|
+
"Exploring creative solutions and alternatives",
|
1245
|
+
),
|
1246
|
+
(
|
1247
|
+
"Agent 4",
|
1248
|
+
"verification",
|
1249
|
+
"white",
|
1250
|
+
"Validating findings and checking feasibility",
|
1251
|
+
),
|
1252
|
+
]
|
1253
|
+
|
1254
|
+
results = {}
|
1255
|
+
|
1256
|
+
with Progress(
|
1257
|
+
SpinnerColumn(),
|
1258
|
+
TextColumn("[progress.description]{task.description}"),
|
1259
|
+
TimeElapsedColumn(),
|
1260
|
+
console=self.console,
|
1261
|
+
) as progress:
|
1262
|
+
|
1263
|
+
# Create progress tasks for each agent
|
1264
|
+
tasks = {}
|
1265
|
+
for (
|
1266
|
+
display_name,
|
1267
|
+
agent_key,
|
1268
|
+
color,
|
1269
|
+
description,
|
1270
|
+
) in agent_configs:
|
1271
|
+
task_id = progress.add_task(
|
1272
|
+
f"[{color}]{display_name}[/{color}]: INITIALIZING",
|
1273
|
+
total=None,
|
1274
|
+
)
|
1275
|
+
tasks[agent_key] = task_id
|
1276
|
+
|
1277
|
+
# Define agent execution function with progress updates
|
1278
|
+
def execute_agent_with_progress(agent_info):
|
1279
|
+
agent_type, agent_key, agent, question = agent_info
|
1280
|
+
try:
|
1281
|
+
# Update progress to show agent starting
|
1282
|
+
progress.update(
|
1283
|
+
tasks[agent_key],
|
1284
|
+
description=f"[red]{agent_type}[/red]: INITIALIZING ••••••••",
|
1285
|
+
)
|
1286
|
+
|
1287
|
+
# Simulate some processing time for visual effect
|
1288
|
+
time.sleep(0.5)
|
1289
|
+
progress.update(
|
1290
|
+
tasks[agent_key],
|
1291
|
+
description=f"[red]{agent_type}[/red]: PROCESSING QUERY ••••••••••••••",
|
1292
|
+
)
|
1293
|
+
|
1294
|
+
# Execute the agent with dots animation
|
1295
|
+
progress.update(
|
1296
|
+
tasks[agent_key],
|
1297
|
+
description=f"[red]{agent_type}[/red]: EXECUTING ••••••••••••••••••••",
|
1298
|
+
)
|
1299
|
+
|
1300
|
+
result = agent.run(question)
|
1301
|
+
|
1302
|
+
# Update progress during execution
|
1303
|
+
progress.update(
|
1304
|
+
tasks[agent_key],
|
1305
|
+
description=f"[white]{agent_type}[/white]: GENERATING RESPONSE ••••••••••••••••••••••••••",
|
1306
|
+
)
|
1307
|
+
|
1308
|
+
# Add to conversation
|
1309
|
+
self.conversation.add(
|
1310
|
+
role=agent.agent_name,
|
1311
|
+
content=result,
|
1312
|
+
category="output",
|
1313
|
+
)
|
1314
|
+
|
1315
|
+
# Complete the progress
|
1316
|
+
progress.update(
|
1317
|
+
tasks[agent_key],
|
1318
|
+
description=f"[bold white]{agent_type}[/bold white]: ✅ COMPLETE! ••••••••••••••••••••••••••••••••",
|
1319
|
+
)
|
1320
|
+
|
1321
|
+
return agent_type, result
|
1322
|
+
|
1323
|
+
except Exception as e:
|
1324
|
+
progress.update(
|
1325
|
+
tasks[agent_key],
|
1326
|
+
description=f"[bold red]{agent_type}[/bold red]: ❌ ERROR! ••••••••••••••••••••••••••••••••",
|
1327
|
+
)
|
1328
|
+
logger.error(
|
1329
|
+
f"❌ Error in {agent_type} Agent: {str(e)} Traceback: {traceback.format_exc()}"
|
1330
|
+
)
|
1331
|
+
return agent_type, f"Error: {str(e)}"
|
1332
|
+
|
1333
|
+
# Prepare agent tasks with keys
|
1334
|
+
agent_tasks = [
|
1335
|
+
(
|
1336
|
+
"Agent 1",
|
1337
|
+
"research",
|
1338
|
+
agents["research"],
|
1339
|
+
questions.get("research_question", ""),
|
1340
|
+
),
|
1341
|
+
(
|
1342
|
+
"Agent 2",
|
1343
|
+
"analysis",
|
1344
|
+
agents["analysis"],
|
1345
|
+
questions.get("analysis_question", ""),
|
1346
|
+
),
|
1347
|
+
(
|
1348
|
+
"Agent 3",
|
1349
|
+
"alternatives",
|
1350
|
+
agents["alternatives"],
|
1351
|
+
questions.get("alternatives_question", ""),
|
1352
|
+
),
|
1353
|
+
(
|
1354
|
+
"Agent 4",
|
1355
|
+
"verification",
|
1356
|
+
agents["verification"],
|
1357
|
+
questions.get("verification_question", ""),
|
1358
|
+
),
|
1359
|
+
]
|
1360
|
+
|
1361
|
+
# Execute agents in parallel
|
1362
|
+
with concurrent.futures.ThreadPoolExecutor(
|
1363
|
+
max_workers=self.max_workers
|
1364
|
+
) as executor:
|
1365
|
+
# Submit all agent tasks
|
1366
|
+
future_to_agent = {
|
1367
|
+
executor.submit(
|
1368
|
+
execute_agent_with_progress, task
|
1369
|
+
): task[1]
|
1370
|
+
for task in agent_tasks
|
1371
|
+
}
|
1372
|
+
|
1373
|
+
# Collect results as they complete
|
1374
|
+
for future in concurrent.futures.as_completed(
|
1375
|
+
future_to_agent
|
1376
|
+
):
|
1377
|
+
agent_key = future_to_agent[future]
|
1378
|
+
try:
|
1379
|
+
agent_name, result = future.result(
|
1380
|
+
timeout=self.timeout
|
1381
|
+
)
|
1382
|
+
results[
|
1383
|
+
agent_name.lower()
|
1384
|
+
.replace("🔍 ", "")
|
1385
|
+
.replace("📊 ", "")
|
1386
|
+
.replace("⚡ ", "")
|
1387
|
+
.replace("✅ ", "")
|
1388
|
+
] = result
|
1389
|
+
except concurrent.futures.TimeoutError:
|
1390
|
+
progress.update(
|
1391
|
+
tasks[agent_key],
|
1392
|
+
description=f"[bold red]Agent {list(tasks.keys()).index(agent_key) + 1}[/bold red]: ⏰ TIMEOUT! ••••••••••••••••••••••••••••••••",
|
1393
|
+
)
|
1394
|
+
results[agent_key] = (
|
1395
|
+
f"Timeout after {self.timeout} seconds"
|
1396
|
+
)
|
1397
|
+
except Exception as e:
|
1398
|
+
progress.update(
|
1399
|
+
tasks[agent_key],
|
1400
|
+
description=f"[bold red]Agent {list(tasks.keys()).index(agent_key) + 1}[/bold red]: ❌ ERROR! ••••••••••••••••••••••••••••••••",
|
1401
|
+
)
|
1402
|
+
results[agent_key] = f"Exception: {str(e)}"
|
1403
|
+
|
1404
|
+
# Show completion summary
|
1405
|
+
self.console.print(
|
1406
|
+
Panel(
|
1407
|
+
"[bold red]⚡ ALL AGENTS COMPLETED SUCCESSFULLY![/bold red]\n"
|
1408
|
+
"[white]Results from all 4 specialized agents are ready for synthesis[/white]",
|
1409
|
+
title="[bold red]EXECUTION COMPLETE[/bold red]",
|
1410
|
+
border_style="red",
|
1411
|
+
)
|
1412
|
+
)
|
1413
|
+
self.console.print()
|
1414
|
+
|
1415
|
+
return results
|
1416
|
+
    def _synthesize_results(
        self, original_task: str, questions: Dict, agent_results: Dict
    ) -> str:
        """
        Synthesize all agent results into a comprehensive final answer.

        Args:
            original_task (str): The original user task
            questions (Dict): Generated questions
            agent_results (Dict): Results from all agents

        Returns:
            str: Comprehensive synthesized analysis
        """
        # Get the cached agents
        agents = self.create_agents()
        synthesis_agent = agents["synthesis"]

        agents_names = [
            "Research Agent",
            "Analysis Agent",
            "Alternatives Agent",
            "Verification Agent",
        ]

        # Create comprehensive synthesis prompt
        synthesis_prompt = f"""
        You are an expert synthesis agent tasked with producing a clear, actionable, and executive-ready report based on the following task and the results from four specialized agents (Research, Analysis, Alternatives, Verification).

        Original Task:
        {original_task}

        Your objectives:
        - Integrate and synthesize insights from all four agents {", ".join(agents_names)}, highlighting how each contributes to the overall understanding.
        - Identify and explain key themes, patterns, and any points of agreement or disagreement across the agents' findings.
        - Provide clear, prioritized, and actionable recommendations directly addressing the original task.
        - Explicitly discuss potential risks, limitations, and propose mitigation strategies.
        - Offer practical implementation guidance and concrete next steps.
        - Ensure the report is well-structured, concise, and suitable for decision-makers (executive summary style).
        - Use bullet points, numbered lists, and section headings where appropriate for clarity and readability.

        You may reference the conversation history for additional context:

        \n\n

        {self.conversation.return_history_as_string()}

        \n\n

        Please present your synthesis in the following structure:
        1. Executive Summary
        2. Key Insights from Each Agent
        3. Integrated Analysis & Themes
        4. Actionable Recommendations
        5. Risks & Mitigation Strategies
        6. Implementation Guidance & Next Steps

        Be thorough, objective, and ensure your synthesis is easy to follow for a non-technical audience.
        """

        return synthesis_agent.run(synthesis_prompt)

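# --- Illustrative sketch (not part of heavy_swarm.py) ---
# _synthesize_results above passes prior agent output to the synthesis agent
# via the shared conversation history. A minimal alternative, shown only as a
# sketch, is to fold a results dict (keyed by role, as built in the parallel
# step above) straight into the prompt. `agent_results` here is a hypothetical
# stand-in value, not data from this module.
agent_results = {
    "research": "Key facts gathered ...",
    "analysis": "Main trends observed ...",
    "alternatives": "Other viable approaches ...",
    "verification": "Feasibility checks ...",
}

sections = "\n\n".join(
    f"## {role.title()} Agent\n{output}"
    for role, output in agent_results.items()
)
synthesis_prompt = (
    "Synthesize the following agent findings into one executive-ready report.\n\n"
    f"{sections}\n\n"
    "Structure: Executive Summary, Integrated Analysis, Recommendations."
)
print(synthesis_prompt)
# --- End sketch ---
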
    def _parse_tool_calls(self, tool_calls: List) -> Dict[str, any]:
        """
        Parse ChatCompletionMessageToolCall objects into a structured dictionary format.

        This method extracts and structures the question generation results from language model
        tool calls. It handles the JSON parsing of function arguments and provides clean access
        to the generated questions for each specialized agent role.

        The method specifically looks for the 'generate_specialized_questions' function call
        and extracts the four specialized questions along with metadata. It provides robust
        error handling for JSON parsing failures and includes both successful and error cases.

        Args:
            tool_calls (List): List of ChatCompletionMessageToolCall objects returned by the LLM.
                Expected to contain at least one tool call with question generation results.

        Returns:
            Dict[str, any]: Structured dictionary containing:
                On success:
                - thinking (str): Reasoning process for question decomposition
                - research_question (str): Question for Research Agent
                - analysis_question (str): Question for Analysis Agent
                - alternatives_question (str): Question for Alternatives Agent
                - verification_question (str): Question for Verification Agent
                - tool_call_id (str): Unique identifier for the tool call
                - function_name (str): Name of the called function

                On error:
                - error (str): Error message describing the parsing failure
                - raw_arguments (str): Original unparsed function arguments
                - tool_call_id (str): Tool call identifier for debugging
                - function_name (str): Function name for debugging

        Note:
            If no tool calls are provided, returns an empty dictionary.
            Only the first tool call is processed, as only one question generation
            call is expected per task.
        """
        if not tool_calls:
            return {}

        # Get the first tool call (should be the question generation)
        tool_call = tool_calls[0]

        try:
            # Parse the JSON arguments
            arguments = json.loads(tool_call.function.arguments)

            return {
                "thinking": arguments.get("thinking", ""),
                "research_question": arguments.get(
                    "research_question", ""
                ),
                "analysis_question": arguments.get(
                    "analysis_question", ""
                ),
                "alternatives_question": arguments.get(
                    "alternatives_question", ""
                ),
                "verification_question": arguments.get(
                    "verification_question", ""
                ),
                "tool_call_id": tool_call.id,
                "function_name": tool_call.function.name,
            }

        except json.JSONDecodeError as e:
            return {
                "error": f"Failed to parse tool call arguments: {str(e)}",
                "raw_arguments": tool_call.function.arguments,
                "tool_call_id": tool_call.id,
                "function_name": tool_call.function.name,
            }

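# --- Illustrative sketch (not part of heavy_swarm.py) ---
# _parse_tool_calls only touches three attributes of each tool call:
# .id, .function.name, and .function.arguments (a JSON string). A stand-in
# object with that shape is enough to see what the parser expects; the values
# below are invented for illustration.
import json
from types import SimpleNamespace

fake_tool_call = SimpleNamespace(
    id="call_123",
    function=SimpleNamespace(
        name="generate_specialized_questions",
        arguments=json.dumps(
            {
                "thinking": "Split the task by agent role.",
                "research_question": "What data already exists?",
                "analysis_question": "What patterns does the data show?",
                "alternatives_question": "What other approaches could work?",
                "verification_question": "How can the findings be validated?",
            }
        ),
    ),
)

parsed = json.loads(fake_tool_call.function.arguments)
print(fake_tool_call.function.name, parsed["research_question"])
# --- End sketch ---
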
    def execute_question_generation(
        self, task: str
    ) -> Dict[str, str]:
        """
        Execute the question generation using the schema with a language model.

        Args:
            task (str): The main task to analyze

        Returns:
            Dict[str, str]: Generated questions for each agent role with parsed data
        """

        # Create the prompt for question generation
        prompt = f"""
        You are an expert task analyzer. Your job is to break down the following task into 4 specialized questions for different agent roles:

        1. Research Agent: Focuses on gathering information, data, and background context
        2. Analysis Agent: Focuses on examining patterns, trends, and deriving insights
        3. Alternatives Agent: Focuses on exploring different approaches and solutions
        4. Verification Agent: Focuses on validating findings and checking feasibility

        Task to analyze: {task}

        Use the generate_specialized_questions function to create targeted questions for each agent role.
        """

        question_agent = LiteLLM(
            system_prompt=prompt,
            model=self.question_agent_model_name,
            tools_list_dictionary=schema,
            max_tokens=3000,
            temperature=0.7,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
            tool_choice="auto",
        )

        # Get raw tool calls from LiteLLM
        raw_output = question_agent.run(task)

        # Parse the tool calls and return clean data
        out = self._parse_tool_calls(raw_output)

        if self.verbose:
            logger.info(
                f"🔍 Question Generation Output: {out} and type: {type(out)}"
            )

        return out

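# --- Illustrative sketch (not part of heavy_swarm.py) ---
# The `schema` passed to LiteLLM above is defined earlier in this module; the
# dictionary below is only an assumed OpenAI-style function-tool shape carrying
# the field names that _parse_tool_calls reads back, not the actual definition.
question_fields = [
    "thinking",
    "research_question",
    "analysis_question",
    "alternatives_question",
    "verification_question",
]
example_schema = [
    {
        "type": "function",
        "function": {
            "name": "generate_specialized_questions",
            "description": "Break a task into one question per agent role.",
            "parameters": {
                "type": "object",
                "properties": {
                    field: {"type": "string"} for field in question_fields
                },
                "required": question_fields,
            },
        },
    }
]
print(example_schema[0]["function"]["name"])
# --- End sketch ---
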
    def get_questions_only(self, task: str) -> Dict[str, str]:
        """
        Generate and extract only the specialized questions without metadata or execution.

        This utility method provides a clean interface for obtaining just the generated
        questions for each agent role without executing the full swarm workflow. It's
        useful for previewing questions, debugging question generation, or integrating
        with external systems that only need the questions.

        The method performs question generation using the configured question agent model
        and returns a clean dictionary containing only the four specialized questions,
        filtering out metadata like thinking process, tool call IDs, and function names.

        Args:
            task (str): The main task or query to analyze and decompose into specialized
                questions. Should be a clear, specific task description.

        Returns:
            Dict[str, str]: Clean dictionary containing only the questions:
                - research_question (str): Question for comprehensive information gathering
                - analysis_question (str): Question for pattern analysis and insights
                - alternatives_question (str): Question for exploring creative solutions
                - verification_question (str): Question for validation and feasibility

                On error:
                - error (str): Error message if question generation fails

        Example:
            >>> swarm = HeavySwarm()
            >>> questions = swarm.get_questions_only("Analyze market trends for EVs")
            >>> print(questions['research_question'])
        """
        result = self.execute_question_generation(task)

        if "error" in result:
            return {"error": result["error"]}

        return {
            "research_question": result.get("research_question", ""),
            "analysis_question": result.get("analysis_question", ""),
            "alternatives_question": result.get(
                "alternatives_question", ""
            ),
            "verification_question": result.get(
                "verification_question", ""
            ),
        }

    def get_questions_as_list(self, task: str) -> List[str]:
        """
        Generate specialized questions and return them as an ordered list.

        This utility method provides the simplest interface for obtaining generated questions
        in a list format. It's particularly useful for iteration, display purposes, or
        integration with systems that prefer list-based data structures over dictionaries.

        The questions are returned in a consistent order:
        1. Research question (information gathering)
        2. Analysis question (pattern recognition and insights)
        3. Alternatives question (creative solutions exploration)
        4. Verification question (validation and feasibility)

        Args:
            task (str): The main task or query to decompose into specialized questions.
                Should be a clear, actionable task description that can be analyzed
                from multiple perspectives.

        Returns:
            List[str]: Ordered list of 4 specialized questions:
                [0] Research question for comprehensive information gathering
                [1] Analysis question for pattern analysis and insights
                [2] Alternatives question for exploring creative solutions
                [3] Verification question for validation and feasibility assessment

                On error: Single-item list containing error message

        Example:
            >>> swarm = HeavySwarm()
            >>> questions = swarm.get_questions_as_list("Optimize supply chain efficiency")
            >>> for i, question in enumerate(questions):
            ...     print(f"Agent {i+1}: {question}")

        Note:
            This method internally calls get_questions_only() and converts the dictionary
            to a list format, maintaining the standard agent order.
        """
        questions = self.get_questions_only(task)

        if "error" in questions:
            return [f"Error: {questions['error']}"]

        return [
            questions.get("research_question", ""),
            questions.get("analysis_question", ""),
            questions.get("alternatives_question", ""),
            questions.get("verification_question", ""),
        ]