empathy-framework 3.11.0-py3-none-any.whl → 4.0.0-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/METADATA +105 -26
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/RECORD +14 -7
- empathy_os/cli.py +229 -0
- empathy_os/orchestration/__init__.py +32 -0
- empathy_os/orchestration/agent_templates.py +516 -0
- empathy_os/orchestration/config_store.py +499 -0
- empathy_os/orchestration/execution_strategies.py +632 -0
- empathy_os/orchestration/meta_orchestrator.py +621 -0
- empathy_os/workflows/orchestrated_release_prep.py +582 -0
- empathy_os/workflows/test_coverage_boost.py +433 -0
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/WHEEL +0 -0
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-3.11.0.dist-info → empathy_framework-4.0.0.dist-info}/top_level.txt +0 -0
empathy_os/orchestration/meta_orchestrator.py
@@ -0,0 +1,621 @@
+"""Meta-orchestrator for intelligent agent composition.
+
+This module implements the core orchestration logic that analyzes tasks,
+selects appropriate agents, and chooses composition patterns.
+
+Security:
+    - All inputs validated before processing
+    - No eval() or exec() usage
+    - Agent selection based on whitelisted templates
+
+Example:
+    >>> orchestrator = MetaOrchestrator()
+    >>> plan = orchestrator.analyze_and_compose(
+    ...     task="Boost test coverage to 90%",
+    ...     context={"current_coverage": 75}
+    ... )
+    >>> print(plan.strategy)
+    sequential
+    >>> print([a.role for a in plan.agents])
+    ['Test Coverage Expert', 'Test Generation Specialist', 'Quality Assurance Validator']
+"""
+
+import logging
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any
+
+from .agent_templates import AgentTemplate, get_template, get_templates_by_capability
+
+logger = logging.getLogger(__name__)
+
+
+class TaskComplexity(Enum):
+    """Task complexity classification."""
+
+    SIMPLE = "simple"  # Single agent, straightforward
+    MODERATE = "moderate"  # 2-3 agents, some coordination
+    COMPLEX = "complex"  # 4+ agents, multi-phase execution
+
+
+class TaskDomain(Enum):
+    """Task domain classification."""
+
+    TESTING = "testing"
+    SECURITY = "security"
+    CODE_QUALITY = "code_quality"
+    DOCUMENTATION = "documentation"
+    PERFORMANCE = "performance"
+    ARCHITECTURE = "architecture"
+    REFACTORING = "refactoring"
+    GENERAL = "general"
+
+
+class CompositionPattern(Enum):
+    """Available composition patterns (grammar rules)."""
+
+    SEQUENTIAL = "sequential"  # A → B → C
+    PARALLEL = "parallel"  # A || B || C
+    DEBATE = "debate"  # A ⇄ B ⇄ C → Synthesis
+    TEACHING = "teaching"  # Junior → Expert validation
+    REFINEMENT = "refinement"  # Draft → Review → Polish
+    ADAPTIVE = "adaptive"  # Classifier → Specialist
+
+
+@dataclass
+class TaskRequirements:
+    """Extracted requirements from task analysis.
+
+    Attributes:
+        complexity: Task complexity level
+        domain: Primary task domain
+        capabilities_needed: List of capabilities required
+        parallelizable: Whether task can be parallelized
+        quality_gates: Quality thresholds to enforce
+        context: Additional context for customization
+    """
+
+    complexity: TaskComplexity
+    domain: TaskDomain
+    capabilities_needed: list[str]
+    parallelizable: bool = False
+    quality_gates: dict[str, Any] = field(default_factory=dict)
+    context: dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class ExecutionPlan:
+    """Plan for agent execution.
+
+    Attributes:
+        agents: List of agents to execute
+        strategy: Composition pattern to use
+        quality_gates: Quality thresholds to enforce
+        estimated_cost: Estimated execution cost
+        estimated_duration: Estimated time in seconds
+    """
+
+    agents: list[AgentTemplate]
+    strategy: CompositionPattern
+    quality_gates: dict[str, Any] = field(default_factory=dict)
+    estimated_cost: float = 0.0
+    estimated_duration: int = 0
+
+
+class MetaOrchestrator:
+    """Intelligent task analyzer and agent composition engine.
+
+    The meta-orchestrator analyzes tasks to determine requirements,
+    selects appropriate agents, and chooses optimal composition patterns.
+
+    Example:
+        >>> orchestrator = MetaOrchestrator()
+        >>> plan = orchestrator.analyze_and_compose(
+        ...     task="Prepare for v3.12.0 release",
+        ...     context={"version": "3.12.0"}
+        ... )
+    """
+
+    # Keyword patterns for task analysis
+    COMPLEXITY_KEYWORDS = {
+        TaskComplexity.SIMPLE: [
+            "format",
+            "lint",
+            "check",
+            "validate",
+            "document",
+        ],
+        TaskComplexity.MODERATE: [
+            "improve",
+            "refactor",
+            "optimize",
+            "test",
+            "review",
+        ],
+        TaskComplexity.COMPLEX: [
+            "release",
+            "migrate",
+            "redesign",
+            "architecture",
+            "prepare",
+        ],
+    }
+
+    DOMAIN_KEYWORDS = {
+        TaskDomain.TESTING: [
+            "test",
+            "coverage",
+            "pytest",
+            "unit test",
+            "integration test",
+        ],
+        TaskDomain.SECURITY: [
+            "security",
+            "vulnerability",
+            "audit",
+            "penetration",
+            "threat",
+        ],
+        TaskDomain.CODE_QUALITY: [
+            "quality",
+            "code review",
+            "lint",
+            "best practices",
+            "maintainability",
+        ],
+        TaskDomain.DOCUMENTATION: [
+            "docs",
+            "documentation",
+            "readme",
+            "guide",
+            "tutorial",
+        ],
+        TaskDomain.PERFORMANCE: [
+            "performance",
+            "optimize",
+            "speed",
+            "benchmark",
+            "profile",
+        ],
+        TaskDomain.ARCHITECTURE: [
+            "architecture",
+            "design",
+            "structure",
+            "pattern",
+            "dependency",
+        ],
+        TaskDomain.REFACTORING: [
+            "refactor",
+            "cleanup",
+            "simplify",
+            "restructure",
+            "debt",
+        ],
+    }
+
+    # Capability mapping by domain
+    DOMAIN_CAPABILITIES = {
+        TaskDomain.TESTING: [
+            "analyze_gaps",
+            "suggest_tests",
+            "validate_coverage",
+        ],
+        TaskDomain.SECURITY: [
+            "vulnerability_scan",
+            "threat_modeling",
+            "compliance_check",
+        ],
+        TaskDomain.CODE_QUALITY: [
+            "code_review",
+            "quality_assessment",
+            "best_practices_check",
+        ],
+        TaskDomain.DOCUMENTATION: [
+            "generate_docs",
+            "check_completeness",
+            "update_examples",
+        ],
+        TaskDomain.PERFORMANCE: [
+            "profile_code",
+            "identify_bottlenecks",
+            "suggest_optimizations",
+        ],
+        TaskDomain.ARCHITECTURE: [
+            "analyze_architecture",
+            "identify_patterns",
+            "suggest_improvements",
+        ],
+        TaskDomain.REFACTORING: [
+            "identify_code_smells",
+            "suggest_refactorings",
+            "validate_changes",
+        ],
+    }
+
+    def __init__(self):
+        """Initialize meta-orchestrator."""
+        logger.info("MetaOrchestrator initialized")
+
+    def analyze_and_compose(
+        self, task: str, context: dict[str, Any] | None = None
+    ) -> ExecutionPlan:
+        """Analyze task and create execution plan.
+
+        This is the main entry point for the meta-orchestrator.
+
+        Args:
+            task: Task description (e.g., "Boost test coverage to 90%")
+            context: Optional context dictionary
+
+        Returns:
+            ExecutionPlan with agents and strategy
+
+        Raises:
+            ValueError: If task is invalid
+
+        Example:
+            >>> orchestrator = MetaOrchestrator()
+            >>> plan = orchestrator.analyze_and_compose(
+            ...     task="Improve test coverage",
+            ...     context={"current_coverage": 75}
+            ... )
+        """
+        if not task or not isinstance(task, str):
+            raise ValueError("task must be a non-empty string")
+
+        context = context or {}
+        logger.info(f"Analyzing task: {task}")
+
+        # Step 1: Analyze task requirements
+        requirements = self._analyze_task(task, context)
+        logger.info(
+            f"Task analysis: complexity={requirements.complexity.value}, "
+            f"domain={requirements.domain.value}, "
+            f"capabilities={requirements.capabilities_needed}"
+        )
+
+        # Step 2: Select appropriate agents
+        agents = self._select_agents(requirements)
+        logger.info(f"Selected {len(agents)} agents: {[a.id for a in agents]}")
+
+        # Step 3: Choose composition pattern
+        strategy = self._choose_composition_pattern(requirements, agents)
+        logger.info(f"Selected strategy: {strategy.value}")
+
+        # Step 4: Create execution plan
+        plan = ExecutionPlan(
+            agents=agents,
+            strategy=strategy,
+            quality_gates=requirements.quality_gates,
+            estimated_cost=self._estimate_cost(agents),
+            estimated_duration=self._estimate_duration(agents, strategy),
+        )
+
+        return plan
+
+    def _analyze_task(self, task: str, context: dict[str, Any]) -> TaskRequirements:
+        """Analyze task to extract requirements.
+
+        Args:
+            task: Task description
+            context: Context dictionary
+
+        Returns:
+            TaskRequirements with extracted information
+        """
+        task_lower = task.lower()
+
+        # Determine complexity
+        complexity = self._classify_complexity(task_lower)
+
+        # Determine domain
+        domain = self._classify_domain(task_lower)
+
+        # Extract needed capabilities
+        capabilities = self._extract_capabilities(domain, context)
+
+        # Determine if parallelizable
+        parallelizable = self._is_parallelizable(task_lower, complexity)
+
+        # Extract quality gates from context
+        quality_gates = context.get("quality_gates", {})
+
+        return TaskRequirements(
+            complexity=complexity,
+            domain=domain,
+            capabilities_needed=capabilities,
+            parallelizable=parallelizable,
+            quality_gates=quality_gates,
+            context=context,
+        )
+
+    def _classify_complexity(self, task_lower: str) -> TaskComplexity:
+        """Classify task complexity based on keywords.
+
+        Args:
+            task_lower: Lowercase task description
+
+        Returns:
+            TaskComplexity classification
+        """
+        # Check for complex keywords first (most specific)
+        for keyword in self.COMPLEXITY_KEYWORDS[TaskComplexity.COMPLEX]:
+            if keyword in task_lower:
+                return TaskComplexity.COMPLEX
+
+        # Check for moderate keywords
+        for keyword in self.COMPLEXITY_KEYWORDS[TaskComplexity.MODERATE]:
+            if keyword in task_lower:
+                return TaskComplexity.MODERATE
+
+        # Check for simple keywords
+        for keyword in self.COMPLEXITY_KEYWORDS[TaskComplexity.SIMPLE]:
+            if keyword in task_lower:
+                return TaskComplexity.SIMPLE
+
+        # Default to moderate if no keywords match
+        return TaskComplexity.MODERATE
+
+    def _classify_domain(self, task_lower: str) -> TaskDomain:
+        """Classify task domain based on keywords.
+
+        Args:
+            task_lower: Lowercase task description
+
+        Returns:
+            TaskDomain classification
+        """
+        # Score each domain based on keyword matches
+        domain_scores: dict[TaskDomain, int] = dict.fromkeys(TaskDomain, 0)
+
+        for domain, keywords in self.DOMAIN_KEYWORDS.items():
+            for keyword in keywords:
+                if keyword in task_lower:
+                    domain_scores[domain] += 1
+
+        # Return domain with highest score
+        max_score = max(domain_scores.values())
+        if max_score > 0:
+            for domain, score in domain_scores.items():
+                if score == max_score:
+                    return domain
+
+        # Default to general if no keywords match
+        return TaskDomain.GENERAL
+
+    def _extract_capabilities(self, domain: TaskDomain, context: dict[str, Any]) -> list[str]:
+        """Extract needed capabilities based on domain.
+
+        Args:
+            domain: Task domain
+            context: Context dictionary
+
+        Returns:
+            List of capability names
+        """
+        # Get default capabilities for domain
+        capabilities = self.DOMAIN_CAPABILITIES.get(domain, []).copy()
+
+        # Add capabilities from context if provided
+        if "capabilities" in context:
+            additional = context["capabilities"]
+            if isinstance(additional, list):
+                capabilities.extend(additional)
+
+        return capabilities
+
+    def _is_parallelizable(self, task_lower: str, complexity: TaskComplexity) -> bool:
+        """Determine if task can be parallelized.
+
+        Args:
+            task_lower: Lowercase task description
+            complexity: Task complexity
+
+        Returns:
+            True if task can be parallelized
+        """
+        # Keywords indicating parallel execution
+        parallel_keywords = [
+            "release",
+            "audit",
+            "check",
+            "validate",
+            "review",
+        ]
+
+        # Keywords indicating sequential execution
+        sequential_keywords = [
+            "migrate",
+            "refactor",
+            "generate",
+            "create",
+        ]
+
+        # Check for sequential keywords first (higher precedence)
+        for keyword in sequential_keywords:
+            if keyword in task_lower:
+                return False
+
+        # Check for parallel keywords
+        for keyword in parallel_keywords:
+            if keyword in task_lower:
+                return True
+
+        # Complex tasks often benefit from parallel execution
+        return complexity == TaskComplexity.COMPLEX
+
+    def _select_agents(self, requirements: TaskRequirements) -> list[AgentTemplate]:
+        """Select appropriate agents based on requirements.
+
+        Args:
+            requirements: Task requirements
+
+        Returns:
+            List of agent templates
+
+        Raises:
+            ValueError: If no agents match requirements
+        """
+        agents: list[AgentTemplate] = []
+
+        # Select agents based on needed capabilities
+        for capability in requirements.capabilities_needed:
+            templates = get_templates_by_capability(capability)
+            if templates:
+                # Pick the first template with this capability
+                # In future: could rank by success rate, cost, etc.
+                agent = templates[0]
+                if agent not in agents:
+                    agents.append(agent)
+
+        # If no agents found, use domain-appropriate default
+        if not agents:
+            agents = self._get_default_agents(requirements.domain)
+
+        if not agents:
+            raise ValueError(f"No agents available for domain: {requirements.domain.value}")
+
+        return agents
+
+    def _get_default_agents(self, domain: TaskDomain) -> list[AgentTemplate]:
+        """Get default agents for a domain.
+
+        Args:
+            domain: Task domain
+
+        Returns:
+            List of default agent templates
+        """
+        defaults = {
+            TaskDomain.TESTING: ["test_coverage_analyzer"],
+            TaskDomain.SECURITY: ["security_auditor"],
+            TaskDomain.CODE_QUALITY: ["code_reviewer"],
+            TaskDomain.DOCUMENTATION: ["documentation_writer"],
+            TaskDomain.PERFORMANCE: ["performance_optimizer"],
+            TaskDomain.ARCHITECTURE: ["architecture_analyst"],
+            TaskDomain.REFACTORING: ["refactoring_specialist"],
+        }
+
+        template_ids = defaults.get(domain, ["code_reviewer"])
+        agents = []
+        for template_id in template_ids:
+            template = get_template(template_id)
+            if template:
+                agents.append(template)
+
+        return agents
+
+    def _choose_composition_pattern(
+        self, requirements: TaskRequirements, agents: list[AgentTemplate]
+    ) -> CompositionPattern:
+        """Choose optimal composition pattern.
+
+        Args:
+            requirements: Task requirements
+            agents: Selected agents
+
+        Returns:
+            CompositionPattern to use
+        """
+        num_agents = len(agents)
+
+        # Parallelizable tasks: use parallel strategy (check before single agent)
+        if requirements.parallelizable:
+            return CompositionPattern.PARALLEL
+
+        # Security/architecture: benefit from multiple perspectives (even with 1 agent)
+        if requirements.domain in [TaskDomain.SECURITY, TaskDomain.ARCHITECTURE]:
+            return CompositionPattern.PARALLEL
+
+        # Documentation: teaching pattern (cheap → validate → expert if needed)
+        if requirements.domain == TaskDomain.DOCUMENTATION:
+            return CompositionPattern.TEACHING
+
+        # Refactoring: refinement pattern (identify → refactor → validate)
+        if requirements.domain == TaskDomain.REFACTORING:
+            return CompositionPattern.REFINEMENT
+
+        # Single agent: sequential (after domain-specific patterns)
+        if num_agents == 1:
+            return CompositionPattern.SEQUENTIAL
+
+        # Multiple agents with same capability: debate/consensus
+        capabilities = [cap for agent in agents for cap in agent.capabilities]
+        if len(capabilities) != len(set(capabilities)):
+            # Duplicate capabilities detected → debate
+            return CompositionPattern.DEBATE
+
+        # Testing domain: typically sequential (analyze → generate → validate)
+        if requirements.domain == TaskDomain.TESTING:
+            return CompositionPattern.SEQUENTIAL
+
+        # Complex tasks: adaptive routing
+        if requirements.complexity == TaskComplexity.COMPLEX:
+            return CompositionPattern.ADAPTIVE
+
+        # Default: sequential
+        return CompositionPattern.SEQUENTIAL
+
+    def _estimate_cost(self, agents: list[AgentTemplate]) -> float:
+        """Estimate execution cost based on agent tiers.
+
+        Args:
+            agents: List of agents
+
+        Returns:
+            Estimated cost in arbitrary units
+        """
+        tier_costs = {
+            "CHEAP": 1.0,
+            "CAPABLE": 3.0,
+            "PREMIUM": 10.0,
+        }
+
+        total_cost = 0.0
+        for agent in agents:
+            total_cost += tier_costs.get(agent.tier_preference, 3.0)
+
+        return total_cost
+
+    def _estimate_duration(self, agents: list[AgentTemplate], strategy: CompositionPattern) -> int:
+        """Estimate execution duration in seconds.
+
+        Args:
+            agents: List of agents
+            strategy: Composition pattern
+
+        Returns:
+            Estimated duration in seconds
+        """
+        # Get max timeout from agents
+        max_timeout = max(
+            (agent.resource_requirements.timeout_seconds for agent in agents),
+            default=300,
+        )
+
+        # Sequential: sum of timeouts
+        if strategy == CompositionPattern.SEQUENTIAL:
+            return sum(agent.resource_requirements.timeout_seconds for agent in agents)
+
+        # Parallel: max timeout
+        if strategy == CompositionPattern.PARALLEL:
+            return max_timeout
+
+        # Debate: multiple rounds, estimate 2x max timeout
+        if strategy == CompositionPattern.DEBATE:
+            return max_timeout * 2
+
+        # Teaching: initial attempt + possible expert review
+        if strategy == CompositionPattern.TEACHING:
+            return int(max_timeout * 1.5)
+
+        # Refinement: 3 passes (draft → review → polish)
+        if strategy == CompositionPattern.REFINEMENT:
+            return max_timeout * 3
+
+        # Adaptive: classification + specialist
+        if strategy == CompositionPattern.ADAPTIVE:
+            return int(max_timeout * 1.2)
+
+        # Default: max timeout
+        return max_timeout