attune-ai 2.1.5__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/cli/__init__.py +3 -59
- attune/cli/commands/batch.py +4 -12
- attune/cli/commands/cache.py +7 -15
- attune/cli/commands/provider.py +17 -0
- attune/cli/commands/routing.py +3 -1
- attune/cli/commands/setup.py +122 -0
- attune/cli/commands/tier.py +1 -3
- attune/cli/commands/workflow.py +31 -0
- attune/cli/parsers/cache.py +1 -0
- attune/cli/parsers/help.py +1 -3
- attune/cli/parsers/provider.py +7 -0
- attune/cli/parsers/routing.py +1 -3
- attune/cli/parsers/setup.py +7 -0
- attune/cli/parsers/status.py +1 -3
- attune/cli/parsers/tier.py +1 -3
- attune/cli_minimal.py +9 -3
- attune/cli_router.py +9 -7
- attune/cli_unified.py +3 -0
- attune/dashboard/app.py +3 -1
- attune/dashboard/simple_server.py +3 -1
- attune/dashboard/standalone_server.py +7 -3
- attune/mcp/server.py +54 -102
- attune/memory/long_term.py +0 -2
- attune/memory/short_term/__init__.py +84 -0
- attune/memory/short_term/base.py +467 -0
- attune/memory/short_term/batch.py +219 -0
- attune/memory/short_term/caching.py +227 -0
- attune/memory/short_term/conflicts.py +265 -0
- attune/memory/short_term/cross_session.py +122 -0
- attune/memory/short_term/facade.py +655 -0
- attune/memory/short_term/pagination.py +215 -0
- attune/memory/short_term/patterns.py +271 -0
- attune/memory/short_term/pubsub.py +286 -0
- attune/memory/short_term/queues.py +244 -0
- attune/memory/short_term/security.py +300 -0
- attune/memory/short_term/sessions.py +250 -0
- attune/memory/short_term/streams.py +249 -0
- attune/memory/short_term/timelines.py +234 -0
- attune/memory/short_term/transactions.py +186 -0
- attune/memory/short_term/working.py +252 -0
- attune/meta_workflows/cli_commands/__init__.py +3 -0
- attune/meta_workflows/cli_commands/agent_commands.py +0 -4
- attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
- attune/meta_workflows/cli_commands/config_commands.py +0 -5
- attune/meta_workflows/cli_commands/memory_commands.py +0 -5
- attune/meta_workflows/cli_commands/template_commands.py +0 -5
- attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
- attune/models/adaptive_routing.py +4 -8
- attune/models/auth_cli.py +3 -9
- attune/models/auth_strategy.py +2 -4
- attune/models/telemetry/analytics.py +0 -2
- attune/models/telemetry/backend.py +0 -3
- attune/models/telemetry/storage.py +0 -2
- attune/orchestration/_strategies/__init__.py +156 -0
- attune/orchestration/_strategies/base.py +231 -0
- attune/orchestration/_strategies/conditional_strategies.py +373 -0
- attune/orchestration/_strategies/conditions.py +369 -0
- attune/orchestration/_strategies/core_strategies.py +491 -0
- attune/orchestration/_strategies/data_classes.py +64 -0
- attune/orchestration/_strategies/nesting.py +233 -0
- attune/orchestration/execution_strategies.py +58 -1567
- attune/orchestration/meta_orchestrator.py +1 -3
- attune/project_index/scanner.py +1 -3
- attune/project_index/scanner_parallel.py +7 -5
- attune/socratic_router.py +1 -3
- attune/telemetry/agent_coordination.py +9 -3
- attune/telemetry/agent_tracking.py +16 -3
- attune/telemetry/approval_gates.py +22 -5
- attune/telemetry/cli.py +1 -3
- attune/telemetry/commands/dashboard_commands.py +24 -8
- attune/telemetry/event_streaming.py +8 -2
- attune/telemetry/feedback_loop.py +10 -2
- attune/tools.py +1 -0
- attune/workflow_commands.py +1 -3
- attune/workflows/__init__.py +53 -10
- attune/workflows/autonomous_test_gen.py +158 -102
- attune/workflows/base.py +48 -672
- attune/workflows/batch_processing.py +1 -3
- attune/workflows/compat.py +156 -0
- attune/workflows/cost_mixin.py +141 -0
- attune/workflows/data_classes.py +92 -0
- attune/workflows/document_gen/workflow.py +11 -14
- attune/workflows/history.py +62 -37
- attune/workflows/llm_base.py +1 -3
- attune/workflows/migration.py +422 -0
- attune/workflows/output.py +2 -7
- attune/workflows/parsing_mixin.py +427 -0
- attune/workflows/perf_audit.py +3 -1
- attune/workflows/progress.py +9 -11
- attune/workflows/release_prep.py +5 -1
- attune/workflows/routing.py +0 -2
- attune/workflows/secure_release.py +2 -1
- attune/workflows/security_audit.py +19 -14
- attune/workflows/security_audit_phase3.py +28 -22
- attune/workflows/seo_optimization.py +27 -27
- attune/workflows/test_gen/test_templates.py +1 -4
- attune/workflows/test_gen/workflow.py +0 -2
- attune/workflows/test_gen_behavioral.py +6 -19
- attune/workflows/test_gen_parallel.py +6 -4
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/RECORD +116 -91
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
- attune_healthcare/monitors/monitoring/__init__.py +9 -9
- attune_llm/agent_factory/__init__.py +6 -6
- attune_llm/commands/__init__.py +10 -10
- attune_llm/commands/models.py +3 -3
- attune_llm/config/__init__.py +8 -8
- attune_llm/learning/__init__.py +3 -3
- attune_llm/learning/extractor.py +5 -3
- attune_llm/learning/storage.py +5 -3
- attune_llm/security/__init__.py +17 -17
- attune_llm/utils/tokens.py +3 -1
- attune/cli_legacy.py +0 -3978
- attune/memory/short_term.py +0 -2192
- attune/workflows/manage_docs.py +0 -87
- attune/workflows/test5.py +0 -125
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"""Execution strategies for agent composition patterns.
|
|
2
2
|
|
|
3
|
-
This module implements the
|
|
3
|
+
This module implements the 13 grammar rules for composing agents:
|
|
4
4
|
1. Sequential (A → B → C)
|
|
5
5
|
2. Parallel (A || B || C)
|
|
6
6
|
3. Debate (A ⇄ B ⇄ C → Synthesis)
|
|
@@ -8,6 +8,12 @@ This module implements the 7 grammar rules for composing agents:
|
|
|
8
8
|
5. Refinement (Draft → Review → Polish)
|
|
9
9
|
6. Adaptive (Classifier → Specialist)
|
|
10
10
|
7. Conditional (if X then A else B) - branching based on gates
|
|
11
|
+
8. MultiConditional (switch/case pattern)
|
|
12
|
+
9. Nested (recursive workflow execution)
|
|
13
|
+
10. NestedSequential (sequential with nested workflow support)
|
|
14
|
+
11. ToolEnhanced (single agent with comprehensive tools)
|
|
15
|
+
12. PromptCachedSequential (sequential with cached context)
|
|
16
|
+
13. DelegationChain (hierarchical delegation)
|
|
11
17
|
|
|
12
18
|
Security:
|
|
13
19
|
- All agent outputs validated before passing to next agent
|
|
@@ -30,1547 +36,50 @@ Example:
|
|
|
30
36
|
"""
|
|
31
37
|
|
|
32
38
|
import asyncio
|
|
33
|
-
import json
|
|
34
39
|
import logging
|
|
35
|
-
import operator
|
|
36
|
-
import re
|
|
37
|
-
from abc import ABC, abstractmethod
|
|
38
|
-
from collections.abc import Callable
|
|
39
|
-
from dataclasses import dataclass, field
|
|
40
|
-
from enum import Enum
|
|
41
40
|
from typing import Any
|
|
42
41
|
|
|
42
|
+
from ._strategies.base import ExecutionStrategy
|
|
43
|
+
from ._strategies.conditional_strategies import (
|
|
44
|
+
ConditionalStrategy,
|
|
45
|
+
MultiConditionalStrategy,
|
|
46
|
+
NestedSequentialStrategy,
|
|
47
|
+
NestedStrategy,
|
|
48
|
+
StepDefinition, # noqa: F401 - re-exported
|
|
49
|
+
)
|
|
50
|
+
from ._strategies.conditions import (
|
|
51
|
+
Branch, # noqa: F401 - re-exported
|
|
52
|
+
Condition, # noqa: F401 - re-exported
|
|
53
|
+
ConditionEvaluator, # noqa: F401 - re-exported
|
|
54
|
+
ConditionType, # noqa: F401 - re-exported
|
|
55
|
+
)
|
|
56
|
+
from ._strategies.core_strategies import (
|
|
57
|
+
AdaptiveStrategy,
|
|
58
|
+
DebateStrategy,
|
|
59
|
+
ParallelStrategy,
|
|
60
|
+
RefinementStrategy,
|
|
61
|
+
SequentialStrategy,
|
|
62
|
+
TeachingStrategy,
|
|
63
|
+
)
|
|
64
|
+
from ._strategies.nesting import (
|
|
65
|
+
WORKFLOW_REGISTRY, # noqa: F401 - re-exported
|
|
66
|
+
InlineWorkflow, # noqa: F401 - re-exported
|
|
67
|
+
NestingContext, # noqa: F401 - re-exported
|
|
68
|
+
WorkflowDefinition, # noqa: F401 - re-exported
|
|
69
|
+
WorkflowReference, # noqa: F401 - re-exported
|
|
70
|
+
get_workflow, # noqa: F401 - re-exported
|
|
71
|
+
register_workflow, # noqa: F401 - re-exported
|
|
72
|
+
)
|
|
73
|
+
|
|
74
|
+
# Import from submodule for modular organization
|
|
75
|
+
from ._strategies.data_classes import AgentResult, StrategyResult
|
|
43
76
|
from .agent_templates import AgentTemplate
|
|
44
77
|
|
|
45
78
|
logger = logging.getLogger(__name__)
|
|
46
79
|
|
|
47
80
|
|
|
48
|
-
@dataclass
|
|
49
|
-
class AgentResult:
|
|
50
|
-
"""Result from agent execution.
|
|
51
|
-
|
|
52
|
-
Attributes:
|
|
53
|
-
agent_id: ID of agent that produced result
|
|
54
|
-
success: Whether execution succeeded
|
|
55
|
-
output: Agent output data
|
|
56
|
-
confidence: Confidence score (0-1)
|
|
57
|
-
duration_seconds: Execution time
|
|
58
|
-
error: Error message if failed
|
|
59
|
-
"""
|
|
60
|
-
|
|
61
|
-
agent_id: str
|
|
62
|
-
success: bool
|
|
63
|
-
output: dict[str, Any]
|
|
64
|
-
confidence: float = 0.0
|
|
65
|
-
duration_seconds: float = 0.0
|
|
66
|
-
error: str = ""
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
@dataclass
|
|
70
|
-
class StrategyResult:
|
|
71
|
-
"""Aggregated result from strategy execution.
|
|
72
|
-
|
|
73
|
-
Attributes:
|
|
74
|
-
success: Whether overall execution succeeded
|
|
75
|
-
outputs: List of individual agent results
|
|
76
|
-
aggregated_output: Combined/synthesized output
|
|
77
|
-
total_duration: Total execution time
|
|
78
|
-
errors: List of errors encountered
|
|
79
|
-
"""
|
|
80
|
-
|
|
81
|
-
success: bool
|
|
82
|
-
outputs: list[AgentResult]
|
|
83
|
-
aggregated_output: dict[str, Any]
|
|
84
|
-
total_duration: float = 0.0
|
|
85
|
-
errors: list[str] = field(default_factory=list)
|
|
86
|
-
|
|
87
|
-
def __post_init__(self):
|
|
88
|
-
"""Initialize errors list if None."""
|
|
89
|
-
if not self.errors:
|
|
90
|
-
self.errors = []
|
|
91
|
-
|
|
92
|
-
|
|
93
81
|
# =============================================================================
|
|
94
|
-
#
|
|
95
|
-
# =============================================================================
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
class ConditionType(Enum):
|
|
99
|
-
"""Type of condition for gate evaluation.
|
|
100
|
-
|
|
101
|
-
Attributes:
|
|
102
|
-
JSON_PREDICATE: MongoDB-style JSON predicate ({"field": {"$op": value}})
|
|
103
|
-
NATURAL_LANGUAGE: LLM-interpreted natural language condition
|
|
104
|
-
COMPOSITE: Logical combination of conditions (AND/OR)
|
|
105
|
-
"""
|
|
106
|
-
|
|
107
|
-
JSON_PREDICATE = "json"
|
|
108
|
-
NATURAL_LANGUAGE = "natural"
|
|
109
|
-
COMPOSITE = "composite"
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
@dataclass
|
|
113
|
-
class Condition:
|
|
114
|
-
"""A conditional gate for branching in agent workflows.
|
|
115
|
-
|
|
116
|
-
Supports hybrid syntax: JSON predicates for simple conditions,
|
|
117
|
-
natural language for complex semantic conditions.
|
|
118
|
-
|
|
119
|
-
Attributes:
|
|
120
|
-
predicate: JSON predicate dict or natural language string
|
|
121
|
-
condition_type: How to evaluate the condition
|
|
122
|
-
description: Human-readable description of the condition
|
|
123
|
-
source_field: Which field(s) in context to evaluate
|
|
124
|
-
|
|
125
|
-
JSON Predicate Operators:
|
|
126
|
-
$eq: Equal to value
|
|
127
|
-
$ne: Not equal to value
|
|
128
|
-
$gt: Greater than value
|
|
129
|
-
$gte: Greater than or equal to value
|
|
130
|
-
$lt: Less than value
|
|
131
|
-
$lte: Less than or equal to value
|
|
132
|
-
$in: Value is in list
|
|
133
|
-
$nin: Value is not in list
|
|
134
|
-
$exists: Field exists (or not)
|
|
135
|
-
$regex: Matches regex pattern
|
|
136
|
-
|
|
137
|
-
Example (JSON):
|
|
138
|
-
>>> # Low confidence triggers expert review
|
|
139
|
-
>>> cond = Condition(
|
|
140
|
-
... predicate={"confidence": {"$lt": 0.8}},
|
|
141
|
-
... description="Confidence is below threshold"
|
|
142
|
-
... )
|
|
143
|
-
|
|
144
|
-
Example (Natural Language):
|
|
145
|
-
>>> # LLM interprets complex semantic condition
|
|
146
|
-
>>> cond = Condition(
|
|
147
|
-
... predicate="The security audit found critical vulnerabilities",
|
|
148
|
-
... condition_type=ConditionType.NATURAL_LANGUAGE,
|
|
149
|
-
... description="Security issues detected"
|
|
150
|
-
... )
|
|
151
|
-
"""
|
|
152
|
-
|
|
153
|
-
predicate: dict[str, Any] | str
|
|
154
|
-
condition_type: ConditionType = ConditionType.JSON_PREDICATE
|
|
155
|
-
description: str = ""
|
|
156
|
-
source_field: str = "" # Empty means evaluate whole context
|
|
157
|
-
|
|
158
|
-
def __post_init__(self):
|
|
159
|
-
"""Validate condition and auto-detect type."""
|
|
160
|
-
if isinstance(self.predicate, str):
|
|
161
|
-
# Auto-detect: if it looks like prose, it's natural language
|
|
162
|
-
if " " in self.predicate and not self.predicate.startswith("{"):
|
|
163
|
-
object.__setattr__(self, "condition_type", ConditionType.NATURAL_LANGUAGE)
|
|
164
|
-
elif isinstance(self.predicate, dict):
|
|
165
|
-
# Validate JSON predicate structure
|
|
166
|
-
self._validate_predicate(self.predicate)
|
|
167
|
-
else:
|
|
168
|
-
raise ValueError(f"predicate must be dict or str, got {type(self.predicate)}")
|
|
169
|
-
|
|
170
|
-
def _validate_predicate(self, predicate: dict[str, Any]) -> None:
|
|
171
|
-
"""Validate JSON predicate structure (no code execution).
|
|
172
|
-
|
|
173
|
-
Args:
|
|
174
|
-
predicate: The predicate dict to validate
|
|
175
|
-
|
|
176
|
-
Raises:
|
|
177
|
-
ValueError: If predicate contains invalid operators
|
|
178
|
-
"""
|
|
179
|
-
valid_operators = {
|
|
180
|
-
"$eq",
|
|
181
|
-
"$ne",
|
|
182
|
-
"$gt",
|
|
183
|
-
"$gte",
|
|
184
|
-
"$lt",
|
|
185
|
-
"$lte",
|
|
186
|
-
"$in",
|
|
187
|
-
"$nin",
|
|
188
|
-
"$exists",
|
|
189
|
-
"$regex",
|
|
190
|
-
"$and",
|
|
191
|
-
"$or",
|
|
192
|
-
"$not",
|
|
193
|
-
}
|
|
194
|
-
|
|
195
|
-
for key, value in predicate.items():
|
|
196
|
-
if key.startswith("$"):
|
|
197
|
-
if key not in valid_operators:
|
|
198
|
-
raise ValueError(f"Invalid operator: {key}")
|
|
199
|
-
if isinstance(value, dict):
|
|
200
|
-
self._validate_predicate(value)
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
@dataclass
|
|
204
|
-
class Branch:
|
|
205
|
-
"""A branch in conditional execution.
|
|
206
|
-
|
|
207
|
-
Attributes:
|
|
208
|
-
agents: Agents to execute in this branch
|
|
209
|
-
strategy: Strategy to use for executing agents (default: sequential)
|
|
210
|
-
label: Human-readable branch label
|
|
211
|
-
"""
|
|
212
|
-
|
|
213
|
-
agents: list[AgentTemplate]
|
|
214
|
-
strategy: str = "sequential"
|
|
215
|
-
label: str = ""
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
# =============================================================================
|
|
219
|
-
# Nested Sentence Types (Phase 2 - Recursive Composition)
|
|
220
|
-
# =============================================================================
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
@dataclass
|
|
224
|
-
class WorkflowReference:
|
|
225
|
-
"""Reference to a workflow for nested composition.
|
|
226
|
-
|
|
227
|
-
Enables "sentences within sentences" - workflows that invoke other workflows.
|
|
228
|
-
Supports both registered workflow IDs and inline definitions.
|
|
229
|
-
|
|
230
|
-
Attributes:
|
|
231
|
-
workflow_id: ID of registered workflow (mutually exclusive with inline)
|
|
232
|
-
inline: Inline workflow definition (mutually exclusive with workflow_id)
|
|
233
|
-
context_mapping: Optional mapping of parent context fields to child
|
|
234
|
-
result_key: Key to store nested workflow result in parent context
|
|
235
|
-
|
|
236
|
-
Example (by ID):
|
|
237
|
-
>>> ref = WorkflowReference(
|
|
238
|
-
... workflow_id="security-audit-team",
|
|
239
|
-
... result_key="security_result"
|
|
240
|
-
... )
|
|
241
|
-
|
|
242
|
-
Example (inline):
|
|
243
|
-
>>> ref = WorkflowReference(
|
|
244
|
-
... inline=InlineWorkflow(
|
|
245
|
-
... agents=[agent1, agent2],
|
|
246
|
-
... strategy="parallel"
|
|
247
|
-
... ),
|
|
248
|
-
... result_key="analysis_result"
|
|
249
|
-
... )
|
|
250
|
-
"""
|
|
251
|
-
|
|
252
|
-
workflow_id: str = ""
|
|
253
|
-
inline: "InlineWorkflow | None" = None
|
|
254
|
-
context_mapping: dict[str, str] = field(default_factory=dict)
|
|
255
|
-
result_key: str = "nested_result"
|
|
256
|
-
|
|
257
|
-
def __post_init__(self):
|
|
258
|
-
"""Validate that exactly one reference type is provided."""
|
|
259
|
-
if bool(self.workflow_id) == bool(self.inline):
|
|
260
|
-
raise ValueError("WorkflowReference must have exactly one of: workflow_id or inline")
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
@dataclass
|
|
264
|
-
class InlineWorkflow:
|
|
265
|
-
"""Inline workflow definition for nested composition.
|
|
266
|
-
|
|
267
|
-
Allows defining a sub-workflow directly within a parent workflow,
|
|
268
|
-
without requiring registration.
|
|
269
|
-
|
|
270
|
-
Attributes:
|
|
271
|
-
agents: Agents to execute
|
|
272
|
-
strategy: Strategy name (from STRATEGY_REGISTRY)
|
|
273
|
-
description: Human-readable description
|
|
274
|
-
|
|
275
|
-
Example:
|
|
276
|
-
>>> inline = InlineWorkflow(
|
|
277
|
-
... agents=[analyzer, reviewer],
|
|
278
|
-
... strategy="sequential",
|
|
279
|
-
... description="Code review sub-workflow"
|
|
280
|
-
... )
|
|
281
|
-
"""
|
|
282
|
-
|
|
283
|
-
agents: list[AgentTemplate]
|
|
284
|
-
strategy: str = "sequential"
|
|
285
|
-
description: str = ""
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
class NestingContext:
|
|
289
|
-
"""Tracks nesting depth and prevents infinite recursion.
|
|
290
|
-
|
|
291
|
-
Attributes:
|
|
292
|
-
current_depth: Current nesting level (0 = root)
|
|
293
|
-
max_depth: Maximum allowed nesting depth
|
|
294
|
-
workflow_stack: Stack of workflow IDs for cycle detection
|
|
295
|
-
"""
|
|
296
|
-
|
|
297
|
-
CONTEXT_KEY = "_nesting"
|
|
298
|
-
DEFAULT_MAX_DEPTH = 3
|
|
299
|
-
|
|
300
|
-
def __init__(self, max_depth: int = DEFAULT_MAX_DEPTH):
|
|
301
|
-
"""Initialize nesting context.
|
|
302
|
-
|
|
303
|
-
Args:
|
|
304
|
-
max_depth: Maximum allowed nesting depth
|
|
305
|
-
"""
|
|
306
|
-
self.current_depth = 0
|
|
307
|
-
self.max_depth = max_depth
|
|
308
|
-
self.workflow_stack: list[str] = []
|
|
309
|
-
|
|
310
|
-
@classmethod
|
|
311
|
-
def from_context(cls, context: dict[str, Any]) -> "NestingContext":
|
|
312
|
-
"""Extract or create NestingContext from execution context.
|
|
313
|
-
|
|
314
|
-
Args:
|
|
315
|
-
context: Execution context dict
|
|
316
|
-
|
|
317
|
-
Returns:
|
|
318
|
-
NestingContext instance
|
|
319
|
-
"""
|
|
320
|
-
if cls.CONTEXT_KEY in context:
|
|
321
|
-
return context[cls.CONTEXT_KEY]
|
|
322
|
-
return cls()
|
|
323
|
-
|
|
324
|
-
def can_nest(self, workflow_id: str = "") -> bool:
|
|
325
|
-
"""Check if another nesting level is allowed.
|
|
326
|
-
|
|
327
|
-
Args:
|
|
328
|
-
workflow_id: ID of workflow to nest (for cycle detection)
|
|
329
|
-
|
|
330
|
-
Returns:
|
|
331
|
-
True if nesting is allowed
|
|
332
|
-
"""
|
|
333
|
-
if self.current_depth >= self.max_depth:
|
|
334
|
-
return False
|
|
335
|
-
if workflow_id and workflow_id in self.workflow_stack:
|
|
336
|
-
return False # Cycle detected
|
|
337
|
-
return True
|
|
338
|
-
|
|
339
|
-
def enter(self, workflow_id: str = "") -> "NestingContext":
|
|
340
|
-
"""Create a child context for nested execution.
|
|
341
|
-
|
|
342
|
-
Args:
|
|
343
|
-
workflow_id: ID of workflow being entered
|
|
344
|
-
|
|
345
|
-
Returns:
|
|
346
|
-
New NestingContext with incremented depth
|
|
347
|
-
"""
|
|
348
|
-
child = NestingContext(self.max_depth)
|
|
349
|
-
child.current_depth = self.current_depth + 1
|
|
350
|
-
child.workflow_stack = self.workflow_stack.copy()
|
|
351
|
-
if workflow_id:
|
|
352
|
-
child.workflow_stack.append(workflow_id)
|
|
353
|
-
return child
|
|
354
|
-
|
|
355
|
-
def to_context(self, context: dict[str, Any]) -> dict[str, Any]:
|
|
356
|
-
"""Add nesting context to execution context.
|
|
357
|
-
|
|
358
|
-
Args:
|
|
359
|
-
context: Execution context dict
|
|
360
|
-
|
|
361
|
-
Returns:
|
|
362
|
-
Updated context with nesting info
|
|
363
|
-
"""
|
|
364
|
-
context = context.copy()
|
|
365
|
-
context[self.CONTEXT_KEY] = self
|
|
366
|
-
return context
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
# Registry for named workflows (populated at runtime)
|
|
370
|
-
WORKFLOW_REGISTRY: dict[str, "WorkflowDefinition"] = {}
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
@dataclass
|
|
374
|
-
class WorkflowDefinition:
|
|
375
|
-
"""A registered workflow definition.
|
|
376
|
-
|
|
377
|
-
Workflows can be registered and referenced by ID in nested compositions.
|
|
378
|
-
|
|
379
|
-
Attributes:
|
|
380
|
-
id: Unique workflow identifier
|
|
381
|
-
agents: Agents in the workflow
|
|
382
|
-
strategy: Composition strategy name
|
|
383
|
-
description: Human-readable description
|
|
384
|
-
"""
|
|
385
|
-
|
|
386
|
-
id: str
|
|
387
|
-
agents: list[AgentTemplate]
|
|
388
|
-
strategy: str = "sequential"
|
|
389
|
-
description: str = ""
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
def register_workflow(workflow: WorkflowDefinition) -> None:
|
|
393
|
-
"""Register a workflow for nested references.
|
|
394
|
-
|
|
395
|
-
Args:
|
|
396
|
-
workflow: Workflow definition to register
|
|
397
|
-
"""
|
|
398
|
-
WORKFLOW_REGISTRY[workflow.id] = workflow
|
|
399
|
-
logger.info(f"Registered workflow: {workflow.id}")
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
def get_workflow(workflow_id: str) -> WorkflowDefinition:
|
|
403
|
-
"""Get a registered workflow by ID.
|
|
404
|
-
|
|
405
|
-
Args:
|
|
406
|
-
workflow_id: Workflow identifier
|
|
407
|
-
|
|
408
|
-
Returns:
|
|
409
|
-
WorkflowDefinition
|
|
410
|
-
|
|
411
|
-
Raises:
|
|
412
|
-
ValueError: If workflow is not registered
|
|
413
|
-
"""
|
|
414
|
-
if workflow_id not in WORKFLOW_REGISTRY:
|
|
415
|
-
raise ValueError(
|
|
416
|
-
f"Unknown workflow: {workflow_id}. Available: {list(WORKFLOW_REGISTRY.keys())}"
|
|
417
|
-
)
|
|
418
|
-
return WORKFLOW_REGISTRY[workflow_id]
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
class ConditionEvaluator:
|
|
422
|
-
"""Evaluates conditions against execution context.
|
|
423
|
-
|
|
424
|
-
Supports both JSON predicates (fast, deterministic) and
|
|
425
|
-
natural language conditions (LLM-interpreted, semantic).
|
|
426
|
-
|
|
427
|
-
Security:
|
|
428
|
-
- No eval() or exec() - all operators are whitelisted
|
|
429
|
-
- JSON predicates use safe comparison operators
|
|
430
|
-
- Natural language uses LLM API (no code execution)
|
|
431
|
-
"""
|
|
432
|
-
|
|
433
|
-
# Mapping of JSON operators to Python comparison functions
|
|
434
|
-
OPERATORS: dict[str, Callable[[Any, Any], bool]] = {
|
|
435
|
-
"$eq": operator.eq,
|
|
436
|
-
"$ne": operator.ne,
|
|
437
|
-
"$gt": operator.gt,
|
|
438
|
-
"$gte": operator.ge,
|
|
439
|
-
"$lt": operator.lt,
|
|
440
|
-
"$lte": operator.le,
|
|
441
|
-
"$in": lambda val, lst: val in lst,
|
|
442
|
-
"$nin": lambda val, lst: val not in lst,
|
|
443
|
-
"$exists": lambda val, exists: (val is not None) == exists,
|
|
444
|
-
"$regex": lambda val, pattern: bool(re.match(pattern, str(val))) if val else False,
|
|
445
|
-
}
|
|
446
|
-
|
|
447
|
-
def evaluate(self, condition: Condition, context: dict[str, Any]) -> bool:
|
|
448
|
-
"""Evaluate a condition against the current context.
|
|
449
|
-
|
|
450
|
-
Args:
|
|
451
|
-
condition: The condition to evaluate
|
|
452
|
-
context: Execution context with agent results
|
|
453
|
-
|
|
454
|
-
Returns:
|
|
455
|
-
True if condition is met, False otherwise
|
|
456
|
-
|
|
457
|
-
Example:
|
|
458
|
-
>>> evaluator = ConditionEvaluator()
|
|
459
|
-
>>> context = {"confidence": 0.6, "errors": 0}
|
|
460
|
-
>>> cond = Condition(predicate={"confidence": {"$lt": 0.8}})
|
|
461
|
-
>>> evaluator.evaluate(cond, context)
|
|
462
|
-
True
|
|
463
|
-
"""
|
|
464
|
-
if condition.condition_type == ConditionType.JSON_PREDICATE:
|
|
465
|
-
return self._evaluate_json(condition.predicate, context)
|
|
466
|
-
elif condition.condition_type == ConditionType.NATURAL_LANGUAGE:
|
|
467
|
-
return self._evaluate_natural_language(condition.predicate, context)
|
|
468
|
-
elif condition.condition_type == ConditionType.COMPOSITE:
|
|
469
|
-
return self._evaluate_composite(condition.predicate, context)
|
|
470
|
-
else:
|
|
471
|
-
raise ValueError(f"Unknown condition type: {condition.condition_type}")
|
|
472
|
-
|
|
473
|
-
def _evaluate_json(self, predicate: dict[str, Any], context: dict[str, Any]) -> bool:
|
|
474
|
-
"""Evaluate JSON predicate against context.
|
|
475
|
-
|
|
476
|
-
Args:
|
|
477
|
-
predicate: MongoDB-style predicate dict
|
|
478
|
-
context: Context to evaluate against
|
|
479
|
-
|
|
480
|
-
Returns:
|
|
481
|
-
True if all conditions match
|
|
482
|
-
"""
|
|
483
|
-
for field_name, condition_spec in predicate.items():
|
|
484
|
-
# Handle logical operators
|
|
485
|
-
if field_name == "$and":
|
|
486
|
-
return all(self._evaluate_json(sub, context) for sub in condition_spec)
|
|
487
|
-
if field_name == "$or":
|
|
488
|
-
return any(self._evaluate_json(sub, context) for sub in condition_spec)
|
|
489
|
-
if field_name == "$not":
|
|
490
|
-
return not self._evaluate_json(condition_spec, context)
|
|
491
|
-
|
|
492
|
-
# Get value from context (supports nested paths like "result.confidence")
|
|
493
|
-
value = self._get_nested_value(context, field_name)
|
|
494
|
-
|
|
495
|
-
# Evaluate condition
|
|
496
|
-
if isinstance(condition_spec, dict):
|
|
497
|
-
for op, target in condition_spec.items():
|
|
498
|
-
if op not in self.OPERATORS:
|
|
499
|
-
raise ValueError(f"Unknown operator: {op}")
|
|
500
|
-
if not self.OPERATORS[op](value, target):
|
|
501
|
-
return False
|
|
502
|
-
else:
|
|
503
|
-
# Direct equality check
|
|
504
|
-
if value != condition_spec:
|
|
505
|
-
return False
|
|
506
|
-
|
|
507
|
-
return True
|
|
508
|
-
|
|
509
|
-
def _get_nested_value(self, context: dict[str, Any], path: str) -> Any:
|
|
510
|
-
"""Get nested value from context using dot notation.
|
|
511
|
-
|
|
512
|
-
Args:
|
|
513
|
-
context: Context dict
|
|
514
|
-
path: Dot-separated path (e.g., "result.confidence")
|
|
515
|
-
|
|
516
|
-
Returns:
|
|
517
|
-
Value at path or None if not found
|
|
518
|
-
"""
|
|
519
|
-
parts = path.split(".")
|
|
520
|
-
current = context
|
|
521
|
-
|
|
522
|
-
for part in parts:
|
|
523
|
-
if isinstance(current, dict):
|
|
524
|
-
current = current.get(part)
|
|
525
|
-
else:
|
|
526
|
-
return None
|
|
527
|
-
|
|
528
|
-
return current
|
|
529
|
-
|
|
530
|
-
def _evaluate_natural_language(self, condition_text: str, context: dict[str, Any]) -> bool:
|
|
531
|
-
"""Evaluate natural language condition using LLM.
|
|
532
|
-
|
|
533
|
-
Args:
|
|
534
|
-
condition_text: Natural language condition
|
|
535
|
-
context: Context to evaluate against
|
|
536
|
-
|
|
537
|
-
Returns:
|
|
538
|
-
True if LLM determines condition is met
|
|
539
|
-
|
|
540
|
-
Note:
|
|
541
|
-
Falls back to keyword matching if LLM unavailable.
|
|
542
|
-
"""
|
|
543
|
-
logger.info(f"Evaluating natural language condition: {condition_text}")
|
|
544
|
-
|
|
545
|
-
# Try LLM evaluation first
|
|
546
|
-
try:
|
|
547
|
-
return self._evaluate_with_llm(condition_text, context)
|
|
548
|
-
except Exception as e:
|
|
549
|
-
logger.warning(f"LLM evaluation failed, using keyword fallback: {e}")
|
|
550
|
-
return self._keyword_fallback(condition_text, context)
|
|
551
|
-
|
|
552
|
-
def _evaluate_with_llm(self, condition_text: str, context: dict[str, Any]) -> bool:
|
|
553
|
-
"""Use LLM to evaluate natural language condition.
|
|
554
|
-
|
|
555
|
-
Args:
|
|
556
|
-
condition_text: The condition in natural language
|
|
557
|
-
context: Execution context
|
|
558
|
-
|
|
559
|
-
Returns:
|
|
560
|
-
LLM's determination (True/False)
|
|
561
|
-
"""
|
|
562
|
-
# Import LLM client lazily to avoid circular imports
|
|
563
|
-
try:
|
|
564
|
-
from ..llm import get_cheap_tier_client
|
|
565
|
-
except ImportError:
|
|
566
|
-
logger.warning("LLM client not available for natural language conditions")
|
|
567
|
-
raise
|
|
568
|
-
|
|
569
|
-
# Prepare context summary for LLM
|
|
570
|
-
context_summary = json.dumps(context, indent=2, default=str)[:2000]
|
|
571
|
-
|
|
572
|
-
prompt = f"""Evaluate whether the following condition is TRUE or FALSE based on the context.
|
|
573
|
-
|
|
574
|
-
Condition: {condition_text}
|
|
575
|
-
|
|
576
|
-
Context:
|
|
577
|
-
{context_summary}
|
|
578
|
-
|
|
579
|
-
Respond with ONLY "TRUE" or "FALSE" (no explanation)."""
|
|
580
|
-
|
|
581
|
-
client = get_cheap_tier_client()
|
|
582
|
-
response = client.complete(prompt, max_tokens=10)
|
|
583
|
-
|
|
584
|
-
result = response.strip().upper()
|
|
585
|
-
return result == "TRUE"
|
|
586
|
-
|
|
587
|
-
def _keyword_fallback(self, condition_text: str, context: dict[str, Any]) -> bool:
|
|
588
|
-
"""Fallback keyword-based evaluation for natural language.
|
|
589
|
-
|
|
590
|
-
Args:
|
|
591
|
-
condition_text: The condition text
|
|
592
|
-
context: Execution context
|
|
593
|
-
|
|
594
|
-
Returns:
|
|
595
|
-
True if keywords suggest condition is likely met
|
|
596
|
-
"""
|
|
597
|
-
# Simple keyword matching as fallback
|
|
598
|
-
condition_lower = condition_text.lower()
|
|
599
|
-
context_str = json.dumps(context, default=str).lower()
|
|
600
|
-
|
|
601
|
-
# Check for negation
|
|
602
|
-
is_negated = any(neg in condition_lower for neg in ["not ", "no ", "without "])
|
|
603
|
-
|
|
604
|
-
# Extract key terms
|
|
605
|
-
terms = re.findall(r"\b\w{4,}\b", condition_lower)
|
|
606
|
-
terms = [t for t in terms if t not in {"the", "that", "this", "with", "from"}]
|
|
607
|
-
|
|
608
|
-
# Count matching terms
|
|
609
|
-
matches = sum(1 for term in terms if term in context_str)
|
|
610
|
-
match_ratio = matches / len(terms) if terms else 0
|
|
611
|
-
|
|
612
|
-
result = match_ratio > 0.5
|
|
613
|
-
return not result if is_negated else result
|
|
614
|
-
|
|
615
|
-
def _evaluate_composite(self, predicate: dict[str, Any], context: dict[str, Any]) -> bool:
|
|
616
|
-
"""Evaluate composite condition (AND/OR of other conditions).
|
|
617
|
-
|
|
618
|
-
Args:
|
|
619
|
-
predicate: Composite predicate with $and/$or
|
|
620
|
-
context: Context to evaluate against
|
|
621
|
-
|
|
622
|
-
Returns:
|
|
623
|
-
Result of logical combination
|
|
624
|
-
"""
|
|
625
|
-
return self._evaluate_json(predicate, context)
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
class ExecutionStrategy(ABC):
|
|
629
|
-
"""Base class for agent composition strategies.
|
|
630
|
-
|
|
631
|
-
All strategies must implement execute() method to define
|
|
632
|
-
how agents are coordinated and results aggregated.
|
|
633
|
-
"""
|
|
634
|
-
|
|
635
|
-
@abstractmethod
|
|
636
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
637
|
-
"""Execute agents using this strategy.
|
|
638
|
-
|
|
639
|
-
Args:
|
|
640
|
-
agents: List of agent templates to execute
|
|
641
|
-
context: Initial context for execution
|
|
642
|
-
|
|
643
|
-
Returns:
|
|
644
|
-
StrategyResult with aggregated outputs
|
|
645
|
-
|
|
646
|
-
Raises:
|
|
647
|
-
ValueError: If agents list is empty
|
|
648
|
-
TimeoutError: If execution exceeds timeout
|
|
649
|
-
"""
|
|
650
|
-
pass
|
|
651
|
-
|
|
652
|
-
async def _execute_agent(self, agent: AgentTemplate, context: dict[str, Any]) -> AgentResult:
|
|
653
|
-
"""Execute a single agent with real analysis tools.
|
|
654
|
-
|
|
655
|
-
Maps agent capabilities to real tool implementations and executes them.
|
|
656
|
-
|
|
657
|
-
Args:
|
|
658
|
-
agent: Agent template to execute
|
|
659
|
-
context: Execution context
|
|
660
|
-
|
|
661
|
-
Returns:
|
|
662
|
-
AgentResult with execution outcome
|
|
663
|
-
"""
|
|
664
|
-
import time
|
|
665
|
-
|
|
666
|
-
from ..orchestration.real_tools import (
|
|
667
|
-
RealCodeQualityAnalyzer,
|
|
668
|
-
RealCoverageAnalyzer,
|
|
669
|
-
RealDocumentationAnalyzer,
|
|
670
|
-
RealSecurityAuditor,
|
|
671
|
-
)
|
|
672
|
-
|
|
673
|
-
logger.info(f"Executing agent: {agent.id} ({agent.role})")
|
|
674
|
-
start_time = time.perf_counter()
|
|
675
|
-
|
|
676
|
-
# Get project root from context
|
|
677
|
-
project_root = context.get("project_root", ".")
|
|
678
|
-
target_path = context.get("target_path", "src")
|
|
679
|
-
|
|
680
|
-
try:
|
|
681
|
-
# Map agent ID to real tool implementation
|
|
682
|
-
if agent.id == "security_auditor" or "security" in agent.role.lower():
|
|
683
|
-
auditor = RealSecurityAuditor(project_root)
|
|
684
|
-
report = auditor.audit(target_path)
|
|
685
|
-
|
|
686
|
-
output = {
|
|
687
|
-
"agent_role": agent.role,
|
|
688
|
-
"total_issues": report.total_issues,
|
|
689
|
-
"critical_issues": report.critical_count, # Match workflow field name
|
|
690
|
-
"high_issues": report.high_count, # Match workflow field name
|
|
691
|
-
"medium_issues": report.medium_count, # Match workflow field name
|
|
692
|
-
"passed": report.passed,
|
|
693
|
-
"issues_by_file": report.issues_by_file,
|
|
694
|
-
}
|
|
695
|
-
success = report.passed
|
|
696
|
-
confidence = 1.0 if report.total_issues == 0 else 0.7
|
|
697
|
-
|
|
698
|
-
elif agent.id == "test_coverage_analyzer" or "coverage" in agent.role.lower():
|
|
699
|
-
analyzer = RealCoverageAnalyzer(project_root)
|
|
700
|
-
report = analyzer.analyze() # Analyzes all packages automatically
|
|
701
|
-
|
|
702
|
-
output = {
|
|
703
|
-
"agent_role": agent.role,
|
|
704
|
-
"coverage_percent": report.total_coverage, # Match workflow field name
|
|
705
|
-
"total_coverage": report.total_coverage, # Keep for compatibility
|
|
706
|
-
"files_analyzed": report.files_analyzed,
|
|
707
|
-
"uncovered_files": report.uncovered_files,
|
|
708
|
-
"passed": report.total_coverage >= 80.0,
|
|
709
|
-
}
|
|
710
|
-
success = report.total_coverage >= 80.0
|
|
711
|
-
confidence = min(report.total_coverage / 100.0, 1.0)
|
|
712
|
-
|
|
713
|
-
elif agent.id == "code_reviewer" or "quality" in agent.role.lower():
|
|
714
|
-
analyzer = RealCodeQualityAnalyzer(project_root)
|
|
715
|
-
report = analyzer.analyze(target_path)
|
|
716
|
-
|
|
717
|
-
output = {
|
|
718
|
-
"agent_role": agent.role,
|
|
719
|
-
"quality_score": report.quality_score,
|
|
720
|
-
"ruff_issues": report.ruff_issues,
|
|
721
|
-
"mypy_issues": report.mypy_issues,
|
|
722
|
-
"total_files": report.total_files,
|
|
723
|
-
"passed": report.passed,
|
|
724
|
-
}
|
|
725
|
-
success = report.passed
|
|
726
|
-
confidence = report.quality_score / 10.0
|
|
727
|
-
|
|
728
|
-
elif agent.id == "documentation_writer" or "documentation" in agent.role.lower():
|
|
729
|
-
analyzer = RealDocumentationAnalyzer(project_root)
|
|
730
|
-
report = analyzer.analyze(target_path)
|
|
731
|
-
|
|
732
|
-
output = {
|
|
733
|
-
"agent_role": agent.role,
|
|
734
|
-
"completeness": report.completeness_percentage,
|
|
735
|
-
"coverage_percent": report.completeness_percentage, # Match Release Prep field name
|
|
736
|
-
"total_functions": report.total_functions,
|
|
737
|
-
"documented_functions": report.documented_functions,
|
|
738
|
-
"total_classes": report.total_classes,
|
|
739
|
-
"documented_classes": report.documented_classes,
|
|
740
|
-
"missing_docstrings": report.missing_docstrings,
|
|
741
|
-
"passed": report.passed,
|
|
742
|
-
}
|
|
743
|
-
success = report.passed
|
|
744
|
-
confidence = report.completeness_percentage / 100.0
|
|
745
|
-
|
|
746
|
-
elif agent.id == "performance_optimizer" or "performance" in agent.role.lower():
|
|
747
|
-
# Performance analysis placeholder - mark as passed for now
|
|
748
|
-
# TODO: Implement real performance profiling
|
|
749
|
-
logger.warning("Performance analysis not yet implemented, returning placeholder")
|
|
750
|
-
output = {
|
|
751
|
-
"agent_role": agent.role,
|
|
752
|
-
"message": "Performance analysis not yet implemented",
|
|
753
|
-
"passed": True,
|
|
754
|
-
"placeholder": True,
|
|
755
|
-
}
|
|
756
|
-
success = True
|
|
757
|
-
confidence = 1.0
|
|
758
|
-
|
|
759
|
-
elif agent.id == "test_generator":
|
|
760
|
-
# Test generation requires different handling (LLM-based)
|
|
761
|
-
logger.info("Test generation requires manual invocation, returning placeholder")
|
|
762
|
-
output = {
|
|
763
|
-
"agent_role": agent.role,
|
|
764
|
-
"message": "Test generation requires manual invocation",
|
|
765
|
-
"passed": True,
|
|
766
|
-
}
|
|
767
|
-
success = True
|
|
768
|
-
confidence = 0.8
|
|
769
|
-
|
|
770
|
-
else:
|
|
771
|
-
# Unknown agent type - log warning and return placeholder
|
|
772
|
-
logger.warning(f"Unknown agent type: {agent.id}, returning placeholder")
|
|
773
|
-
output = {
|
|
774
|
-
"agent_role": agent.role,
|
|
775
|
-
"agent_id": agent.id,
|
|
776
|
-
"message": "Unknown agent type - no real implementation",
|
|
777
|
-
"passed": True,
|
|
778
|
-
}
|
|
779
|
-
success = True
|
|
780
|
-
confidence = 0.5
|
|
781
|
-
|
|
782
|
-
duration = time.perf_counter() - start_time
|
|
783
|
-
|
|
784
|
-
logger.info(
|
|
785
|
-
f"Agent {agent.id} completed: success={success}, "
|
|
786
|
-
f"confidence={confidence:.2f}, duration={duration:.2f}s"
|
|
787
|
-
)
|
|
788
|
-
|
|
789
|
-
return AgentResult(
|
|
790
|
-
agent_id=agent.id,
|
|
791
|
-
success=success,
|
|
792
|
-
output=output,
|
|
793
|
-
confidence=confidence,
|
|
794
|
-
duration_seconds=duration,
|
|
795
|
-
)
|
|
796
|
-
|
|
797
|
-
except Exception as e:
|
|
798
|
-
duration = time.perf_counter() - start_time
|
|
799
|
-
logger.error(f"Agent {agent.id} failed: {e}")
|
|
800
|
-
|
|
801
|
-
return AgentResult(
|
|
802
|
-
agent_id=agent.id,
|
|
803
|
-
success=False,
|
|
804
|
-
output={"agent_role": agent.role, "error_details": str(e)},
|
|
805
|
-
error=str(e),
|
|
806
|
-
confidence=0.0,
|
|
807
|
-
duration_seconds=duration,
|
|
808
|
-
)
|
|
809
|
-
|
|
810
|
-
def _aggregate_results(self, results: list[AgentResult]) -> dict[str, Any]:
|
|
811
|
-
"""Aggregate results from multiple agents.
|
|
812
|
-
|
|
813
|
-
Args:
|
|
814
|
-
results: List of agent results
|
|
815
|
-
|
|
816
|
-
Returns:
|
|
817
|
-
Aggregated output dictionary
|
|
818
|
-
"""
|
|
819
|
-
return {
|
|
820
|
-
"num_agents": len(results),
|
|
821
|
-
"all_succeeded": all(r.success for r in results),
|
|
822
|
-
"avg_confidence": (
|
|
823
|
-
sum(r.confidence for r in results) / len(results) if results else 0.0
|
|
824
|
-
),
|
|
825
|
-
"outputs": [r.output for r in results],
|
|
826
|
-
}
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
class SequentialStrategy(ExecutionStrategy):
|
|
830
|
-
"""Sequential composition (A → B → C).
|
|
831
|
-
|
|
832
|
-
Executes agents one after another, passing results forward.
|
|
833
|
-
Each agent receives output from previous agent in context.
|
|
834
|
-
|
|
835
|
-
Use when:
|
|
836
|
-
- Tasks must be done in order
|
|
837
|
-
- Each step depends on previous results
|
|
838
|
-
- Pipeline processing needed
|
|
839
|
-
|
|
840
|
-
Example:
|
|
841
|
-
Coverage Analyzer → Test Generator → Quality Validator
|
|
842
|
-
"""
|
|
843
|
-
|
|
844
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
845
|
-
"""Execute agents sequentially.
|
|
846
|
-
|
|
847
|
-
Args:
|
|
848
|
-
agents: List of agents to execute in order
|
|
849
|
-
context: Initial context
|
|
850
|
-
|
|
851
|
-
Returns:
|
|
852
|
-
StrategyResult with sequential execution results
|
|
853
|
-
"""
|
|
854
|
-
if not agents:
|
|
855
|
-
raise ValueError("agents list cannot be empty")
|
|
856
|
-
|
|
857
|
-
logger.info(f"Sequential execution of {len(agents)} agents")
|
|
858
|
-
|
|
859
|
-
results: list[AgentResult] = []
|
|
860
|
-
current_context = context.copy()
|
|
861
|
-
total_duration = 0.0
|
|
862
|
-
|
|
863
|
-
for agent in agents:
|
|
864
|
-
try:
|
|
865
|
-
result = await self._execute_agent(agent, current_context)
|
|
866
|
-
results.append(result)
|
|
867
|
-
total_duration += result.duration_seconds
|
|
868
|
-
|
|
869
|
-
# Pass output to next agent's context
|
|
870
|
-
if result.success:
|
|
871
|
-
current_context[f"{agent.id}_output"] = result.output
|
|
872
|
-
else:
|
|
873
|
-
logger.error(f"Agent {agent.id} failed: {result.error}")
|
|
874
|
-
# Continue or stop based on error handling policy
|
|
875
|
-
# For now: continue to next agent
|
|
876
|
-
|
|
877
|
-
except Exception as e:
|
|
878
|
-
logger.exception(f"Error executing agent {agent.id}: {e}")
|
|
879
|
-
results.append(
|
|
880
|
-
AgentResult(
|
|
881
|
-
agent_id=agent.id,
|
|
882
|
-
success=False,
|
|
883
|
-
output={},
|
|
884
|
-
error=str(e),
|
|
885
|
-
)
|
|
886
|
-
)
|
|
887
|
-
|
|
888
|
-
return StrategyResult(
|
|
889
|
-
success=all(r.success for r in results),
|
|
890
|
-
outputs=results,
|
|
891
|
-
aggregated_output=self._aggregate_results(results),
|
|
892
|
-
total_duration=total_duration,
|
|
893
|
-
errors=[r.error for r in results if not r.success],
|
|
894
|
-
)
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
class ParallelStrategy(ExecutionStrategy):
|
|
898
|
-
"""Parallel composition (A || B || C).
|
|
899
|
-
|
|
900
|
-
Executes all agents simultaneously, aggregates results.
|
|
901
|
-
Each agent receives same initial context.
|
|
902
|
-
|
|
903
|
-
Use when:
|
|
904
|
-
- Independent validations needed
|
|
905
|
-
- Multi-perspective review desired
|
|
906
|
-
- Time optimization important
|
|
907
|
-
|
|
908
|
-
Example:
|
|
909
|
-
Security Audit || Performance Check || Code Quality || Docs Check
|
|
910
|
-
"""
|
|
911
|
-
|
|
912
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
913
|
-
"""Execute agents in parallel.
|
|
914
|
-
|
|
915
|
-
Args:
|
|
916
|
-
agents: List of agents to execute concurrently
|
|
917
|
-
context: Initial context for all agents
|
|
918
|
-
|
|
919
|
-
Returns:
|
|
920
|
-
StrategyResult with parallel execution results
|
|
921
|
-
"""
|
|
922
|
-
if not agents:
|
|
923
|
-
raise ValueError("agents list cannot be empty")
|
|
924
|
-
|
|
925
|
-
logger.info(f"Parallel execution of {len(agents)} agents")
|
|
926
|
-
|
|
927
|
-
# Execute all agents concurrently
|
|
928
|
-
tasks = [self._execute_agent(agent, context) for agent in agents]
|
|
929
|
-
|
|
930
|
-
try:
|
|
931
|
-
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
932
|
-
except Exception as e:
|
|
933
|
-
logger.exception(f"Error in parallel execution: {e}")
|
|
934
|
-
raise
|
|
935
|
-
|
|
936
|
-
# Process results (handle exceptions)
|
|
937
|
-
processed_results: list[AgentResult] = []
|
|
938
|
-
for i, result in enumerate(results):
|
|
939
|
-
if isinstance(result, Exception):
|
|
940
|
-
logger.error(f"Agent {agents[i].id} raised exception: {result}")
|
|
941
|
-
processed_results.append(
|
|
942
|
-
AgentResult(
|
|
943
|
-
agent_id=agents[i].id,
|
|
944
|
-
success=False,
|
|
945
|
-
output={},
|
|
946
|
-
error=str(result),
|
|
947
|
-
)
|
|
948
|
-
)
|
|
949
|
-
else:
|
|
950
|
-
# Type checker doesn't know we already filtered out exceptions
|
|
951
|
-
assert isinstance(result, AgentResult)
|
|
952
|
-
processed_results.append(result)
|
|
953
|
-
|
|
954
|
-
total_duration = max((r.duration_seconds for r in processed_results), default=0.0)
|
|
955
|
-
|
|
956
|
-
return StrategyResult(
|
|
957
|
-
success=all(r.success for r in processed_results),
|
|
958
|
-
outputs=processed_results,
|
|
959
|
-
aggregated_output=self._aggregate_results(processed_results),
|
|
960
|
-
total_duration=total_duration,
|
|
961
|
-
errors=[r.error for r in processed_results if not r.success],
|
|
962
|
-
)
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
class DebateStrategy(ExecutionStrategy):
|
|
966
|
-
"""Debate/Consensus composition (A ⇄ B ⇄ C → Synthesis).
|
|
967
|
-
|
|
968
|
-
Agents provide independent opinions, then a synthesizer
|
|
969
|
-
aggregates and resolves conflicts.
|
|
970
|
-
|
|
971
|
-
Use when:
|
|
972
|
-
- Multiple expert opinions needed
|
|
973
|
-
- Architecture decisions require debate
|
|
974
|
-
- Tradeoff analysis needed
|
|
975
|
-
|
|
976
|
-
Example:
|
|
977
|
-
Architect(scale) || Architect(cost) || Architect(simplicity) → Synthesizer
|
|
978
|
-
"""
|
|
979
|
-
|
|
980
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
981
|
-
"""Execute debate pattern.
|
|
982
|
-
|
|
983
|
-
Args:
|
|
984
|
-
agents: List of agents to debate (recommend 2-4)
|
|
985
|
-
context: Initial context
|
|
986
|
-
|
|
987
|
-
Returns:
|
|
988
|
-
StrategyResult with synthesized consensus
|
|
989
|
-
"""
|
|
990
|
-
if not agents:
|
|
991
|
-
raise ValueError("agents list cannot be empty")
|
|
992
|
-
|
|
993
|
-
if len(agents) < 2:
|
|
994
|
-
logger.warning("Debate pattern works best with 2+ agents")
|
|
995
|
-
|
|
996
|
-
logger.info(f"Debate execution with {len(agents)} agents")
|
|
997
|
-
|
|
998
|
-
# Phase 1: Parallel execution for independent opinions
|
|
999
|
-
parallel_strategy = ParallelStrategy()
|
|
1000
|
-
phase1_result = await parallel_strategy.execute(agents, context)
|
|
1001
|
-
|
|
1002
|
-
# Phase 2: Synthesis (simplified - no actual synthesizer agent)
|
|
1003
|
-
# In production: would use dedicated synthesizer agent
|
|
1004
|
-
synthesis = {
|
|
1005
|
-
"debate_participants": [r.agent_id for r in phase1_result.outputs],
|
|
1006
|
-
"opinions": [r.output for r in phase1_result.outputs],
|
|
1007
|
-
"consensus": self._synthesize_opinions(phase1_result.outputs),
|
|
1008
|
-
}
|
|
1009
|
-
|
|
1010
|
-
return StrategyResult(
|
|
1011
|
-
success=phase1_result.success,
|
|
1012
|
-
outputs=phase1_result.outputs,
|
|
1013
|
-
aggregated_output=synthesis,
|
|
1014
|
-
total_duration=phase1_result.total_duration,
|
|
1015
|
-
errors=phase1_result.errors,
|
|
1016
|
-
)
|
|
1017
|
-
|
|
1018
|
-
def _synthesize_opinions(self, results: list[AgentResult]) -> dict[str, Any]:
|
|
1019
|
-
"""Synthesize multiple agent opinions into consensus.
|
|
1020
|
-
|
|
1021
|
-
Args:
|
|
1022
|
-
results: Agent results to synthesize
|
|
1023
|
-
|
|
1024
|
-
Returns:
|
|
1025
|
-
Synthesized consensus
|
|
1026
|
-
"""
|
|
1027
|
-
# Simplified synthesis: majority vote on success
|
|
1028
|
-
success_votes = sum(1 for r in results if r.success)
|
|
1029
|
-
consensus_reached = success_votes > len(results) / 2
|
|
1030
|
-
|
|
1031
|
-
return {
|
|
1032
|
-
"consensus_reached": consensus_reached,
|
|
1033
|
-
"success_votes": success_votes,
|
|
1034
|
-
"total_votes": len(results),
|
|
1035
|
-
"avg_confidence": (
|
|
1036
|
-
sum(r.confidence for r in results) / len(results) if results else 0.0
|
|
1037
|
-
),
|
|
1038
|
-
}
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
class TeachingStrategy(ExecutionStrategy):
|
|
1042
|
-
"""Teaching/Validation (Junior → Expert Review).
|
|
1043
|
-
|
|
1044
|
-
Junior agent attempts task (cheap tier), expert validates.
|
|
1045
|
-
If validation fails, expert takes over.
|
|
1046
|
-
|
|
1047
|
-
Use when:
|
|
1048
|
-
- Cost-effective generation desired
|
|
1049
|
-
- Quality assurance critical
|
|
1050
|
-
- Simple tasks with review needed
|
|
1051
|
-
|
|
1052
|
-
Example:
|
|
1053
|
-
Junior Writer(CHEAP) → Quality Gate → (pass ? done : Expert Review(CAPABLE))
|
|
1054
|
-
"""
|
|
1055
|
-
|
|
1056
|
-
def __init__(self, quality_threshold: float = 0.7):
|
|
1057
|
-
"""Initialize teaching strategy.
|
|
1058
|
-
|
|
1059
|
-
Args:
|
|
1060
|
-
quality_threshold: Minimum confidence for junior to pass (0-1)
|
|
1061
|
-
"""
|
|
1062
|
-
self.quality_threshold = quality_threshold
|
|
1063
|
-
|
|
1064
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
1065
|
-
"""Execute teaching pattern.
|
|
1066
|
-
|
|
1067
|
-
Args:
|
|
1068
|
-
agents: [junior_agent, expert_agent] (exactly 2)
|
|
1069
|
-
context: Initial context
|
|
1070
|
-
|
|
1071
|
-
Returns:
|
|
1072
|
-
StrategyResult with teaching outcome
|
|
1073
|
-
"""
|
|
1074
|
-
if len(agents) != 2:
|
|
1075
|
-
raise ValueError("Teaching strategy requires exactly 2 agents")
|
|
1076
|
-
|
|
1077
|
-
junior, expert = agents
|
|
1078
|
-
logger.info(f"Teaching: {junior.id} → {expert.id} validation")
|
|
1079
|
-
|
|
1080
|
-
results: list[AgentResult] = []
|
|
1081
|
-
total_duration = 0.0
|
|
1082
|
-
|
|
1083
|
-
# Phase 1: Junior attempt
|
|
1084
|
-
junior_result = await self._execute_agent(junior, context)
|
|
1085
|
-
results.append(junior_result)
|
|
1086
|
-
total_duration += junior_result.duration_seconds
|
|
1087
|
-
|
|
1088
|
-
# Phase 2: Quality gate
|
|
1089
|
-
if junior_result.success and junior_result.confidence >= self.quality_threshold:
|
|
1090
|
-
logger.info(f"Junior passed quality gate (confidence={junior_result.confidence:.2f})")
|
|
1091
|
-
aggregated = {"outcome": "junior_success", "junior_output": junior_result.output}
|
|
1092
|
-
else:
|
|
1093
|
-
logger.info(
|
|
1094
|
-
f"Junior failed quality gate, expert taking over "
|
|
1095
|
-
f"(confidence={junior_result.confidence:.2f})"
|
|
1096
|
-
)
|
|
1097
|
-
|
|
1098
|
-
# Phase 3: Expert takeover
|
|
1099
|
-
expert_context = context.copy()
|
|
1100
|
-
expert_context["junior_attempt"] = junior_result.output
|
|
1101
|
-
expert_result = await self._execute_agent(expert, expert_context)
|
|
1102
|
-
results.append(expert_result)
|
|
1103
|
-
total_duration += expert_result.duration_seconds
|
|
1104
|
-
|
|
1105
|
-
aggregated = {
|
|
1106
|
-
"outcome": "expert_takeover",
|
|
1107
|
-
"junior_output": junior_result.output,
|
|
1108
|
-
"expert_output": expert_result.output,
|
|
1109
|
-
}
|
|
1110
|
-
|
|
1111
|
-
return StrategyResult(
|
|
1112
|
-
success=all(r.success for r in results),
|
|
1113
|
-
outputs=results,
|
|
1114
|
-
aggregated_output=aggregated,
|
|
1115
|
-
total_duration=total_duration,
|
|
1116
|
-
errors=[r.error for r in results if not r.success],
|
|
1117
|
-
)
|
|
1118
|
-
|
|
1119
|
-
|
|
1120
|
-
class RefinementStrategy(ExecutionStrategy):
|
|
1121
|
-
"""Progressive Refinement (Draft → Review → Polish).
|
|
1122
|
-
|
|
1123
|
-
Iterative improvement through multiple quality levels.
|
|
1124
|
-
Each agent refines output from previous stage.
|
|
1125
|
-
|
|
1126
|
-
Use when:
|
|
1127
|
-
- Iterative improvement needed
|
|
1128
|
-
- Quality ladder desired
|
|
1129
|
-
- Multi-stage refinement beneficial
|
|
1130
|
-
|
|
1131
|
-
Example:
|
|
1132
|
-
Drafter(CHEAP) → Reviewer(CAPABLE) → Polisher(PREMIUM)
|
|
1133
|
-
"""
|
|
1134
|
-
|
|
1135
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
1136
|
-
"""Execute refinement pattern.
|
|
1137
|
-
|
|
1138
|
-
Args:
|
|
1139
|
-
agents: [drafter, reviewer, polisher] (3+ agents)
|
|
1140
|
-
context: Initial context
|
|
1141
|
-
|
|
1142
|
-
Returns:
|
|
1143
|
-
StrategyResult with refined output
|
|
1144
|
-
"""
|
|
1145
|
-
if len(agents) < 2:
|
|
1146
|
-
raise ValueError("Refinement strategy requires at least 2 agents")
|
|
1147
|
-
|
|
1148
|
-
logger.info(f"Refinement with {len(agents)} stages")
|
|
1149
|
-
|
|
1150
|
-
results: list[AgentResult] = []
|
|
1151
|
-
current_context = context.copy()
|
|
1152
|
-
total_duration = 0.0
|
|
1153
|
-
|
|
1154
|
-
for i, agent in enumerate(agents):
|
|
1155
|
-
stage_name = f"stage_{i + 1}"
|
|
1156
|
-
logger.info(f"Refinement {stage_name}: {agent.id}")
|
|
1157
|
-
|
|
1158
|
-
result = await self._execute_agent(agent, current_context)
|
|
1159
|
-
results.append(result)
|
|
1160
|
-
total_duration += result.duration_seconds
|
|
1161
|
-
|
|
1162
|
-
if result.success:
|
|
1163
|
-
# Pass refined output to next stage
|
|
1164
|
-
current_context[f"{stage_name}_output"] = result.output
|
|
1165
|
-
current_context["previous_output"] = result.output
|
|
1166
|
-
else:
|
|
1167
|
-
logger.error(f"Refinement stage {i + 1} failed: {result.error}")
|
|
1168
|
-
break # Stop refinement on failure
|
|
1169
|
-
|
|
1170
|
-
# Final output is from last successful stage
|
|
1171
|
-
final_output = results[-1].output if results[-1].success else {}
|
|
1172
|
-
|
|
1173
|
-
return StrategyResult(
|
|
1174
|
-
success=all(r.success for r in results),
|
|
1175
|
-
outputs=results,
|
|
1176
|
-
aggregated_output={
|
|
1177
|
-
"refinement_stages": len(results),
|
|
1178
|
-
"final_output": final_output,
|
|
1179
|
-
"stage_outputs": [r.output for r in results],
|
|
1180
|
-
},
|
|
1181
|
-
total_duration=total_duration,
|
|
1182
|
-
errors=[r.error for r in results if not r.success],
|
|
1183
|
-
)
|
|
1184
|
-
|
|
1185
|
-
|
|
1186
|
-
class AdaptiveStrategy(ExecutionStrategy):
|
|
1187
|
-
"""Adaptive Routing (Classifier → Specialist).
|
|
1188
|
-
|
|
1189
|
-
Classifier assesses task complexity, routes to appropriate specialist.
|
|
1190
|
-
Right-sizing: match agent tier to task needs.
|
|
1191
|
-
|
|
1192
|
-
Use when:
|
|
1193
|
-
- Variable task complexity
|
|
1194
|
-
- Cost optimization desired
|
|
1195
|
-
- Right-sizing important
|
|
1196
|
-
|
|
1197
|
-
Example:
|
|
1198
|
-
Classifier(CHEAP) → route(simple|moderate|complex) → Specialist(tier)
|
|
1199
|
-
"""
|
|
1200
|
-
|
|
1201
|
-
async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
|
|
1202
|
-
"""Execute adaptive routing pattern.
|
|
1203
|
-
|
|
1204
|
-
Args:
|
|
1205
|
-
agents: [classifier, *specialists] (2+ agents)
|
|
1206
|
-
context: Initial context
|
|
1207
|
-
|
|
1208
|
-
Returns:
|
|
1209
|
-
StrategyResult with routed execution
|
|
1210
|
-
"""
|
|
1211
|
-
if len(agents) < 2:
|
|
1212
|
-
raise ValueError("Adaptive strategy requires at least 2 agents")
|
|
1213
|
-
|
|
1214
|
-
classifier = agents[0]
|
|
1215
|
-
specialists = agents[1:]
|
|
1216
|
-
|
|
1217
|
-
logger.info(f"Adaptive: {classifier.id} → {len(specialists)} specialists")
|
|
1218
|
-
|
|
1219
|
-
results: list[AgentResult] = []
|
|
1220
|
-
total_duration = 0.0
|
|
1221
|
-
|
|
1222
|
-
# Phase 1: Classification
|
|
1223
|
-
classifier_result = await self._execute_agent(classifier, context)
|
|
1224
|
-
results.append(classifier_result)
|
|
1225
|
-
total_duration += classifier_result.duration_seconds
|
|
1226
|
-
|
|
1227
|
-
if not classifier_result.success:
|
|
1228
|
-
logger.error("Classifier failed, defaulting to first specialist")
|
|
1229
|
-
selected_specialist = specialists[0]
|
|
1230
|
-
else:
|
|
1231
|
-
# Phase 2: Route to specialist based on classification
|
|
1232
|
-
# Simplified: select based on confidence score
|
|
1233
|
-
if classifier_result.confidence > 0.8:
|
|
1234
|
-
# High confidence → simple task → cheap specialist
|
|
1235
|
-
selected_specialist = min(
|
|
1236
|
-
specialists,
|
|
1237
|
-
key=lambda s: {
|
|
1238
|
-
"CHEAP": 0,
|
|
1239
|
-
"CAPABLE": 1,
|
|
1240
|
-
"PREMIUM": 2,
|
|
1241
|
-
}.get(s.tier_preference, 1),
|
|
1242
|
-
)
|
|
1243
|
-
else:
|
|
1244
|
-
# Low confidence → complex task → premium specialist
|
|
1245
|
-
selected_specialist = max(
|
|
1246
|
-
specialists,
|
|
1247
|
-
key=lambda s: {
|
|
1248
|
-
"CHEAP": 0,
|
|
1249
|
-
"CAPABLE": 1,
|
|
1250
|
-
"PREMIUM": 2,
|
|
1251
|
-
}.get(s.tier_preference, 1),
|
|
1252
|
-
)
|
|
1253
|
-
|
|
1254
|
-
logger.info(f"Routed to specialist: {selected_specialist.id}")
|
|
1255
|
-
|
|
1256
|
-
# Phase 3: Execute selected specialist
|
|
1257
|
-
specialist_context = context.copy()
|
|
1258
|
-
specialist_context["classification"] = classifier_result.output
|
|
1259
|
-
specialist_result = await self._execute_agent(selected_specialist, specialist_context)
|
|
1260
|
-
results.append(specialist_result)
|
|
1261
|
-
total_duration += specialist_result.duration_seconds
|
|
1262
|
-
|
|
1263
|
-
return StrategyResult(
|
|
1264
|
-
success=all(r.success for r in results),
|
|
1265
|
-
outputs=results,
|
|
1266
|
-
aggregated_output={
|
|
1267
|
-
"classification": classifier_result.output,
|
|
1268
|
-
"selected_specialist": selected_specialist.id,
|
|
1269
|
-
"specialist_output": specialist_result.output,
|
|
1270
|
-
},
|
|
1271
|
-
total_duration=total_duration,
|
|
1272
|
-
errors=[r.error for r in results if not r.success],
|
|
1273
|
-
)
|
|
1274
|
-
|
|
1275
|
-
|
|
-class ConditionalStrategy(ExecutionStrategy):
-    """Conditional branching (if X then A else B).
-
-    The 7th grammar rule enabling dynamic workflow decisions based on gates.
-
-    Use when:
-        - Quality gates determine next steps
-        - Error handling requires different paths
-        - Agent consensus affects workflow
-    """
-
-    def __init__(
-        self,
-        condition: Condition,
-        then_branch: Branch,
-        else_branch: Branch | None = None,
-    ):
-        """Initialize conditional strategy."""
-        self.condition = condition
-        self.then_branch = then_branch
-        self.else_branch = else_branch
-        self.evaluator = ConditionEvaluator()
-
-    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-        """Execute conditional branching."""
-        logger.info(f"Conditional: Evaluating '{self.condition.description or 'condition'}'")
-
-        condition_met = self.evaluator.evaluate(self.condition, context)
-        logger.info(f"Conditional: Condition evaluated to {condition_met}")
-
-        if condition_met:
-            selected_branch = self.then_branch
-            branch_label = "then"
-        else:
-            if self.else_branch is None:
-                return StrategyResult(
-                    success=True,
-                    outputs=[],
-                    aggregated_output={"branch_taken": None},
-                    total_duration=0.0,
-                )
-            selected_branch = self.else_branch
-            branch_label = "else"
-
-        logger.info(f"Conditional: Taking '{branch_label}' branch")
-
-        branch_strategy = get_strategy(selected_branch.strategy)
-        branch_context = context.copy()
-        branch_context["_conditional"] = {"condition_met": condition_met, "branch": branch_label}
-
-        result = await branch_strategy.execute(selected_branch.agents, branch_context)
-        result.aggregated_output["_conditional"] = {
-            "condition_met": condition_met,
-            "branch_taken": branch_label,
-        }
-        return result
-
-
-class MultiConditionalStrategy(ExecutionStrategy):
-    """Multiple conditional branches (switch/case pattern)."""
-
-    def __init__(
-        self,
-        conditions: list[tuple[Condition, Branch]],
-        default_branch: Branch | None = None,
-    ):
-        """Initialize multi-conditional strategy."""
-        self.conditions = conditions
-        self.default_branch = default_branch
-        self.evaluator = ConditionEvaluator()
-
-    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-        """Execute multi-conditional branching."""
-        for i, (condition, branch) in enumerate(self.conditions):
-            if self.evaluator.evaluate(condition, context):
-                logger.info(f"MultiConditional: Condition {i + 1} matched")
-                branch_strategy = get_strategy(branch.strategy)
-                result = await branch_strategy.execute(branch.agents, context)
-                result.aggregated_output["_matched_index"] = i
-                return result
-
-        if self.default_branch:
-            branch_strategy = get_strategy(self.default_branch.strategy)
-            return await branch_strategy.execute(self.default_branch.agents, context)
-
-        return StrategyResult(
-            success=True,
-            outputs=[],
-            aggregated_output={"reason": "No conditions matched"},
-            total_duration=0.0,
-        )
-
-
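The two classes removed above implement if/else and switch/case branching over a shared ConditionEvaluator. The sketch below shows only the first-match-wins selection order they rely on; Case, Branch, and pick_branch are simplified stand-ins, not the Condition/Branch types from attune (which now appear to live under attune/orchestration/_strategies/conditional_strategies.py).

# Self-contained sketch of first-match-wins branch selection, with an optional default.
from dataclasses import dataclass
from typing import Any, Callable


@dataclass
class Branch:
    name: str


@dataclass
class Case:
    predicate: Callable[[dict[str, Any]], bool]
    branch: Branch


def pick_branch(
    cases: list[Case], default: Branch | None, context: dict[str, Any]
) -> Branch | None:
    # Conditions are checked in declaration order; the first match short-circuits.
    for case in cases:
        if case.predicate(context):
            return case.branch
    return default  # may be None, mirroring the "No conditions matched" result


if __name__ == "__main__":
    cases = [
        Case(lambda ctx: ctx.get("coverage", 0) < 0.8, Branch("add-tests")),
        Case(lambda ctx: ctx.get("lint_errors", 0) > 0, Branch("fix-lint")),
    ]
    assert pick_branch(cases, Branch("ship"), {"coverage": 0.9}).name == "ship"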
-class NestedStrategy(ExecutionStrategy):
-    """Nested workflow execution (sentences within sentences).
-
-    Enables recursive composition where workflows invoke other workflows.
-    Implements the "subordinate clause" pattern in the grammar metaphor.
-
-    Features:
-        - Reference workflows by ID or define inline
-        - Configurable max depth (default: 3)
-        - Cycle detection prevents infinite recursion
-        - Full context inheritance from parent to child
-
-    Use when:
-        - Complex multi-stage pipelines need modular sub-workflows
-        - Reusable workflow components should be shared
-        - Hierarchical team structures (teams containing sub-teams)
-
-    Example:
-        >>> # Parent workflow with nested sub-workflow
-        >>> strategy = NestedStrategy(
-        ...     workflow_ref=WorkflowReference(workflow_id="security-audit"),
-        ...     max_depth=3
-        ... )
-        >>> result = await strategy.execute([], context)
-
-    Example (inline):
-        >>> strategy = NestedStrategy(
-        ...     workflow_ref=WorkflowReference(
-        ...         inline=InlineWorkflow(
-        ...             agents=[analyzer, reviewer],
-        ...             strategy="parallel"
-        ...         )
-        ...     )
-        ... )
-    """
-
-    def __init__(
-        self,
-        workflow_ref: WorkflowReference,
-        max_depth: int = NestingContext.DEFAULT_MAX_DEPTH,
-    ):
-        """Initialize nested strategy.
-
-        Args:
-            workflow_ref: Reference to workflow (by ID or inline)
-            max_depth: Maximum nesting depth allowed
-        """
-        self.workflow_ref = workflow_ref
-        self.max_depth = max_depth
-
-    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-        """Execute nested workflow.
-
-        Args:
-            agents: Ignored (workflow_ref defines agents)
-            context: Parent execution context (inherited by child)
-
-        Returns:
-            StrategyResult from nested workflow execution
-
-        Raises:
-            RecursionError: If max depth exceeded or cycle detected
-        """
-        # Get or create nesting context
-        nesting = NestingContext.from_context(context)
-
-        # Resolve workflow
-        if self.workflow_ref.workflow_id:
-            workflow_id = self.workflow_ref.workflow_id
-            workflow = get_workflow(workflow_id)
-            workflow_agents = workflow.agents
-            strategy_name = workflow.strategy
-        else:
-            workflow_id = f"inline_{id(self.workflow_ref.inline)}"
-            workflow_agents = self.workflow_ref.inline.agents
-            strategy_name = self.workflow_ref.inline.strategy
-
-        # Check nesting limits
-        if not nesting.can_nest(workflow_id):
-            if nesting.current_depth >= nesting.max_depth:
-                error_msg = (
-                    f"Maximum nesting depth ({nesting.max_depth}) exceeded. "
-                    f"Current stack: {' → '.join(nesting.workflow_stack)}"
-                )
-            else:
-                error_msg = (
-                    f"Cycle detected: workflow '{workflow_id}' already in stack. "
-                    f"Stack: {' → '.join(nesting.workflow_stack)}"
-                )
-            logger.error(error_msg)
-            raise RecursionError(error_msg)
-
-        logger.info(f"Nested: Entering '{workflow_id}' at depth {nesting.current_depth + 1}")
-
-        # Create child context with updated nesting
-        child_nesting = nesting.enter(workflow_id)
-        child_context = child_nesting.to_context(context.copy())
-
-        # Execute nested workflow
-        strategy = get_strategy(strategy_name)
-        result = await strategy.execute(workflow_agents, child_context)
-
-        # Augment result with nesting metadata
-        result.aggregated_output["_nested"] = {
-            "workflow_id": workflow_id,
-            "depth": child_nesting.current_depth,
-            "parent_stack": nesting.workflow_stack,
-        }
-
-        # Store result under specified key if provided
-        if self.workflow_ref.result_key:
-            result.aggregated_output[self.workflow_ref.result_key] = result.aggregated_output.copy()
-
-        logger.info(f"Nested: Exiting '{workflow_id}'")
-
-        return result
-
-
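NestedStrategy's safety rests on two guards checked before entering a child workflow: a depth cap and re-entry (cycle) detection against the workflow stack. Below is a standalone sketch of that check under a simplified NestingGuard type; attune's real NestingContext is not shown here and may differ in detail.

# Sketch of the depth-limit and cycle guards implied by can_nest()/enter() above.
from dataclasses import dataclass, field


@dataclass
class NestingGuard:
    max_depth: int = 3
    stack: list[str] = field(default_factory=list)

    def can_nest(self, workflow_id: str) -> bool:
        # Refuse to go deeper than max_depth or to re-enter a workflow already
        # on the stack (which would recurse forever).
        return len(self.stack) < self.max_depth and workflow_id not in self.stack

    def enter(self, workflow_id: str) -> "NestingGuard":
        # Returns a new guard so the parent's stack is never mutated in place.
        return NestingGuard(self.max_depth, [*self.stack, workflow_id])


if __name__ == "__main__":
    guard = NestingGuard().enter("audit").enter("review")
    assert guard.can_nest("report")                        # depth 2 -> 3 allowed
    assert not guard.can_nest("audit")                     # cycle: already on the stack
    assert not guard.enter("report").can_nest("anything")  # depth limit hit

Returning a fresh guard from enter() keeps sibling sub-workflows isolated, which matches the child-context copy made by the removed code.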
-class NestedSequentialStrategy(ExecutionStrategy):
-    """Sequential execution with nested workflow support.
-
-    Like SequentialStrategy but steps can be either agents OR workflow references.
-    Enables mixing direct agent execution with nested sub-workflows.
-
-    Example:
-        >>> strategy = NestedSequentialStrategy(
-        ...     steps=[
-        ...         StepDefinition(agent=analyzer),
-        ...         StepDefinition(workflow_ref=WorkflowReference(workflow_id="review-team")),
-        ...         StepDefinition(agent=reporter),
-        ...     ]
-        ... )
-    """
-
-    def __init__(
-        self,
-        steps: list["StepDefinition"],
-        max_depth: int = NestingContext.DEFAULT_MAX_DEPTH,
-    ):
-        """Initialize nested sequential strategy.
-
-        Args:
-            steps: List of step definitions (agents or workflow refs)
-            max_depth: Maximum nesting depth
-        """
-        self.steps = steps
-        self.max_depth = max_depth
-
-    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-        """Execute steps sequentially, handling both agents and nested workflows."""
-        if not self.steps:
-            raise ValueError("steps list cannot be empty")
-
-        logger.info(f"NestedSequential: Executing {len(self.steps)} steps")
-
-        results: list[AgentResult] = []
-        current_context = context.copy()
-        total_duration = 0.0
-
-        for i, step in enumerate(self.steps):
-            logger.info(f"NestedSequential: Step {i + 1}/{len(self.steps)}")
-
-            if step.agent:
-                # Direct agent execution
-                result = await self._execute_agent(step.agent, current_context)
-                results.append(result)
-                total_duration += result.duration_seconds
-
-                if result.success:
-                    current_context[f"{step.agent.id}_output"] = result.output
-            else:
-                # Nested workflow execution
-                nested_strategy = NestedStrategy(
-                    workflow_ref=step.workflow_ref,
-                    max_depth=self.max_depth,
-                )
-                nested_result = await nested_strategy.execute([], current_context)
-                total_duration += nested_result.total_duration
-
-                # Convert to AgentResult for consistency
-                results.append(
-                    AgentResult(
-                        agent_id=f"nested_{step.workflow_ref.workflow_id or 'inline'}",
-                        success=nested_result.success,
-                        output=nested_result.aggregated_output,
-                        confidence=nested_result.aggregated_output.get("avg_confidence", 0.0),
-                        duration_seconds=nested_result.total_duration,
-                    )
-                )
-
-                if nested_result.success:
-                    key = step.workflow_ref.result_key or f"step_{i}_output"
-                    current_context[key] = nested_result.aggregated_output
-
-        return StrategyResult(
-            success=all(r.success for r in results),
-            outputs=results,
-            aggregated_output=self._aggregate_results(results),
-            total_duration=total_duration,
-            errors=[r.error for r in results if not r.success],
-        )
-
-
-# =============================================================================
-# New Anthropic-Inspired Patterns (Patterns 8-10)
+# Advanced Patterns (Patterns 11-13)
 # =============================================================================
 
 
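The steps passed to NestedSequentialStrategy are either a direct agent or a workflow reference, never both; the StepDefinition dataclass enforcing that is removed further down in this diff. A small sketch of the same exactly-one-of validation, with Agent, WorkflowRef, and Step as placeholder types rather than attune classes:

# Sketch of the "exactly one of agent or workflow_ref" rule used by StepDefinition.
from dataclasses import dataclass


@dataclass
class Agent:
    id: str


@dataclass
class WorkflowRef:
    workflow_id: str


@dataclass
class Step:
    agent: Agent | None = None
    workflow_ref: WorkflowRef | None = None

    def __post_init__(self) -> None:
        # bool(x) == bool(y) is true when both or neither are set; either way the
        # step is ambiguous, so reject it.
        if bool(self.agent) == bool(self.workflow_ref):
            raise ValueError("Step must have exactly one of: agent or workflow_ref")


if __name__ == "__main__":
    Step(agent=Agent("analyzer"))                  # ok: direct agent
    Step(workflow_ref=WorkflowRef("review-team"))  # ok: nested sub-workflow
    try:
        Step()
    except ValueError:
        pass  # neither set, rejected as expected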
@@ -1612,9 +121,7 @@ class ToolEnhancedStrategy(ExecutionStrategy):
         """
         self.tools = tools or []
 
-    async def execute(
-        self, agents: list[AgentTemplate], context: dict[str, Any]
-    ) -> StrategyResult:
+    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
         """Execute single agent with tool access.
 
         Args:
@@ -1716,9 +223,7 @@ class PromptCachedSequentialStrategy(ExecutionStrategy):
         self.cached_context = cached_context
         self.cache_ttl = cache_ttl
 
-    async def execute(
-        self, agents: list[AgentTemplate], context: dict[str, Any]
-    ) -> StrategyResult:
+    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
         """Execute agents sequentially with shared cache.
 
         Args:
@@ -1825,9 +330,7 @@ class DelegationChainStrategy(ExecutionStrategy):
         """
         self.max_depth = min(max_depth, self.MAX_DEPTH)
 
-    async def execute(
-        self, agents: list[AgentTemplate], context: dict[str, Any]
-    ) -> StrategyResult:
+    async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
         """Execute delegation chain with depth tracking.
 
         Args:
@@ -1844,7 +347,9 @@ class DelegationChainStrategy(ExecutionStrategy):
                 success=False,
                 outputs=[],
                 aggregated_output={},
-                errors=[
+                errors=[
+                    f"Max delegation depth ({self.max_depth}) exceeded at depth {current_depth}"
+                ],
             )
 
         if not agents:
@@ -1922,9 +427,7 @@ class DelegationChainStrategy(ExecutionStrategy):
 
         client = LLMClient()
 
-        specialist_descriptions = "\n".join(
-            [f"- {s.agent_id}: {s.role}" for s in specialists]
-        )
+        specialist_descriptions = "\n".join([f"- {s.agent_id}: {s.role}" for s in specialists])
 
         prompt = f"""Break down this task and assign to specialists:
 
@@ -1952,7 +455,14 @@ Return JSON:
             return json.loads(response.get("content", "{}"))
         except json.JSONDecodeError:
             logger.warning("Failed to parse delegation plan, using fallback")
-            return {
+            return {
+                "sub_tasks": [
+                    {
+                        "specialist_id": specialists[0].agent_id if specialists else "unknown",
+                        "task": task,
+                    }
+                ]
+            }
 
     async def _execute_specialist(
         self, specialist: AgentTemplate, context: dict[str, Any]
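The hunk above keeps the same parse-or-fallback behaviour for the delegation plan: parse the model's JSON, and on failure route the whole task to the first specialist. A hedged, self-contained sketch of that pattern (parse_plan and the exact plan shape here are illustrative, not the attune method):

# Sketch of robust delegation-plan parsing with a single catch-all fallback sub-task.
import json
from typing import Any


def parse_plan(raw: str, fallback_specialist: str, task: str) -> dict[str, Any]:
    try:
        plan = json.loads(raw or "{}")
    except json.JSONDecodeError:
        plan = {}
    if not plan.get("sub_tasks"):
        # Same shape as the fallback in the diff: one sub-task routed to the first specialist.
        plan = {"sub_tasks": [{"specialist_id": fallback_specialist, "task": task}]}
    return plan


if __name__ == "__main__":
    good = parse_plan('{"sub_tasks": [{"specialist_id": "sec", "task": "audit"}]}', "gen", "audit")
    bad = parse_plan("not json", "gen", "audit")
    assert good["sub_tasks"][0]["specialist_id"] == "sec"
    assert bad["sub_tasks"][0]["specialist_id"] == "gen"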
@@ -2044,27 +554,8 @@ Provide cohesive final analysis."""
         }
 
 
-@dataclass
-class StepDefinition:
-    """Definition of a step in NestedSequentialStrategy.
-
-    Either agent OR workflow_ref must be provided (mutually exclusive).
-
-    Attributes:
-        agent: Agent to execute directly
-        workflow_ref: Nested workflow to execute
-    """
-
-    agent: AgentTemplate | None = None
-    workflow_ref: WorkflowReference | None = None
-
-    def __post_init__(self):
-        """Validate that exactly one step type is provided."""
-        if bool(self.agent) == bool(self.workflow_ref):
-            raise ValueError("StepDefinition must have exactly one of: agent or workflow_ref")
-
-
 # Strategy registry for lookup by name
+# Note: Core and conditional strategies are also in _strategies._STRATEGY_REGISTRY
 STRATEGY_REGISTRY: dict[str, type[ExecutionStrategy]] = {
     # Original 7 patterns
     "sequential": SequentialStrategy,