empathy-framework 5.1.1-py3-none-any.whl → 5.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
- empathy_os/__init__.py +1 -1
- empathy_os/cache/hybrid.py +5 -1
- empathy_os/cli/commands/batch.py +8 -0
- empathy_os/cli/commands/profiling.py +4 -0
- empathy_os/cli/commands/workflow.py +8 -4
- empathy_os/cli_router.py +9 -0
- empathy_os/config.py +15 -2
- empathy_os/core_modules/__init__.py +15 -0
- empathy_os/dashboard/simple_server.py +62 -30
- empathy_os/mcp/__init__.py +10 -0
- empathy_os/mcp/server.py +506 -0
- empathy_os/memory/control_panel.py +1 -131
- empathy_os/memory/control_panel_support.py +145 -0
- empathy_os/memory/encryption.py +159 -0
- empathy_os/memory/long_term.py +46 -631
- empathy_os/memory/long_term_types.py +99 -0
- empathy_os/memory/mixins/__init__.py +25 -0
- empathy_os/memory/mixins/backend_init_mixin.py +249 -0
- empathy_os/memory/mixins/capabilities_mixin.py +208 -0
- empathy_os/memory/mixins/handoff_mixin.py +208 -0
- empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
- empathy_os/memory/mixins/long_term_mixin.py +352 -0
- empathy_os/memory/mixins/promotion_mixin.py +109 -0
- empathy_os/memory/mixins/short_term_mixin.py +182 -0
- empathy_os/memory/short_term.py +61 -12
- empathy_os/memory/simple_storage.py +302 -0
- empathy_os/memory/storage_backend.py +167 -0
- empathy_os/memory/types.py +8 -3
- empathy_os/memory/unified.py +21 -1120
- empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
- empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
- empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
- empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
- empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
- empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
- empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
- empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
- empathy_os/models/telemetry/__init__.py +71 -0
- empathy_os/models/telemetry/analytics.py +594 -0
- empathy_os/models/telemetry/backend.py +196 -0
- empathy_os/models/telemetry/data_models.py +431 -0
- empathy_os/models/telemetry/storage.py +489 -0
- empathy_os/orchestration/__init__.py +35 -0
- empathy_os/orchestration/execution_strategies.py +481 -0
- empathy_os/orchestration/meta_orchestrator.py +488 -1
- empathy_os/routing/workflow_registry.py +36 -0
- empathy_os/telemetry/agent_coordination.py +2 -3
- empathy_os/telemetry/agent_tracking.py +26 -7
- empathy_os/telemetry/approval_gates.py +18 -24
- empathy_os/telemetry/cli.py +19 -724
- empathy_os/telemetry/commands/__init__.py +14 -0
- empathy_os/telemetry/commands/dashboard_commands.py +696 -0
- empathy_os/telemetry/event_streaming.py +7 -3
- empathy_os/telemetry/feedback_loop.py +28 -15
- empathy_os/tools.py +183 -0
- empathy_os/workflows/__init__.py +5 -0
- empathy_os/workflows/autonomous_test_gen.py +860 -161
- empathy_os/workflows/base.py +6 -2
- empathy_os/workflows/code_review.py +4 -1
- empathy_os/workflows/document_gen/__init__.py +25 -0
- empathy_os/workflows/document_gen/config.py +30 -0
- empathy_os/workflows/document_gen/report_formatter.py +162 -0
- empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
- empathy_os/workflows/output.py +4 -1
- empathy_os/workflows/progress.py +8 -2
- empathy_os/workflows/security_audit.py +2 -2
- empathy_os/workflows/security_audit_phase3.py +7 -4
- empathy_os/workflows/seo_optimization.py +633 -0
- empathy_os/workflows/test_gen/__init__.py +52 -0
- empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
- empathy_os/workflows/test_gen/config.py +88 -0
- empathy_os/workflows/test_gen/data_models.py +38 -0
- empathy_os/workflows/test_gen/report_formatter.py +289 -0
- empathy_os/workflows/test_gen/test_templates.py +381 -0
- empathy_os/workflows/test_gen/workflow.py +655 -0
- empathy_os/workflows/test_gen.py +42 -1905
- empathy_os/cli/parsers/cache 2.py +0 -65
- empathy_os/cli_router 2.py +0 -416
- empathy_os/dashboard/app 2.py +0 -512
- empathy_os/dashboard/simple_server 2.py +0 -403
- empathy_os/dashboard/standalone_server 2.py +0 -536
- empathy_os/memory/types 2.py +0 -441
- empathy_os/models/adaptive_routing 2.py +0 -437
- empathy_os/models/telemetry.py +0 -1660
- empathy_os/project_index/scanner_parallel 2.py +0 -291
- empathy_os/telemetry/agent_coordination 2.py +0 -478
- empathy_os/telemetry/agent_tracking 2.py +0 -350
- empathy_os/telemetry/approval_gates 2.py +0 -563
- empathy_os/telemetry/event_streaming 2.py +0 -405
- empathy_os/telemetry/feedback_loop 2.py +0 -557
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
empathy_os/orchestration/execution_strategies.py

@@ -1569,6 +1569,481 @@ class NestedSequentialStrategy(ExecutionStrategy):
         )
 
 
+# =============================================================================
+# New Anthropic-Inspired Patterns (Patterns 8-10)
+# =============================================================================
+
+
+class ToolEnhancedStrategy(ExecutionStrategy):
+    """Single agent with comprehensive tool access.
+
+    Anthropic Pattern: Use tools over multiple agents when possible.
+    A single agent with rich tooling often outperforms multiple specialized agents.
+
+    Example:
+        # Instead of: FileReader → Parser → Analyzer → Writer
+        # Use: Single agent with [read, parse, analyze, write] tools
+
+    Benefits:
+        - Reduced LLM calls (1 vs 4+)
+        - Simpler coordination
+        - Lower cost
+        - Better context preservation
+
+    Security:
+        - Tool schemas validated before execution
+        - No eval() or exec() usage
+        - Tool execution sandboxed
+    """
+
+    def __init__(self, tools: list[dict[str, Any]] | None = None):
+        """Initialize with tool definitions.
+
+        Args:
+            tools: List of tool definitions in Anthropic format
+                [
+                    {
+                        "name": "tool_name",
+                        "description": "What the tool does",
+                        "input_schema": {...}
+                    },
+                    ...
+                ]
+        """
+        self.tools = tools or []
+
+    async def execute(
+        self, agents: list[AgentTemplate], context: dict[str, Any]
+    ) -> StrategyResult:
+        """Execute single agent with tool access.
+
+        Args:
+            agents: Single agent (others ignored)
+            context: Execution context with task
+
+        Returns:
+            Result with tool usage trace
+        """
+        if not agents:
+            return StrategyResult(
+                success=False, outputs=[], aggregated_output={}, errors=["No agent provided"]
+            )
+
+        agent = agents[0]  # Use first agent only
+        start_time = asyncio.get_event_loop().time()
+
+        # Execute with tool access
+        try:
+            result = await self._execute_with_tools(agent=agent, context=context, tools=self.tools)
+
+            duration = asyncio.get_event_loop().time() - start_time
+
+            return StrategyResult(
+                success=result["success"],
+                outputs=[
+                    AgentResult(
+                        agent_id=agent.agent_id,
+                        success=result["success"],
+                        output=result["output"],
+                        confidence=result.get("confidence", 1.0),
+                        duration_seconds=duration,
+                    )
+                ],
+                aggregated_output=result["output"],
+                total_duration=duration,
+            )
+        except Exception as e:
+            logger.exception(f"Tool-enhanced execution failed: {e}")
+            duration = asyncio.get_event_loop().time() - start_time
+            return StrategyResult(
+                success=False,
+                outputs=[],
+                aggregated_output={},
+                total_duration=duration,
+                errors=[str(e)],
+            )
+
+    async def _execute_with_tools(
+        self, agent: AgentTemplate, context: dict[str, Any], tools: list[dict[str, Any]]
+    ) -> dict[str, Any]:
+        """Execute agent with tool use enabled."""
+        from empathy_os.models import LLMClient
+
+        client = LLMClient()
+
+        # Agent makes autonomous tool use decisions
+        response = await client.call(
+            prompt=context.get("task", ""),
+            system_prompt=agent.system_prompt,
+            tools=tools if tools else None,
+            tier=agent.tier,
+            workflow_id=f"tool-enhanced:{agent.agent_id}",
+        )
+
+        return {"success": True, "output": response, "confidence": 1.0}
+
+
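A minimal usage sketch for the new ToolEnhancedStrategy. The import path mirrors the RECORD entry for this file; SimpleAgent is an illustrative stand-in for the package's AgentTemplate (only the agent_id, role, system_prompt, and tier attributes read by the strategy are assumed), and the tool definition and tier value are example data, not part of the package:

    import asyncio
    from dataclasses import dataclass

    # Import path taken from the RECORD entry above; adjust if the package
    # re-exports these names elsewhere.
    from empathy_os.orchestration.execution_strategies import ToolEnhancedStrategy


    @dataclass
    class SimpleAgent:
        """Stand-in exposing only the fields the strategy reads."""
        agent_id: str
        role: str
        system_prompt: str
        tier: str


    read_file_tool = {
        "name": "read_file",
        "description": "Read a UTF-8 text file and return its contents",
        "input_schema": {
            "type": "object",
            "properties": {"path": {"type": "string"}},
            "required": ["path"],
        },
    }

    agent = SimpleAgent(
        agent_id="analyzer-1",
        role="code analyzer",
        system_prompt="You analyze repositories using the tools provided.",
        tier="capable",  # placeholder; use whatever tiers your deployment defines
    )

    strategy = ToolEnhancedStrategy(tools=[read_file_tool])
    # Requires the package installed and LLM credentials configured.
    result = asyncio.run(strategy.execute([agent], {"task": "Summarize src/main.py"}))
    print(result.success, result.total_duration)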
+class PromptCachedSequentialStrategy(ExecutionStrategy):
+    """Sequential execution with shared cached context.
+
+    Anthropic Pattern: Cache large unchanging contexts across agent calls.
+    Saves 90%+ on prompt tokens for repeated workflows.
+
+    Example:
+        # All agents share cached codebase context
+        # Only task-specific prompts vary
+        # Massive token savings on subsequent calls
+
+    Benefits:
+        - 90%+ token cost reduction
+        - Faster response times (cache hits)
+        - Consistent context across agents
+
+    Security:
+        - Cached content validated once
+        - No executable code in cache
+        - Cache size limits enforced
+    """
+
+    def __init__(self, cached_context: str | None = None, cache_ttl: int = 3600):
+        """Initialize with optional cached context.
+
+        Args:
+            cached_context: Large unchanging context to cache
+                (e.g., documentation, code files, guidelines)
+            cache_ttl: Cache time-to-live in seconds (default: 1 hour)
+        """
+        self.cached_context = cached_context
+        self.cache_ttl = cache_ttl
+
+    async def execute(
+        self, agents: list[AgentTemplate], context: dict[str, Any]
+    ) -> StrategyResult:
+        """Execute agents sequentially with shared cache.
+
+        Args:
+            agents: List of agents to execute in order
+            context: Execution context with task
+
+        Returns:
+            Result with cumulative outputs
+        """
+        from empathy_os.models import LLMClient
+
+        client = LLMClient()
+        outputs = []
+        current_output = context.get("input", {})
+        start_time = asyncio.get_event_loop().time()
+
+        for agent in agents:
+            try:
+                # Build prompt with cached context
+                if self.cached_context:
+                    full_prompt = f"""{self.cached_context}
+
+---
+
+Current task: {context.get('task', '')}
+Previous output: {current_output}
+Your role: {agent.role}"""
+                else:
+                    full_prompt = f"{context.get('task', '')}\n\nPrevious: {current_output}"
+
+                # Execute with caching enabled
+                response = await client.call(
+                    prompt=full_prompt,
+                    system_prompt=agent.system_prompt,
+                    tier=agent.tier,
+                    workflow_id=f"cached-seq:{agent.agent_id}",
+                    enable_caching=True,  # Anthropic prompt caching
+                )
+
+                result = AgentResult(
+                    agent_id=agent.agent_id,
+                    success=True,
+                    output=response,
+                    confidence=1.0,
+                    duration_seconds=response.get("duration", 0.0),
+                )
+
+                outputs.append(result)
+                current_output = response.get("content", "")
+
+            except Exception as e:
+                logger.exception(f"Agent {agent.agent_id} failed: {e}")
+                result = AgentResult(
+                    agent_id=agent.agent_id,
+                    success=False,
+                    output={},
+                    confidence=0.0,
+                    duration_seconds=0.0,
+                    error=str(e),
+                )
+                outputs.append(result)
+
+        duration = asyncio.get_event_loop().time() - start_time
+
+        return StrategyResult(
+            success=all(r.success for r in outputs),
+            outputs=outputs,
+            aggregated_output={"final_output": current_output},
+            total_duration=duration,
+            errors=[r.error for r in outputs if not r.success],
+        )
+
+
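A similar sketch for PromptCachedSequentialStrategy, again assuming the module import path from the RECORD and a stand-in agent type; the cached file name and tier values are placeholders:

    import asyncio
    from dataclasses import dataclass
    from pathlib import Path

    from empathy_os.orchestration.execution_strategies import PromptCachedSequentialStrategy


    @dataclass
    class SimpleAgent:  # illustrative stand-in, not the package's AgentTemplate
        agent_id: str
        role: str
        system_prompt: str
        tier: str


    # Large, unchanging context shared by every agent call; only the task varies.
    guidelines = Path("CONTRIBUTING.md").read_text(encoding="utf-8")

    strategy = PromptCachedSequentialStrategy(cached_context=guidelines, cache_ttl=1800)
    agents = [
        SimpleAgent("reviewer", "style reviewer", "Review the change against the guidelines.", "fast"),
        SimpleAgent("summarizer", "summarizer", "Summarize the review for a changelog entry.", "fast"),
    ]

    # Requires the package installed and LLM credentials configured.
    result = asyncio.run(
        strategy.execute(agents, {"task": "Review the attached patch", "input": {}})
    )
    print(result.aggregated_output["final_output"])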
+class DelegationChainStrategy(ExecutionStrategy):
+    """Hierarchical delegation with max depth enforcement.
+
+    Anthropic Pattern: Keep agent hierarchies shallow (≤3 levels).
+    Coordinator delegates to specialists, specialists can delegate further.
+
+    Example:
+        Level 1: Coordinator (analyzes task)
+        Level 2: Domain specialists (security, performance, quality)
+        Level 3: Sub-specialists (SQL injection, XSS, etc.)
+        Level 4: ❌ NOT ALLOWED (too deep)
+
+    Benefits:
+        - Complex specialization within depth limits
+        - Clear delegation hierarchy
+        - Prevents runaway recursion
+
+    Security:
+        - Max depth enforced (default: 3)
+        - Delegation trace logged
+        - Circular delegation prevented
+    """
+
+    MAX_DEPTH = 3
+
+    def __init__(self, max_depth: int = 3):
+        """Initialize with depth limit.
+
+        Args:
+            max_depth: Maximum delegation depth (default: 3, max: 3)
+        """
+        self.max_depth = min(max_depth, self.MAX_DEPTH)
+
+    async def execute(
+        self, agents: list[AgentTemplate], context: dict[str, Any]
+    ) -> StrategyResult:
+        """Execute delegation chain with depth tracking.
+
+        Args:
+            agents: Hierarchical agent structure [coordinator, specialist1, specialist2, ...]
+            context: Execution context with task
+
+        Returns:
+            Result with delegation trace
+        """
+        current_depth = context.get("_delegation_depth", 0)
+
+        if current_depth >= self.max_depth:
+            return StrategyResult(
+                success=False,
+                outputs=[],
+                aggregated_output={},
+                errors=[f"Max delegation depth ({self.max_depth}) exceeded at depth {current_depth}"],
+            )
+
+        if not agents:
+            return StrategyResult(
+                success=False,
+                outputs=[],
+                aggregated_output={},
+                errors=["No agents provided for delegation"],
+            )
+
+        start_time = asyncio.get_event_loop().time()
+
+        # Execute coordinator (first agent)
+        coordinator = agents[0]
+        specialists = agents[1:]
+
+        try:
+            # Coordinator analyzes and plans delegation
+            delegation_plan = await self._plan_delegation(
+                coordinator=coordinator, task=context.get("task", ""), specialists=specialists
+            )
+
+            # Execute delegated tasks
+            results = []
+            for sub_task in delegation_plan.get("sub_tasks", []):
+                specialist_id = sub_task.get("specialist_id")
+                specialist = self._find_specialist(specialist_id, specialists)
+
+                if specialist:
+                    # Recursive delegation (with depth tracking)
+                    sub_context = {
+                        **context,
+                        "task": sub_task.get("task", ""),
+                        "_delegation_depth": current_depth + 1,
+                    }
+
+                    sub_result = await self._execute_specialist(
+                        specialist=specialist, context=sub_context
+                    )
+
+                    results.append(sub_result)
+
+            # Synthesize results
+            final_output = await self._synthesize_results(
+                coordinator=coordinator, results=results, original_task=context.get("task", "")
+            )
+
+            duration = asyncio.get_event_loop().time() - start_time
+
+            return StrategyResult(
+                success=True,
+                outputs=results,
+                aggregated_output=final_output,
+                total_duration=duration,
+            )
+
+        except Exception as e:
+            logger.exception(f"Delegation chain failed: {e}")
+            duration = asyncio.get_event_loop().time() - start_time
+            return StrategyResult(
+                success=False,
+                outputs=[],
+                aggregated_output={},
+                total_duration=duration,
+                errors=[str(e)],
+            )
+
+    async def _plan_delegation(
+        self, coordinator: AgentTemplate, task: str, specialists: list[AgentTemplate]
+    ) -> dict[str, Any]:
+        """Coordinator plans delegation strategy."""
+        import json
+
+        from empathy_os.models import LLMClient
+
+        client = LLMClient()
+
+        specialist_descriptions = "\n".join(
+            [f"- {s.agent_id}: {s.role}" for s in specialists]
+        )
+
+        prompt = f"""Break down this task and assign to specialists:
+
+Task: {task}
+
+Available specialists:
+{specialist_descriptions}
+
+Return JSON:
+{{
+    "sub_tasks": [
+        {{"specialist_id": "...", "task": "..."}},
+        ...
+    ]
+}}"""
+
+        response = await client.call(
+            prompt=prompt,
+            system_prompt=coordinator.system_prompt or "You are a task coordinator.",
+            tier=coordinator.tier,
+            workflow_id=f"delegation:{coordinator.agent_id}",
+        )
+
+        try:
+            return json.loads(response.get("content", "{}"))
+        except json.JSONDecodeError:
+            logger.warning("Failed to parse delegation plan, using fallback")
+            return {"sub_tasks": [{"specialist_id": specialists[0].agent_id if specialists else "unknown", "task": task}]}
+
+    async def _execute_specialist(
+        self, specialist: AgentTemplate, context: dict[str, Any]
+    ) -> AgentResult:
+        """Execute specialist agent."""
+        from empathy_os.models import LLMClient
+
+        client = LLMClient()
+        start_time = asyncio.get_event_loop().time()
+
+        try:
+            response = await client.call(
+                prompt=context.get("task", ""),
+                system_prompt=specialist.system_prompt,
+                tier=specialist.tier,
+                workflow_id=f"specialist:{specialist.agent_id}",
+            )
+
+            duration = asyncio.get_event_loop().time() - start_time
+
+            return AgentResult(
+                agent_id=specialist.agent_id,
+                success=True,
+                output=response,
+                confidence=1.0,
+                duration_seconds=duration,
+            )
+        except Exception as e:
+            logger.exception(f"Specialist {specialist.agent_id} failed: {e}")
+            duration = asyncio.get_event_loop().time() - start_time
+            return AgentResult(
+                agent_id=specialist.agent_id,
+                success=False,
+                output={},
+                confidence=0.0,
+                duration_seconds=duration,
+                error=str(e),
+            )
+
+    def _find_specialist(
+        self, specialist_id: str, agents: list[AgentTemplate]
+    ) -> AgentTemplate | None:
+        """Find specialist by ID."""
+        for agent in agents:
+            if agent.agent_id == specialist_id:
+                return agent
+        return None
+
+    async def _synthesize_results(
+        self, coordinator: AgentTemplate, results: list[AgentResult], original_task: str
+    ) -> dict[str, Any]:
+        """Coordinator synthesizes specialist results."""
+        from empathy_os.models import LLMClient
+
+        client = LLMClient()
+
+        specialist_reports = "\n\n".join(
+            [f"## {r.agent_id}\n{r.output.get('content', '')}" for r in results]
+        )
+
+        prompt = f"""Synthesize these specialist reports:
+
+Original task: {original_task}
+
+{specialist_reports}
+
+Provide cohesive final analysis."""
+
+        try:
+            response = await client.call(
+                prompt=prompt,
+                system_prompt=coordinator.system_prompt or "You are a synthesis coordinator.",
+                tier=coordinator.tier,
+                workflow_id=f"synthesis:{coordinator.agent_id}",
+            )
+
+            return {
+                "synthesis": response.get("content", ""),
+                "specialist_reports": [r.output for r in results],
+                "delegation_depth": len(results),
+            }
+        except Exception as e:
+            logger.exception(f"Synthesis failed: {e}")
+            return {
+                "synthesis": "Synthesis failed",
+                "specialist_reports": [r.output for r in results],
+                "delegation_depth": len(results),
+                "error": str(e),
+            }
+
+
 @dataclass
 class StepDefinition:
     """Definition of a step in NestedSequentialStrategy.
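The depth guard in DelegationChainStrategy runs before any model call, so it can be exercised offline. A minimal sketch, assuming the same import path as above:

    import asyncio

    from empathy_os.orchestration.execution_strategies import DelegationChainStrategy

    strategy = DelegationChainStrategy(max_depth=5)  # clamped to MAX_DEPTH = 3

    # A context that already carries _delegation_depth at the limit is rejected
    # immediately, before the coordinator is ever invoked.
    result = asyncio.run(
        strategy.execute(agents=[], context={"task": "audit", "_delegation_depth": 3})
    )
    print(result.success)  # False
    print(result.errors)   # ['Max delegation depth (3) exceeded at depth 3']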
@@ -1591,6 +2066,7 @@ class StepDefinition:
 
 # Strategy registry for lookup by name
 STRATEGY_REGISTRY: dict[str, type[ExecutionStrategy]] = {
+    # Original 7 patterns
     "sequential": SequentialStrategy,
     "parallel": ParallelStrategy,
     "debate": DebateStrategy,
@@ -1598,9 +2074,14 @@ STRATEGY_REGISTRY: dict[str, type[ExecutionStrategy]] = {
     "refinement": RefinementStrategy,
     "adaptive": AdaptiveStrategy,
     "conditional": ConditionalStrategy,
+    # Additional patterns
     "multi_conditional": MultiConditionalStrategy,
     "nested": NestedStrategy,
     "nested_sequential": NestedSequentialStrategy,
+    # New Anthropic-inspired patterns (8-10)
+    "tool_enhanced": ToolEnhancedStrategy,
+    "prompt_cached_sequential": PromptCachedSequentialStrategy,
+    "delegation_chain": DelegationChainStrategy,
 }
 
 
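Because the three new strategies are registered under the names above, callers that resolve strategies by name pick them up without further changes. A small lookup sketch (import path assumed from the RECORD):

    from empathy_os.orchestration.execution_strategies import STRATEGY_REGISTRY

    # Resolve a strategy class by its registry key and instantiate it.
    strategy_cls = STRATEGY_REGISTRY["prompt_cached_sequential"]
    strategy = strategy_cls(cached_context="...shared project documentation...")

    print(sorted(STRATEGY_REGISTRY))  # now includes delegation_chain, prompt_cached_sequential, tool_enhanced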