empathy-framework 5.3.0-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (458)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.3.0.dist-info/METADATA +0 -1026
  9. empathy_framework-5.3.0.dist-info/RECORD +0 -456
  10. empathy_framework-5.3.0.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.3.0.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.3.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -457
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -264
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -202
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -579
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache.py +0 -65
  143. empathy_os/cli/parsers/help.py +0 -41
  144. empathy_os/cli/parsers/info.py +0 -26
  145. empathy_os/cli/parsers/inspect.py +0 -66
  146. empathy_os/cli/parsers/metrics.py +0 -42
  147. empathy_os/cli/parsers/orchestrate.py +0 -61
  148. empathy_os/cli/parsers/patterns.py +0 -54
  149. empathy_os/cli/parsers/provider.py +0 -40
  150. empathy_os/cli/parsers/routing.py +0 -110
  151. empathy_os/cli/parsers/setup.py +0 -42
  152. empathy_os/cli/parsers/status.py +0 -47
  153. empathy_os/cli/parsers/sync.py +0 -31
  154. empathy_os/cli/parsers/tier.py +0 -33
  155. empathy_os/cli/parsers/workflow.py +0 -77
  156. empathy_os/cli/utils/__init__.py +0 -1
  157. empathy_os/cli/utils/data.py +0 -242
  158. empathy_os/cli/utils/helpers.py +0 -68
  159. empathy_os/cli_legacy.py +0 -3957
  160. empathy_os/cli_minimal.py +0 -1159
  161. empathy_os/cli_router.py +0 -437
  162. empathy_os/cli_unified.py +0 -814
  163. empathy_os/config/__init__.py +0 -66
  164. empathy_os/config/xml_config.py +0 -286
  165. empathy_os/config.py +0 -545
  166. empathy_os/coordination.py +0 -870
  167. empathy_os/core.py +0 -1511
  168. empathy_os/core_modules/__init__.py +0 -15
  169. empathy_os/cost_tracker.py +0 -626
  170. empathy_os/dashboard/__init__.py +0 -41
  171. empathy_os/dashboard/app.py +0 -512
  172. empathy_os/dashboard/simple_server.py +0 -435
  173. empathy_os/dashboard/standalone_server.py +0 -547
  174. empathy_os/discovery.py +0 -306
  175. empathy_os/emergence.py +0 -306
  176. empathy_os/exceptions.py +0 -123
  177. empathy_os/feedback_loops.py +0 -373
  178. empathy_os/hot_reload/README.md +0 -473
  179. empathy_os/hot_reload/__init__.py +0 -62
  180. empathy_os/hot_reload/config.py +0 -83
  181. empathy_os/hot_reload/integration.py +0 -229
  182. empathy_os/hot_reload/reloader.py +0 -298
  183. empathy_os/hot_reload/watcher.py +0 -183
  184. empathy_os/hot_reload/websocket.py +0 -177
  185. empathy_os/levels.py +0 -577
  186. empathy_os/leverage_points.py +0 -441
  187. empathy_os/logging_config.py +0 -261
  188. empathy_os/mcp/__init__.py +0 -10
  189. empathy_os/mcp/server.py +0 -506
  190. empathy_os/memory/__init__.py +0 -237
  191. empathy_os/memory/claude_memory.py +0 -469
  192. empathy_os/memory/config.py +0 -224
  193. empathy_os/memory/control_panel.py +0 -1290
  194. empathy_os/memory/control_panel_support.py +0 -145
  195. empathy_os/memory/cross_session.py +0 -845
  196. empathy_os/memory/edges.py +0 -179
  197. empathy_os/memory/encryption.py +0 -159
  198. empathy_os/memory/file_session.py +0 -770
  199. empathy_os/memory/graph.py +0 -570
  200. empathy_os/memory/long_term.py +0 -913
  201. empathy_os/memory/long_term_types.py +0 -99
  202. empathy_os/memory/mixins/__init__.py +0 -25
  203. empathy_os/memory/mixins/backend_init_mixin.py +0 -249
  204. empathy_os/memory/mixins/capabilities_mixin.py +0 -208
  205. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  206. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  207. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  208. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  209. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  210. empathy_os/memory/nodes.py +0 -179
  211. empathy_os/memory/redis_bootstrap.py +0 -540
  212. empathy_os/memory/security/__init__.py +0 -31
  213. empathy_os/memory/security/audit_logger.py +0 -932
  214. empathy_os/memory/security/pii_scrubber.py +0 -640
  215. empathy_os/memory/security/secrets_detector.py +0 -678
  216. empathy_os/memory/short_term.py +0 -2192
  217. empathy_os/memory/simple_storage.py +0 -302
  218. empathy_os/memory/storage/__init__.py +0 -15
  219. empathy_os/memory/storage_backend.py +0 -167
  220. empathy_os/memory/summary_index.py +0 -583
  221. empathy_os/memory/types.py +0 -446
  222. empathy_os/memory/unified.py +0 -182
  223. empathy_os/meta_workflows/__init__.py +0 -74
  224. empathy_os/meta_workflows/agent_creator.py +0 -248
  225. empathy_os/meta_workflows/builtin_templates.py +0 -567
  226. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  227. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  228. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  229. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  230. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  231. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  232. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  233. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  234. empathy_os/meta_workflows/form_engine.py +0 -292
  235. empathy_os/meta_workflows/intent_detector.py +0 -409
  236. empathy_os/meta_workflows/models.py +0 -569
  237. empathy_os/meta_workflows/pattern_learner.py +0 -738
  238. empathy_os/meta_workflows/plan_generator.py +0 -384
  239. empathy_os/meta_workflows/session_context.py +0 -397
  240. empathy_os/meta_workflows/template_registry.py +0 -229
  241. empathy_os/meta_workflows/workflow.py +0 -984
  242. empathy_os/metrics/__init__.py +0 -12
  243. empathy_os/metrics/collector.py +0 -31
  244. empathy_os/metrics/prompt_metrics.py +0 -194
  245. empathy_os/models/__init__.py +0 -172
  246. empathy_os/models/__main__.py +0 -13
  247. empathy_os/models/adaptive_routing.py +0 -437
  248. empathy_os/models/auth_cli.py +0 -444
  249. empathy_os/models/auth_strategy.py +0 -450
  250. empathy_os/models/cli.py +0 -655
  251. empathy_os/models/empathy_executor.py +0 -354
  252. empathy_os/models/executor.py +0 -257
  253. empathy_os/models/fallback.py +0 -762
  254. empathy_os/models/provider_config.py +0 -282
  255. empathy_os/models/registry.py +0 -472
  256. empathy_os/models/tasks.py +0 -359
  257. empathy_os/models/telemetry/__init__.py +0 -71
  258. empathy_os/models/telemetry/analytics.py +0 -594
  259. empathy_os/models/telemetry/backend.py +0 -196
  260. empathy_os/models/telemetry/data_models.py +0 -431
  261. empathy_os/models/telemetry/storage.py +0 -489
  262. empathy_os/models/token_estimator.py +0 -420
  263. empathy_os/models/validation.py +0 -280
  264. empathy_os/monitoring/__init__.py +0 -52
  265. empathy_os/monitoring/alerts.py +0 -946
  266. empathy_os/monitoring/alerts_cli.py +0 -448
  267. empathy_os/monitoring/multi_backend.py +0 -271
  268. empathy_os/monitoring/otel_backend.py +0 -362
  269. empathy_os/optimization/__init__.py +0 -19
  270. empathy_os/optimization/context_optimizer.py +0 -272
  271. empathy_os/orchestration/__init__.py +0 -67
  272. empathy_os/orchestration/agent_templates.py +0 -707
  273. empathy_os/orchestration/config_store.py +0 -499
  274. empathy_os/orchestration/execution_strategies.py +0 -2111
  275. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  276. empathy_os/orchestration/pattern_learner.py +0 -696
  277. empathy_os/orchestration/real_tools.py +0 -931
  278. empathy_os/pattern_cache.py +0 -187
  279. empathy_os/pattern_library.py +0 -542
  280. empathy_os/patterns/debugging/all_patterns.json +0 -81
  281. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  282. empathy_os/patterns/refactoring_memory.json +0 -89
  283. empathy_os/persistence.py +0 -564
  284. empathy_os/platform_utils.py +0 -265
  285. empathy_os/plugins/__init__.py +0 -28
  286. empathy_os/plugins/base.py +0 -361
  287. empathy_os/plugins/registry.py +0 -268
  288. empathy_os/project_index/__init__.py +0 -32
  289. empathy_os/project_index/cli.py +0 -335
  290. empathy_os/project_index/index.py +0 -667
  291. empathy_os/project_index/models.py +0 -504
  292. empathy_os/project_index/reports.py +0 -474
  293. empathy_os/project_index/scanner.py +0 -777
  294. empathy_os/project_index/scanner_parallel.py +0 -291
  295. empathy_os/prompts/__init__.py +0 -61
  296. empathy_os/prompts/config.py +0 -77
  297. empathy_os/prompts/context.py +0 -177
  298. empathy_os/prompts/parser.py +0 -285
  299. empathy_os/prompts/registry.py +0 -313
  300. empathy_os/prompts/templates.py +0 -208
  301. empathy_os/redis_config.py +0 -302
  302. empathy_os/redis_memory.py +0 -799
  303. empathy_os/resilience/__init__.py +0 -56
  304. empathy_os/resilience/circuit_breaker.py +0 -256
  305. empathy_os/resilience/fallback.py +0 -179
  306. empathy_os/resilience/health.py +0 -300
  307. empathy_os/resilience/retry.py +0 -209
  308. empathy_os/resilience/timeout.py +0 -135
  309. empathy_os/routing/__init__.py +0 -43
  310. empathy_os/routing/chain_executor.py +0 -433
  311. empathy_os/routing/classifier.py +0 -217
  312. empathy_os/routing/smart_router.py +0 -234
  313. empathy_os/routing/workflow_registry.py +0 -343
  314. empathy_os/scaffolding/README.md +0 -589
  315. empathy_os/scaffolding/__init__.py +0 -35
  316. empathy_os/scaffolding/__main__.py +0 -14
  317. empathy_os/scaffolding/cli.py +0 -240
  318. empathy_os/socratic/__init__.py +0 -256
  319. empathy_os/socratic/ab_testing.py +0 -958
  320. empathy_os/socratic/blueprint.py +0 -533
  321. empathy_os/socratic/cli.py +0 -703
  322. empathy_os/socratic/collaboration.py +0 -1114
  323. empathy_os/socratic/domain_templates.py +0 -924
  324. empathy_os/socratic/embeddings.py +0 -738
  325. empathy_os/socratic/engine.py +0 -794
  326. empathy_os/socratic/explainer.py +0 -682
  327. empathy_os/socratic/feedback.py +0 -772
  328. empathy_os/socratic/forms.py +0 -629
  329. empathy_os/socratic/generator.py +0 -732
  330. empathy_os/socratic/llm_analyzer.py +0 -637
  331. empathy_os/socratic/mcp_server.py +0 -702
  332. empathy_os/socratic/session.py +0 -312
  333. empathy_os/socratic/storage.py +0 -667
  334. empathy_os/socratic/success.py +0 -730
  335. empathy_os/socratic/visual_editor.py +0 -860
  336. empathy_os/socratic/web_ui.py +0 -958
  337. empathy_os/telemetry/__init__.py +0 -39
  338. empathy_os/telemetry/agent_coordination.py +0 -475
  339. empathy_os/telemetry/agent_tracking.py +0 -367
  340. empathy_os/telemetry/approval_gates.py +0 -545
  341. empathy_os/telemetry/cli.py +0 -1231
  342. empathy_os/telemetry/commands/__init__.py +0 -14
  343. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  344. empathy_os/telemetry/event_streaming.py +0 -409
  345. empathy_os/telemetry/feedback_loop.py +0 -567
  346. empathy_os/telemetry/usage_tracker.py +0 -591
  347. empathy_os/templates.py +0 -754
  348. empathy_os/test_generator/__init__.py +0 -38
  349. empathy_os/test_generator/__main__.py +0 -14
  350. empathy_os/test_generator/cli.py +0 -234
  351. empathy_os/test_generator/generator.py +0 -355
  352. empathy_os/test_generator/risk_analyzer.py +0 -216
  353. empathy_os/tier_recommender.py +0 -384
  354. empathy_os/tools.py +0 -183
  355. empathy_os/trust/__init__.py +0 -28
  356. empathy_os/trust/circuit_breaker.py +0 -579
  357. empathy_os/trust_building.py +0 -527
  358. empathy_os/validation/__init__.py +0 -19
  359. empathy_os/validation/xml_validator.py +0 -281
  360. empathy_os/vscode_bridge.py +0 -173
  361. empathy_os/workflow_commands.py +0 -780
  362. empathy_os/workflow_patterns/__init__.py +0 -33
  363. empathy_os/workflow_patterns/behavior.py +0 -249
  364. empathy_os/workflow_patterns/core.py +0 -76
  365. empathy_os/workflow_patterns/output.py +0 -99
  366. empathy_os/workflow_patterns/registry.py +0 -255
  367. empathy_os/workflow_patterns/structural.py +0 -288
  368. empathy_os/workflows/__init__.py +0 -539
  369. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  370. empathy_os/workflows/base.py +0 -2667
  371. empathy_os/workflows/batch_processing.py +0 -342
  372. empathy_os/workflows/bug_predict.py +0 -1084
  373. empathy_os/workflows/builder.py +0 -273
  374. empathy_os/workflows/caching.py +0 -253
  375. empathy_os/workflows/code_review.py +0 -1048
  376. empathy_os/workflows/code_review_adapters.py +0 -312
  377. empathy_os/workflows/code_review_pipeline.py +0 -722
  378. empathy_os/workflows/config.py +0 -645
  379. empathy_os/workflows/dependency_check.py +0 -644
  380. empathy_os/workflows/document_gen/__init__.py +0 -25
  381. empathy_os/workflows/document_gen/config.py +0 -30
  382. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  383. empathy_os/workflows/document_gen/workflow.py +0 -1426
  384. empathy_os/workflows/document_manager.py +0 -216
  385. empathy_os/workflows/document_manager_README.md +0 -134
  386. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  387. empathy_os/workflows/history.py +0 -510
  388. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  389. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  390. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  391. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  392. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  393. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  394. empathy_os/workflows/llm_base.py +0 -363
  395. empathy_os/workflows/manage_docs.py +0 -87
  396. empathy_os/workflows/manage_docs_README.md +0 -134
  397. empathy_os/workflows/manage_documentation.py +0 -821
  398. empathy_os/workflows/new_sample_workflow1.py +0 -149
  399. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  400. empathy_os/workflows/orchestrated_health_check.py +0 -849
  401. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  402. empathy_os/workflows/output.py +0 -413
  403. empathy_os/workflows/perf_audit.py +0 -863
  404. empathy_os/workflows/pr_review.py +0 -762
  405. empathy_os/workflows/progress.py +0 -785
  406. empathy_os/workflows/progress_server.py +0 -322
  407. empathy_os/workflows/progressive/README 2.md +0 -454
  408. empathy_os/workflows/progressive/README.md +0 -454
  409. empathy_os/workflows/progressive/__init__.py +0 -82
  410. empathy_os/workflows/progressive/cli.py +0 -219
  411. empathy_os/workflows/progressive/core.py +0 -488
  412. empathy_os/workflows/progressive/orchestrator.py +0 -723
  413. empathy_os/workflows/progressive/reports.py +0 -520
  414. empathy_os/workflows/progressive/telemetry.py +0 -274
  415. empathy_os/workflows/progressive/test_gen.py +0 -495
  416. empathy_os/workflows/progressive/workflow.py +0 -589
  417. empathy_os/workflows/refactor_plan.py +0 -694
  418. empathy_os/workflows/release_prep.py +0 -895
  419. empathy_os/workflows/release_prep_crew.py +0 -969
  420. empathy_os/workflows/research_synthesis.py +0 -404
  421. empathy_os/workflows/routing.py +0 -168
  422. empathy_os/workflows/secure_release.py +0 -593
  423. empathy_os/workflows/security_adapters.py +0 -297
  424. empathy_os/workflows/security_audit.py +0 -1329
  425. empathy_os/workflows/security_audit_phase3.py +0 -355
  426. empathy_os/workflows/seo_optimization.py +0 -633
  427. empathy_os/workflows/step_config.py +0 -234
  428. empathy_os/workflows/telemetry_mixin.py +0 -269
  429. empathy_os/workflows/test5.py +0 -125
  430. empathy_os/workflows/test5_README.md +0 -158
  431. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  432. empathy_os/workflows/test_gen/__init__.py +0 -52
  433. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  434. empathy_os/workflows/test_gen/config.py +0 -88
  435. empathy_os/workflows/test_gen/data_models.py +0 -38
  436. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  437. empathy_os/workflows/test_gen/test_templates.py +0 -381
  438. empathy_os/workflows/test_gen/workflow.py +0 -655
  439. empathy_os/workflows/test_gen.py +0 -54
  440. empathy_os/workflows/test_gen_behavioral.py +0 -477
  441. empathy_os/workflows/test_gen_parallel.py +0 -341
  442. empathy_os/workflows/test_lifecycle.py +0 -526
  443. empathy_os/workflows/test_maintenance.py +0 -627
  444. empathy_os/workflows/test_maintenance_cli.py +0 -590
  445. empathy_os/workflows/test_maintenance_crew.py +0 -840
  446. empathy_os/workflows/test_runner.py +0 -622
  447. empathy_os/workflows/tier_tracking.py +0 -531
  448. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  449. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  450. empathy_software_plugin/cli/__init__.py +0 -120
  451. empathy_software_plugin/cli/inspect.py +0 -362
  452. empathy_software_plugin/cli.py +0 -574
  453. empathy_software_plugin/plugin.py +0 -188
  454. workflow_scaffolding/__init__.py +0 -11
  455. workflow_scaffolding/__main__.py +0 -12
  456. workflow_scaffolding/cli.py +0 -206
  457. workflow_scaffolding/generator.py +0 -265
  458. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
@@ -1,2111 +0,0 @@
1
- """Execution strategies for agent composition patterns.
2
-
3
- This module implements the 7 grammar rules for composing agents:
4
- 1. Sequential (A → B → C)
5
- 2. Parallel (A || B || C)
6
- 3. Debate (A ⇄ B ⇄ C → Synthesis)
7
- 4. Teaching (Junior → Expert validation)
8
- 5. Refinement (Draft → Review → Polish)
9
- 6. Adaptive (Classifier → Specialist)
10
- 7. Conditional (if X then A else B) - branching based on gates
11
-
12
- Security:
13
- - All agent outputs validated before passing to next agent
14
- - No eval() or exec() usage
15
- - Timeout enforcement at strategy level
16
- - Condition predicates validated (no code execution)
17
-
18
- Example:
19
- >>> strategy = SequentialStrategy()
20
- >>> agents = [agent1, agent2, agent3]
21
- >>> result = await strategy.execute(agents, context)
22
-
23
- >>> # Conditional branching example
24
- >>> cond_strategy = ConditionalStrategy(
25
- ... condition=Condition(predicate={"confidence": {"$lt": 0.8}}),
26
- ... then_branch=expert_agents,
27
- ... else_branch=fast_agents
28
- ... )
29
- >>> result = await cond_strategy.execute([], context)
30
- """
31
-
32
- import asyncio
33
- import json
34
- import logging
35
- import operator
36
- import re
37
- from abc import ABC, abstractmethod
38
- from collections.abc import Callable
39
- from dataclasses import dataclass, field
40
- from enum import Enum
41
- from typing import Any
42
-
43
- from .agent_templates import AgentTemplate
44
-
45
- logger = logging.getLogger(__name__)
46
-
47
-
48
- @dataclass
49
- class AgentResult:
50
- """Result from agent execution.
51
-
52
- Attributes:
53
- agent_id: ID of agent that produced result
54
- success: Whether execution succeeded
55
- output: Agent output data
56
- confidence: Confidence score (0-1)
57
- duration_seconds: Execution time
58
- error: Error message if failed
59
- """
60
-
61
- agent_id: str
62
- success: bool
63
- output: dict[str, Any]
64
- confidence: float = 0.0
65
- duration_seconds: float = 0.0
66
- error: str = ""
67
-
68
-
69
- @dataclass
70
- class StrategyResult:
71
- """Aggregated result from strategy execution.
72
-
73
- Attributes:
74
- success: Whether overall execution succeeded
75
- outputs: List of individual agent results
76
- aggregated_output: Combined/synthesized output
77
- total_duration: Total execution time
78
- errors: List of errors encountered
79
- """
80
-
81
- success: bool
82
- outputs: list[AgentResult]
83
- aggregated_output: dict[str, Any]
84
- total_duration: float = 0.0
85
- errors: list[str] = field(default_factory=list)
86
-
87
- def __post_init__(self):
88
- """Initialize errors list if None."""
89
- if not self.errors:
90
- self.errors = []
91
-
92
-
93
- # =============================================================================
94
- # Conditional Grammar Types (Pattern 7)
95
- # =============================================================================
96
-
97
-
98
- class ConditionType(Enum):
99
- """Type of condition for gate evaluation.
100
-
101
- Attributes:
102
- JSON_PREDICATE: MongoDB-style JSON predicate ({"field": {"$op": value}})
103
- NATURAL_LANGUAGE: LLM-interpreted natural language condition
104
- COMPOSITE: Logical combination of conditions (AND/OR)
105
- """
106
-
107
- JSON_PREDICATE = "json"
108
- NATURAL_LANGUAGE = "natural"
109
- COMPOSITE = "composite"
110
-
111
-
112
- @dataclass
113
- class Condition:
114
- """A conditional gate for branching in agent workflows.
115
-
116
- Supports hybrid syntax: JSON predicates for simple conditions,
117
- natural language for complex semantic conditions.
118
-
119
- Attributes:
120
- predicate: JSON predicate dict or natural language string
121
- condition_type: How to evaluate the condition
122
- description: Human-readable description of the condition
123
- source_field: Which field(s) in context to evaluate
124
-
125
- JSON Predicate Operators:
126
- $eq: Equal to value
127
- $ne: Not equal to value
128
- $gt: Greater than value
129
- $gte: Greater than or equal to value
130
- $lt: Less than value
131
- $lte: Less than or equal to value
132
- $in: Value is in list
133
- $nin: Value is not in list
134
- $exists: Field exists (or not)
135
- $regex: Matches regex pattern
136
-
137
- Example (JSON):
138
- >>> # Low confidence triggers expert review
139
- >>> cond = Condition(
140
- ... predicate={"confidence": {"$lt": 0.8}},
141
- ... description="Confidence is below threshold"
142
- ... )
143
-
144
- Example (Natural Language):
145
- >>> # LLM interprets complex semantic condition
146
- >>> cond = Condition(
147
- ... predicate="The security audit found critical vulnerabilities",
148
- ... condition_type=ConditionType.NATURAL_LANGUAGE,
149
- ... description="Security issues detected"
150
- ... )
151
- """
152
-
153
- predicate: dict[str, Any] | str
154
- condition_type: ConditionType = ConditionType.JSON_PREDICATE
155
- description: str = ""
156
- source_field: str = "" # Empty means evaluate whole context
157
-
158
- def __post_init__(self):
159
- """Validate condition and auto-detect type."""
160
- if isinstance(self.predicate, str):
161
- # Auto-detect: if it looks like prose, it's natural language
162
- if " " in self.predicate and not self.predicate.startswith("{"):
163
- object.__setattr__(self, "condition_type", ConditionType.NATURAL_LANGUAGE)
164
- elif isinstance(self.predicate, dict):
165
- # Validate JSON predicate structure
166
- self._validate_predicate(self.predicate)
167
- else:
168
- raise ValueError(f"predicate must be dict or str, got {type(self.predicate)}")
169
-
170
- def _validate_predicate(self, predicate: dict[str, Any]) -> None:
171
- """Validate JSON predicate structure (no code execution).
172
-
173
- Args:
174
- predicate: The predicate dict to validate
175
-
176
- Raises:
177
- ValueError: If predicate contains invalid operators
178
- """
179
- valid_operators = {
180
- "$eq",
181
- "$ne",
182
- "$gt",
183
- "$gte",
184
- "$lt",
185
- "$lte",
186
- "$in",
187
- "$nin",
188
- "$exists",
189
- "$regex",
190
- "$and",
191
- "$or",
192
- "$not",
193
- }
194
-
195
- for key, value in predicate.items():
196
- if key.startswith("$"):
197
- if key not in valid_operators:
198
- raise ValueError(f"Invalid operator: {key}")
199
- if isinstance(value, dict):
200
- self._validate_predicate(value)
201
-
202
-
203
- @dataclass
204
- class Branch:
205
- """A branch in conditional execution.
206
-
207
- Attributes:
208
- agents: Agents to execute in this branch
209
- strategy: Strategy to use for executing agents (default: sequential)
210
- label: Human-readable branch label
211
- """
212
-
213
- agents: list[AgentTemplate]
214
- strategy: str = "sequential"
215
- label: str = ""
216
-
217
-
218
- # =============================================================================
219
- # Nested Sentence Types (Phase 2 - Recursive Composition)
220
- # =============================================================================
221
-
222
-
223
- @dataclass
224
- class WorkflowReference:
225
- """Reference to a workflow for nested composition.
226
-
227
- Enables "sentences within sentences" - workflows that invoke other workflows.
228
- Supports both registered workflow IDs and inline definitions.
229
-
230
- Attributes:
231
- workflow_id: ID of registered workflow (mutually exclusive with inline)
232
- inline: Inline workflow definition (mutually exclusive with workflow_id)
233
- context_mapping: Optional mapping of parent context fields to child
234
- result_key: Key to store nested workflow result in parent context
235
-
236
- Example (by ID):
237
- >>> ref = WorkflowReference(
238
- ... workflow_id="security-audit-team",
239
- ... result_key="security_result"
240
- ... )
241
-
242
- Example (inline):
243
- >>> ref = WorkflowReference(
244
- ... inline=InlineWorkflow(
245
- ... agents=[agent1, agent2],
246
- ... strategy="parallel"
247
- ... ),
248
- ... result_key="analysis_result"
249
- ... )
250
- """
251
-
252
- workflow_id: str = ""
253
- inline: "InlineWorkflow | None" = None
254
- context_mapping: dict[str, str] = field(default_factory=dict)
255
- result_key: str = "nested_result"
256
-
257
- def __post_init__(self):
258
- """Validate that exactly one reference type is provided."""
259
- if bool(self.workflow_id) == bool(self.inline):
260
- raise ValueError("WorkflowReference must have exactly one of: workflow_id or inline")
261
-
262
-
263
- @dataclass
264
- class InlineWorkflow:
265
- """Inline workflow definition for nested composition.
266
-
267
- Allows defining a sub-workflow directly within a parent workflow,
268
- without requiring registration.
269
-
270
- Attributes:
271
- agents: Agents to execute
272
- strategy: Strategy name (from STRATEGY_REGISTRY)
273
- description: Human-readable description
274
-
275
- Example:
276
- >>> inline = InlineWorkflow(
277
- ... agents=[analyzer, reviewer],
278
- ... strategy="sequential",
279
- ... description="Code review sub-workflow"
280
- ... )
281
- """
282
-
283
- agents: list[AgentTemplate]
284
- strategy: str = "sequential"
285
- description: str = ""
286
-
287
-
288
- class NestingContext:
289
- """Tracks nesting depth and prevents infinite recursion.
290
-
291
- Attributes:
292
- current_depth: Current nesting level (0 = root)
293
- max_depth: Maximum allowed nesting depth
294
- workflow_stack: Stack of workflow IDs for cycle detection
295
- """
296
-
297
- CONTEXT_KEY = "_nesting"
298
- DEFAULT_MAX_DEPTH = 3
299
-
300
- def __init__(self, max_depth: int = DEFAULT_MAX_DEPTH):
301
- """Initialize nesting context.
302
-
303
- Args:
304
- max_depth: Maximum allowed nesting depth
305
- """
306
- self.current_depth = 0
307
- self.max_depth = max_depth
308
- self.workflow_stack: list[str] = []
309
-
310
- @classmethod
311
- def from_context(cls, context: dict[str, Any]) -> "NestingContext":
312
- """Extract or create NestingContext from execution context.
313
-
314
- Args:
315
- context: Execution context dict
316
-
317
- Returns:
318
- NestingContext instance
319
- """
320
- if cls.CONTEXT_KEY in context:
321
- return context[cls.CONTEXT_KEY]
322
- return cls()
323
-
324
- def can_nest(self, workflow_id: str = "") -> bool:
325
- """Check if another nesting level is allowed.
326
-
327
- Args:
328
- workflow_id: ID of workflow to nest (for cycle detection)
329
-
330
- Returns:
331
- True if nesting is allowed
332
- """
333
- if self.current_depth >= self.max_depth:
334
- return False
335
- if workflow_id and workflow_id in self.workflow_stack:
336
- return False # Cycle detected
337
- return True
338
-
339
- def enter(self, workflow_id: str = "") -> "NestingContext":
340
- """Create a child context for nested execution.
341
-
342
- Args:
343
- workflow_id: ID of workflow being entered
344
-
345
- Returns:
346
- New NestingContext with incremented depth
347
- """
348
- child = NestingContext(self.max_depth)
349
- child.current_depth = self.current_depth + 1
350
- child.workflow_stack = self.workflow_stack.copy()
351
- if workflow_id:
352
- child.workflow_stack.append(workflow_id)
353
- return child
354
-
355
- def to_context(self, context: dict[str, Any]) -> dict[str, Any]:
356
- """Add nesting context to execution context.
357
-
358
- Args:
359
- context: Execution context dict
360
-
361
- Returns:
362
- Updated context with nesting info
363
- """
364
- context = context.copy()
365
- context[self.CONTEXT_KEY] = self
366
- return context
367
-
368
-
369
- # Registry for named workflows (populated at runtime)
370
- WORKFLOW_REGISTRY: dict[str, "WorkflowDefinition"] = {}
371
-
372
-
373
- @dataclass
374
- class WorkflowDefinition:
375
- """A registered workflow definition.
376
-
377
- Workflows can be registered and referenced by ID in nested compositions.
378
-
379
- Attributes:
380
- id: Unique workflow identifier
381
- agents: Agents in the workflow
382
- strategy: Composition strategy name
383
- description: Human-readable description
384
- """
385
-
386
- id: str
387
- agents: list[AgentTemplate]
388
- strategy: str = "sequential"
389
- description: str = ""
390
-
391
-
392
- def register_workflow(workflow: WorkflowDefinition) -> None:
393
- """Register a workflow for nested references.
394
-
395
- Args:
396
- workflow: Workflow definition to register
397
- """
398
- WORKFLOW_REGISTRY[workflow.id] = workflow
399
- logger.info(f"Registered workflow: {workflow.id}")
400
-
401
-
402
- def get_workflow(workflow_id: str) -> WorkflowDefinition:
403
- """Get a registered workflow by ID.
404
-
405
- Args:
406
- workflow_id: Workflow identifier
407
-
408
- Returns:
409
- WorkflowDefinition
410
-
411
- Raises:
412
- ValueError: If workflow is not registered
413
- """
414
- if workflow_id not in WORKFLOW_REGISTRY:
415
- raise ValueError(
416
- f"Unknown workflow: {workflow_id}. Available: {list(WORKFLOW_REGISTRY.keys())}"
417
- )
418
- return WORKFLOW_REGISTRY[workflow_id]
419
-
420
-
421
- class ConditionEvaluator:
422
- """Evaluates conditions against execution context.
423
-
424
- Supports both JSON predicates (fast, deterministic) and
425
- natural language conditions (LLM-interpreted, semantic).
426
-
427
- Security:
428
- - No eval() or exec() - all operators are whitelisted
429
- - JSON predicates use safe comparison operators
430
- - Natural language uses LLM API (no code execution)
431
- """
432
-
433
- # Mapping of JSON operators to Python comparison functions
434
- OPERATORS: dict[str, Callable[[Any, Any], bool]] = {
435
- "$eq": operator.eq,
436
- "$ne": operator.ne,
437
- "$gt": operator.gt,
438
- "$gte": operator.ge,
439
- "$lt": operator.lt,
440
- "$lte": operator.le,
441
- "$in": lambda val, lst: val in lst,
442
- "$nin": lambda val, lst: val not in lst,
443
- "$exists": lambda val, exists: (val is not None) == exists,
444
- "$regex": lambda val, pattern: bool(re.match(pattern, str(val))) if val else False,
445
- }
446
-
447
- def evaluate(self, condition: Condition, context: dict[str, Any]) -> bool:
448
- """Evaluate a condition against the current context.
449
-
450
- Args:
451
- condition: The condition to evaluate
452
- context: Execution context with agent results
453
-
454
- Returns:
455
- True if condition is met, False otherwise
456
-
457
- Example:
458
- >>> evaluator = ConditionEvaluator()
459
- >>> context = {"confidence": 0.6, "errors": 0}
460
- >>> cond = Condition(predicate={"confidence": {"$lt": 0.8}})
461
- >>> evaluator.evaluate(cond, context)
462
- True
463
- """
464
- if condition.condition_type == ConditionType.JSON_PREDICATE:
465
- return self._evaluate_json(condition.predicate, context)
466
- elif condition.condition_type == ConditionType.NATURAL_LANGUAGE:
467
- return self._evaluate_natural_language(condition.predicate, context)
468
- elif condition.condition_type == ConditionType.COMPOSITE:
469
- return self._evaluate_composite(condition.predicate, context)
470
- else:
471
- raise ValueError(f"Unknown condition type: {condition.condition_type}")
472
-
473
- def _evaluate_json(self, predicate: dict[str, Any], context: dict[str, Any]) -> bool:
474
- """Evaluate JSON predicate against context.
475
-
476
- Args:
477
- predicate: MongoDB-style predicate dict
478
- context: Context to evaluate against
479
-
480
- Returns:
481
- True if all conditions match
482
- """
483
- for field_name, condition_spec in predicate.items():
484
- # Handle logical operators
485
- if field_name == "$and":
486
- return all(self._evaluate_json(sub, context) for sub in condition_spec)
487
- if field_name == "$or":
488
- return any(self._evaluate_json(sub, context) for sub in condition_spec)
489
- if field_name == "$not":
490
- return not self._evaluate_json(condition_spec, context)
491
-
492
- # Get value from context (supports nested paths like "result.confidence")
493
- value = self._get_nested_value(context, field_name)
494
-
495
- # Evaluate condition
496
- if isinstance(condition_spec, dict):
497
- for op, target in condition_spec.items():
498
- if op not in self.OPERATORS:
499
- raise ValueError(f"Unknown operator: {op}")
500
- if not self.OPERATORS[op](value, target):
501
- return False
502
- else:
503
- # Direct equality check
504
- if value != condition_spec:
505
- return False
506
-
507
- return True
508
-
509
- def _get_nested_value(self, context: dict[str, Any], path: str) -> Any:
510
- """Get nested value from context using dot notation.
511
-
512
- Args:
513
- context: Context dict
514
- path: Dot-separated path (e.g., "result.confidence")
515
-
516
- Returns:
517
- Value at path or None if not found
518
- """
519
- parts = path.split(".")
520
- current = context
521
-
522
- for part in parts:
523
- if isinstance(current, dict):
524
- current = current.get(part)
525
- else:
526
- return None
527
-
528
- return current
529
-
530
- def _evaluate_natural_language(self, condition_text: str, context: dict[str, Any]) -> bool:
531
- """Evaluate natural language condition using LLM.
532
-
533
- Args:
534
- condition_text: Natural language condition
535
- context: Context to evaluate against
536
-
537
- Returns:
538
- True if LLM determines condition is met
539
-
540
- Note:
541
- Falls back to keyword matching if LLM unavailable.
542
- """
543
- logger.info(f"Evaluating natural language condition: {condition_text}")
544
-
545
- # Try LLM evaluation first
546
- try:
547
- return self._evaluate_with_llm(condition_text, context)
548
- except Exception as e:
549
- logger.warning(f"LLM evaluation failed, using keyword fallback: {e}")
550
- return self._keyword_fallback(condition_text, context)
551
-
552
- def _evaluate_with_llm(self, condition_text: str, context: dict[str, Any]) -> bool:
553
- """Use LLM to evaluate natural language condition.
554
-
555
- Args:
556
- condition_text: The condition in natural language
557
- context: Execution context
558
-
559
- Returns:
560
- LLM's determination (True/False)
561
- """
562
- # Import LLM client lazily to avoid circular imports
563
- try:
564
- from ..llm import get_cheap_tier_client
565
- except ImportError:
566
- logger.warning("LLM client not available for natural language conditions")
567
- raise
568
-
569
- # Prepare context summary for LLM
570
- context_summary = json.dumps(context, indent=2, default=str)[:2000]
571
-
572
- prompt = f"""Evaluate whether the following condition is TRUE or FALSE based on the context.
573
-
574
- Condition: {condition_text}
575
-
576
- Context:
577
- {context_summary}
578
-
579
- Respond with ONLY "TRUE" or "FALSE" (no explanation)."""
580
-
581
- client = get_cheap_tier_client()
582
- response = client.complete(prompt, max_tokens=10)
583
-
584
- result = response.strip().upper()
585
- return result == "TRUE"
586
-
587
- def _keyword_fallback(self, condition_text: str, context: dict[str, Any]) -> bool:
588
- """Fallback keyword-based evaluation for natural language.
589
-
590
- Args:
591
- condition_text: The condition text
592
- context: Execution context
593
-
594
- Returns:
595
- True if keywords suggest condition is likely met
596
- """
597
- # Simple keyword matching as fallback
598
- condition_lower = condition_text.lower()
599
- context_str = json.dumps(context, default=str).lower()
600
-
601
- # Check for negation
602
- is_negated = any(neg in condition_lower for neg in ["not ", "no ", "without "])
603
-
604
- # Extract key terms
605
- terms = re.findall(r"\b\w{4,}\b", condition_lower)
606
- terms = [t for t in terms if t not in {"the", "that", "this", "with", "from"}]
607
-
608
- # Count matching terms
609
- matches = sum(1 for term in terms if term in context_str)
610
- match_ratio = matches / len(terms) if terms else 0
611
-
612
- result = match_ratio > 0.5
613
- return not result if is_negated else result
614
-
615
- def _evaluate_composite(self, predicate: dict[str, Any], context: dict[str, Any]) -> bool:
616
- """Evaluate composite condition (AND/OR of other conditions).
617
-
618
- Args:
619
- predicate: Composite predicate with $and/$or
620
- context: Context to evaluate against
621
-
622
- Returns:
623
- Result of logical combination
624
- """
625
- return self._evaluate_json(predicate, context)
626
-
627
-
628
- class ExecutionStrategy(ABC):
629
- """Base class for agent composition strategies.
630
-
631
- All strategies must implement execute() method to define
632
- how agents are coordinated and results aggregated.
633
- """
634
-
635
- @abstractmethod
636
- async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
637
- """Execute agents using this strategy.
638
-
639
- Args:
640
- agents: List of agent templates to execute
641
- context: Initial context for execution
642
-
643
- Returns:
644
- StrategyResult with aggregated outputs
645
-
646
- Raises:
647
- ValueError: If agents list is empty
648
- TimeoutError: If execution exceeds timeout
649
- """
650
- pass
651
-
652
- async def _execute_agent(self, agent: AgentTemplate, context: dict[str, Any]) -> AgentResult:
653
- """Execute a single agent with real analysis tools.
654
-
655
- Maps agent capabilities to real tool implementations and executes them.
656
-
657
- Args:
658
- agent: Agent template to execute
659
- context: Execution context
660
-
661
- Returns:
662
- AgentResult with execution outcome
663
- """
664
- import time
665
-
666
- from ..orchestration.real_tools import (
667
- RealCodeQualityAnalyzer,
668
- RealCoverageAnalyzer,
669
- RealDocumentationAnalyzer,
670
- RealSecurityAuditor,
671
- )
672
-
673
- logger.info(f"Executing agent: {agent.id} ({agent.role})")
674
- start_time = time.perf_counter()
675
-
676
- # Get project root from context
677
- project_root = context.get("project_root", ".")
678
- target_path = context.get("target_path", "src")
679
-
680
- try:
681
- # Map agent ID to real tool implementation
682
- if agent.id == "security_auditor" or "security" in agent.role.lower():
683
- auditor = RealSecurityAuditor(project_root)
684
- report = auditor.audit(target_path)
685
-
686
- output = {
687
- "agent_role": agent.role,
688
- "total_issues": report.total_issues,
689
- "critical_issues": report.critical_count, # Match workflow field name
690
- "high_issues": report.high_count, # Match workflow field name
691
- "medium_issues": report.medium_count, # Match workflow field name
692
- "passed": report.passed,
693
- "issues_by_file": report.issues_by_file,
694
- }
695
- success = report.passed
696
- confidence = 1.0 if report.total_issues == 0 else 0.7
697
-
698
- elif agent.id == "test_coverage_analyzer" or "coverage" in agent.role.lower():
699
- analyzer = RealCoverageAnalyzer(project_root)
700
- report = analyzer.analyze() # Analyzes all packages automatically
701
-
702
- output = {
703
- "agent_role": agent.role,
704
- "coverage_percent": report.total_coverage, # Match workflow field name
705
- "total_coverage": report.total_coverage, # Keep for compatibility
706
- "files_analyzed": report.files_analyzed,
707
- "uncovered_files": report.uncovered_files,
708
- "passed": report.total_coverage >= 80.0,
709
- }
710
- success = report.total_coverage >= 80.0
711
- confidence = min(report.total_coverage / 100.0, 1.0)
712
-
713
- elif agent.id == "code_reviewer" or "quality" in agent.role.lower():
714
- analyzer = RealCodeQualityAnalyzer(project_root)
715
- report = analyzer.analyze(target_path)
716
-
717
- output = {
718
- "agent_role": agent.role,
719
- "quality_score": report.quality_score,
720
- "ruff_issues": report.ruff_issues,
721
- "mypy_issues": report.mypy_issues,
722
- "total_files": report.total_files,
723
- "passed": report.passed,
724
- }
725
- success = report.passed
726
- confidence = report.quality_score / 10.0
727
-
728
- elif agent.id == "documentation_writer" or "documentation" in agent.role.lower():
729
- analyzer = RealDocumentationAnalyzer(project_root)
730
- report = analyzer.analyze(target_path)
731
-
732
- output = {
733
- "agent_role": agent.role,
734
- "completeness": report.completeness_percentage,
735
- "coverage_percent": report.completeness_percentage, # Match Release Prep field name
736
- "total_functions": report.total_functions,
737
- "documented_functions": report.documented_functions,
738
- "total_classes": report.total_classes,
739
- "documented_classes": report.documented_classes,
740
- "missing_docstrings": report.missing_docstrings,
741
- "passed": report.passed,
742
- }
743
- success = report.passed
744
- confidence = report.completeness_percentage / 100.0
745
-
746
- elif agent.id == "performance_optimizer" or "performance" in agent.role.lower():
747
- # Performance analysis placeholder - mark as passed for now
748
- # TODO: Implement real performance profiling
749
- logger.warning("Performance analysis not yet implemented, returning placeholder")
750
- output = {
751
- "agent_role": agent.role,
752
- "message": "Performance analysis not yet implemented",
753
- "passed": True,
754
- "placeholder": True,
755
- }
756
- success = True
757
- confidence = 1.0
758
-
759
- elif agent.id == "test_generator":
760
- # Test generation requires different handling (LLM-based)
761
- logger.info("Test generation requires manual invocation, returning placeholder")
762
- output = {
763
- "agent_role": agent.role,
764
- "message": "Test generation requires manual invocation",
765
- "passed": True,
766
- }
767
- success = True
768
- confidence = 0.8
769
-
770
- else:
771
- # Unknown agent type - log warning and return placeholder
772
- logger.warning(f"Unknown agent type: {agent.id}, returning placeholder")
773
- output = {
774
- "agent_role": agent.role,
775
- "agent_id": agent.id,
776
- "message": "Unknown agent type - no real implementation",
777
- "passed": True,
778
- }
779
- success = True
780
- confidence = 0.5
781
-
782
- duration = time.perf_counter() - start_time
783
-
784
- logger.info(
785
- f"Agent {agent.id} completed: success={success}, "
786
- f"confidence={confidence:.2f}, duration={duration:.2f}s"
787
- )
788
-
789
- return AgentResult(
790
- agent_id=agent.id,
791
- success=success,
792
- output=output,
793
- confidence=confidence,
794
- duration_seconds=duration,
795
- )
796
-
797
- except Exception as e:
798
- duration = time.perf_counter() - start_time
799
- logger.error(f"Agent {agent.id} failed: {e}")
800
-
801
- return AgentResult(
802
- agent_id=agent.id,
803
- success=False,
804
- output={"agent_role": agent.role, "error_details": str(e)},
805
- error=str(e),
806
- confidence=0.0,
807
- duration_seconds=duration,
808
- )
809
-
810
- def _aggregate_results(self, results: list[AgentResult]) -> dict[str, Any]:
811
- """Aggregate results from multiple agents.
812
-
813
- Args:
814
- results: List of agent results
815
-
816
- Returns:
817
- Aggregated output dictionary
818
- """
819
- return {
820
- "num_agents": len(results),
821
- "all_succeeded": all(r.success for r in results),
822
- "avg_confidence": (
823
- sum(r.confidence for r in results) / len(results) if results else 0.0
824
- ),
825
- "outputs": [r.output for r in results],
826
- }
827
-
828
-
829
- class SequentialStrategy(ExecutionStrategy):
830
- """Sequential composition (A → B → C).
831
-
832
- Executes agents one after another, passing results forward.
833
- Each agent receives output from previous agent in context.
834
-
835
- Use when:
836
- - Tasks must be done in order
837
- - Each step depends on previous results
838
- - Pipeline processing needed
839
-
840
- Example:
841
- Coverage Analyzer → Test Generator → Quality Validator
842
- """
843
-
844
- async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
845
- """Execute agents sequentially.
846
-
847
- Args:
848
- agents: List of agents to execute in order
849
- context: Initial context
850
-
851
- Returns:
852
- StrategyResult with sequential execution results
853
- """
854
- if not agents:
855
- raise ValueError("agents list cannot be empty")
856
-
857
- logger.info(f"Sequential execution of {len(agents)} agents")
858
-
859
- results: list[AgentResult] = []
860
- current_context = context.copy()
861
- total_duration = 0.0
862
-
863
- for agent in agents:
864
- try:
865
- result = await self._execute_agent(agent, current_context)
866
- results.append(result)
867
- total_duration += result.duration_seconds
868
-
869
- # Pass output to next agent's context
870
- if result.success:
871
- current_context[f"{agent.id}_output"] = result.output
872
- else:
873
- logger.error(f"Agent {agent.id} failed: {result.error}")
874
- # Continue or stop based on error handling policy
875
- # For now: continue to next agent
876
-
877
- except Exception as e:
878
- logger.exception(f"Error executing agent {agent.id}: {e}")
879
- results.append(
880
- AgentResult(
881
- agent_id=agent.id,
882
- success=False,
883
- output={},
884
- error=str(e),
885
- )
886
- )
887
-
888
- return StrategyResult(
889
- success=all(r.success for r in results),
890
- outputs=results,
891
- aggregated_output=self._aggregate_results(results),
892
- total_duration=total_duration,
893
- errors=[r.error for r in results if not r.success],
894
- )
895
-
896
-
897
- class ParallelStrategy(ExecutionStrategy):
898
- """Parallel composition (A || B || C).
899
-
900
- Executes all agents simultaneously, aggregates results.
901
- Each agent receives same initial context.
902
-
903
- Use when:
904
- - Independent validations needed
905
- - Multi-perspective review desired
906
- - Time optimization important
907
-
908
- Example:
909
- Security Audit || Performance Check || Code Quality || Docs Check
910
- """
911
-
912
- async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
913
- """Execute agents in parallel.
914
-
915
- Args:
916
- agents: List of agents to execute concurrently
917
- context: Initial context for all agents
918
-
919
- Returns:
920
- StrategyResult with parallel execution results
921
- """
922
- if not agents:
923
- raise ValueError("agents list cannot be empty")
924
-
925
- logger.info(f"Parallel execution of {len(agents)} agents")
926
-
927
- # Execute all agents concurrently
928
- tasks = [self._execute_agent(agent, context) for agent in agents]
929
-
930
- try:
931
- results = await asyncio.gather(*tasks, return_exceptions=True)
932
- except Exception as e:
933
- logger.exception(f"Error in parallel execution: {e}")
934
- raise
935
-
936
- # Process results (handle exceptions)
937
- processed_results: list[AgentResult] = []
938
- for i, result in enumerate(results):
939
- if isinstance(result, Exception):
940
- logger.error(f"Agent {agents[i].id} raised exception: {result}")
941
- processed_results.append(
942
- AgentResult(
943
- agent_id=agents[i].id,
944
- success=False,
945
- output={},
946
- error=str(result),
947
- )
948
- )
949
- else:
950
- # Type checker doesn't know we already filtered out exceptions
951
- assert isinstance(result, AgentResult)
952
- processed_results.append(result)
953
-
954
- total_duration = max((r.duration_seconds for r in processed_results), default=0.0)
955
-
956
- return StrategyResult(
957
- success=all(r.success for r in processed_results),
958
- outputs=processed_results,
959
- aggregated_output=self._aggregate_results(processed_results),
960
- total_duration=total_duration,
961
- errors=[r.error for r in processed_results if not r.success],
962
- )
963
-
964
-
965
- class DebateStrategy(ExecutionStrategy):
-     """Debate/Consensus composition (A ⇄ B ⇄ C → Synthesis).
-
-     Agents provide independent opinions, then a synthesizer
-     aggregates and resolves conflicts.
-
-     Use when:
-     - Multiple expert opinions needed
-     - Architecture decisions require debate
-     - Tradeoff analysis needed
-
-     Example:
-         Architect(scale) || Architect(cost) || Architect(simplicity) → Synthesizer
-     """
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute debate pattern.
-
-         Args:
-             agents: List of agents to debate (recommend 2-4)
-             context: Initial context
-
-         Returns:
-             StrategyResult with synthesized consensus
-         """
-         if not agents:
-             raise ValueError("agents list cannot be empty")
-
-         if len(agents) < 2:
-             logger.warning("Debate pattern works best with 2+ agents")
-
-         logger.info(f"Debate execution with {len(agents)} agents")
-
-         # Phase 1: Parallel execution for independent opinions
-         parallel_strategy = ParallelStrategy()
-         phase1_result = await parallel_strategy.execute(agents, context)
-
-         # Phase 2: Synthesis (simplified - no actual synthesizer agent)
-         # In production: would use dedicated synthesizer agent
-         synthesis = {
-             "debate_participants": [r.agent_id for r in phase1_result.outputs],
-             "opinions": [r.output for r in phase1_result.outputs],
-             "consensus": self._synthesize_opinions(phase1_result.outputs),
-         }
-
-         return StrategyResult(
-             success=phase1_result.success,
-             outputs=phase1_result.outputs,
-             aggregated_output=synthesis,
-             total_duration=phase1_result.total_duration,
-             errors=phase1_result.errors,
-         )
-
-     def _synthesize_opinions(self, results: list[AgentResult]) -> dict[str, Any]:
-         """Synthesize multiple agent opinions into consensus.
-
-         Args:
-             results: Agent results to synthesize
-
-         Returns:
-             Synthesized consensus
-         """
-         # Simplified synthesis: majority vote on success
-         success_votes = sum(1 for r in results if r.success)
-         consensus_reached = success_votes > len(results) / 2
-
-         return {
-             "consensus_reached": consensus_reached,
-             "success_votes": success_votes,
-             "total_votes": len(results),
-             "avg_confidence": (
-                 sum(r.confidence for r in results) / len(results) if results else 0.0
-             ),
-         }
-
-
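# A minimal sketch of the majority-vote synthesis in _synthesize_opinions above, using
# (success, confidence) tuples as illustrative stand-ins for AgentResult objects.
def synthesize(votes: list[tuple[bool, float]]) -> dict:
    success_votes = sum(1 for ok, _ in votes if ok)
    return {
        "consensus_reached": success_votes > len(votes) / 2,  # strict majority
        "success_votes": success_votes,
        "total_votes": len(votes),
        "avg_confidence": sum(c for _, c in votes) / len(votes) if votes else 0.0,
    }


print(synthesize([(True, 0.9), (True, 0.6), (False, 0.4)]))
# {'consensus_reached': True, 'success_votes': 2, 'total_votes': 3, 'avg_confidence': 0.633...}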
- class TeachingStrategy(ExecutionStrategy):
-     """Teaching/Validation (Junior → Expert Review).
-
-     Junior agent attempts task (cheap tier), expert validates.
-     If validation fails, expert takes over.
-
-     Use when:
-     - Cost-effective generation desired
-     - Quality assurance critical
-     - Simple tasks with review needed
-
-     Example:
-         Junior Writer(CHEAP) → Quality Gate → (pass ? done : Expert Review(CAPABLE))
-     """
-
-     def __init__(self, quality_threshold: float = 0.7):
-         """Initialize teaching strategy.
-
-         Args:
-             quality_threshold: Minimum confidence for junior to pass (0-1)
-         """
-         self.quality_threshold = quality_threshold
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute teaching pattern.
-
-         Args:
-             agents: [junior_agent, expert_agent] (exactly 2)
-             context: Initial context
-
-         Returns:
-             StrategyResult with teaching outcome
-         """
-         if len(agents) != 2:
-             raise ValueError("Teaching strategy requires exactly 2 agents")
-
-         junior, expert = agents
-         logger.info(f"Teaching: {junior.id} → {expert.id} validation")
-
-         results: list[AgentResult] = []
-         total_duration = 0.0
-
-         # Phase 1: Junior attempt
-         junior_result = await self._execute_agent(junior, context)
-         results.append(junior_result)
-         total_duration += junior_result.duration_seconds
-
-         # Phase 2: Quality gate
-         if junior_result.success and junior_result.confidence >= self.quality_threshold:
-             logger.info(f"Junior passed quality gate (confidence={junior_result.confidence:.2f})")
-             aggregated = {"outcome": "junior_success", "junior_output": junior_result.output}
-         else:
-             logger.info(
-                 f"Junior failed quality gate, expert taking over "
-                 f"(confidence={junior_result.confidence:.2f})"
-             )
-
-             # Phase 3: Expert takeover
-             expert_context = context.copy()
-             expert_context["junior_attempt"] = junior_result.output
-             expert_result = await self._execute_agent(expert, expert_context)
-             results.append(expert_result)
-             total_duration += expert_result.duration_seconds
-
-             aggregated = {
-                 "outcome": "expert_takeover",
-                 "junior_output": junior_result.output,
-                 "expert_output": expert_result.output,
-             }
-
-         return StrategyResult(
-             success=all(r.success for r in results),
-             outputs=results,
-             aggregated_output=aggregated,
-             total_duration=total_duration,
-             errors=[r.error for r in results if not r.success],
-         )
-
-
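# A minimal sketch of the quality gate above: accept the junior's result only when it
# succeeded AND met the confidence threshold; otherwise hand the task to the expert.
# The two stub coroutines are illustrative stand-ins for self._execute_agent calls.
import asyncio


async def run_junior(task: str) -> dict:
    return {"success": True, "confidence": 0.55, "output": f"draft for {task}"}


async def run_expert(task: str, junior_attempt: dict) -> dict:
    return {"success": True, "confidence": 0.95, "output": f"revised {junior_attempt['output']}"}


async def teach(task: str, quality_threshold: float = 0.7) -> dict:
    junior = await run_junior(task)
    if junior["success"] and junior["confidence"] >= quality_threshold:
        return {"outcome": "junior_success", "junior_output": junior["output"]}
    expert = await run_expert(task, junior_attempt=junior)
    return {"outcome": "expert_takeover", "expert_output": expert["output"]}


print(asyncio.run(teach("summarize the changelog")))  # expert takes over: 0.55 < 0.7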
- class RefinementStrategy(ExecutionStrategy):
-     """Progressive Refinement (Draft → Review → Polish).
-
-     Iterative improvement through multiple quality levels.
-     Each agent refines output from previous stage.
-
-     Use when:
-     - Iterative improvement needed
-     - Quality ladder desired
-     - Multi-stage refinement beneficial
-
-     Example:
-         Drafter(CHEAP) → Reviewer(CAPABLE) → Polisher(PREMIUM)
-     """
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute refinement pattern.
-
-         Args:
-             agents: [drafter, reviewer, polisher] (2+ agents)
-             context: Initial context
-
-         Returns:
-             StrategyResult with refined output
-         """
-         if len(agents) < 2:
-             raise ValueError("Refinement strategy requires at least 2 agents")
-
-         logger.info(f"Refinement with {len(agents)} stages")
-
-         results: list[AgentResult] = []
-         current_context = context.copy()
-         total_duration = 0.0
-
-         for i, agent in enumerate(agents):
-             stage_name = f"stage_{i + 1}"
-             logger.info(f"Refinement {stage_name}: {agent.id}")
-
-             result = await self._execute_agent(agent, current_context)
-             results.append(result)
-             total_duration += result.duration_seconds
-
-             if result.success:
-                 # Pass refined output to next stage
-                 current_context[f"{stage_name}_output"] = result.output
-                 current_context["previous_output"] = result.output
-             else:
-                 logger.error(f"Refinement stage {i + 1} failed: {result.error}")
-                 break  # Stop refinement on failure
-
-         # Final output is from last successful stage
-         final_output = results[-1].output if results[-1].success else {}
-
-         return StrategyResult(
-             success=all(r.success for r in results),
-             outputs=results,
-             aggregated_output={
-                 "refinement_stages": len(results),
-                 "final_output": final_output,
-                 "stage_outputs": [r.output for r in results],
-             },
-             total_duration=total_duration,
-             errors=[r.error for r in results if not r.success],
-         )
-
-
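# A minimal sketch of how RefinementStrategy threads output between stages: each stage
# reads "previous_output" from the context and writes both a stage-specific key and the
# shared key. The stage functions are illustrative stand-ins for agent calls.
def draft(ctx: dict) -> str:
    return "rough draft"


def review(ctx: dict) -> str:
    return f"reviewed({ctx['previous_output']})"


def polish(ctx: dict) -> str:
    return f"polished({ctx['previous_output']})"


context: dict = {}
for i, stage in enumerate([draft, review, polish], start=1):
    output = stage(context)
    context[f"stage_{i}_output"] = output
    context["previous_output"] = output

print(context["previous_output"])  # polished(reviewed(rough draft))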
- class AdaptiveStrategy(ExecutionStrategy):
-     """Adaptive Routing (Classifier → Specialist).
-
-     Classifier assesses task complexity, routes to appropriate specialist.
-     Right-sizing: match agent tier to task needs.
-
-     Use when:
-     - Variable task complexity
-     - Cost optimization desired
-     - Right-sizing important
-
-     Example:
-         Classifier(CHEAP) → route(simple|moderate|complex) → Specialist(tier)
-     """
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute adaptive routing pattern.
-
-         Args:
-             agents: [classifier, *specialists] (2+ agents)
-             context: Initial context
-
-         Returns:
-             StrategyResult with routed execution
-         """
-         if len(agents) < 2:
-             raise ValueError("Adaptive strategy requires at least 2 agents")
-
-         classifier = agents[0]
-         specialists = agents[1:]
-
-         logger.info(f"Adaptive: {classifier.id} → {len(specialists)} specialists")
-
-         results: list[AgentResult] = []
-         total_duration = 0.0
-
-         # Phase 1: Classification
-         classifier_result = await self._execute_agent(classifier, context)
-         results.append(classifier_result)
-         total_duration += classifier_result.duration_seconds
-
-         if not classifier_result.success:
-             logger.error("Classifier failed, defaulting to first specialist")
-             selected_specialist = specialists[0]
-         else:
-             # Phase 2: Route to specialist based on classification
-             # Simplified: select based on confidence score
-             if classifier_result.confidence > 0.8:
-                 # High confidence → simple task → cheap specialist
-                 selected_specialist = min(
-                     specialists,
-                     key=lambda s: {
-                         "CHEAP": 0,
-                         "CAPABLE": 1,
-                         "PREMIUM": 2,
-                     }.get(s.tier_preference, 1),
-                 )
-             else:
-                 # Low confidence → complex task → premium specialist
-                 selected_specialist = max(
-                     specialists,
-                     key=lambda s: {
-                         "CHEAP": 0,
-                         "CAPABLE": 1,
-                         "PREMIUM": 2,
-                     }.get(s.tier_preference, 1),
-                 )
-
-         logger.info(f"Routed to specialist: {selected_specialist.id}")
-
-         # Phase 3: Execute selected specialist
-         specialist_context = context.copy()
-         specialist_context["classification"] = classifier_result.output
-         specialist_result = await self._execute_agent(selected_specialist, specialist_context)
-         results.append(specialist_result)
-         total_duration += specialist_result.duration_seconds
-
-         return StrategyResult(
-             success=all(r.success for r in results),
-             outputs=results,
-             aggregated_output={
-                 "classification": classifier_result.output,
-                 "selected_specialist": selected_specialist.id,
-                 "specialist_output": specialist_result.output,
-             },
-             total_duration=total_duration,
-             errors=[r.error for r in results if not r.success],
-         )
-
-
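# A minimal sketch of the tier ranking used for routing above: min() picks the cheapest
# specialist, max() the most capable, and unknown tiers default to the middle rank.
# The Specialist dataclass is an illustrative stand-in for AgentTemplate.
from dataclasses import dataclass

TIER_RANK = {"CHEAP": 0, "CAPABLE": 1, "PREMIUM": 2}


@dataclass
class Specialist:
    id: str
    tier_preference: str


def rank(s: Specialist) -> int:
    return TIER_RANK.get(s.tier_preference, 1)


specialists = [Specialist("s1", "PREMIUM"), Specialist("s2", "CHEAP"), Specialist("s3", "CAPABLE")]
print(min(specialists, key=rank).id)  # s2 -- high classifier confidence, simple task
print(max(specialists, key=rank).id)  # s1 -- low confidence, escalate to premium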
- class ConditionalStrategy(ExecutionStrategy):
-     """Conditional branching (if X then A else B).
-
-     The 7th grammar rule enabling dynamic workflow decisions based on gates.
-
-     Use when:
-     - Quality gates determine next steps
-     - Error handling requires different paths
-     - Agent consensus affects workflow
-     """
-
-     def __init__(
-         self,
-         condition: Condition,
-         then_branch: Branch,
-         else_branch: Branch | None = None,
-     ):
-         """Initialize conditional strategy."""
-         self.condition = condition
-         self.then_branch = then_branch
-         self.else_branch = else_branch
-         self.evaluator = ConditionEvaluator()
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute conditional branching."""
-         logger.info(f"Conditional: Evaluating '{self.condition.description or 'condition'}'")
-
-         condition_met = self.evaluator.evaluate(self.condition, context)
-         logger.info(f"Conditional: Condition evaluated to {condition_met}")
-
-         if condition_met:
-             selected_branch = self.then_branch
-             branch_label = "then"
-         else:
-             if self.else_branch is None:
-                 return StrategyResult(
-                     success=True,
-                     outputs=[],
-                     aggregated_output={"branch_taken": None},
-                     total_duration=0.0,
-                 )
-             selected_branch = self.else_branch
-             branch_label = "else"
-
-         logger.info(f"Conditional: Taking '{branch_label}' branch")
-
-         branch_strategy = get_strategy(selected_branch.strategy)
-         branch_context = context.copy()
-         branch_context["_conditional"] = {"condition_met": condition_met, "branch": branch_label}
-
-         result = await branch_strategy.execute(selected_branch.agents, branch_context)
-         result.aggregated_output["_conditional"] = {
-             "condition_met": condition_met,
-             "branch_taken": branch_label,
-         }
-         return result
-
-
- class MultiConditionalStrategy(ExecutionStrategy):
-     """Multiple conditional branches (switch/case pattern)."""
-
-     def __init__(
-         self,
-         conditions: list[tuple[Condition, Branch]],
-         default_branch: Branch | None = None,
-     ):
-         """Initialize multi-conditional strategy."""
-         self.conditions = conditions
-         self.default_branch = default_branch
-         self.evaluator = ConditionEvaluator()
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute multi-conditional branching."""
-         for i, (condition, branch) in enumerate(self.conditions):
-             if self.evaluator.evaluate(condition, context):
-                 logger.info(f"MultiConditional: Condition {i + 1} matched")
-                 branch_strategy = get_strategy(branch.strategy)
-                 result = await branch_strategy.execute(branch.agents, context)
-                 result.aggregated_output["_matched_index"] = i
-                 return result
-
-         if self.default_branch:
-             branch_strategy = get_strategy(self.default_branch.strategy)
-             return await branch_strategy.execute(self.default_branch.agents, context)
-
-         return StrategyResult(
-             success=True,
-             outputs=[],
-             aggregated_output={"reason": "No conditions matched"},
-             total_duration=0.0,
-         )
-
-
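# A minimal sketch of the branching shape shared by ConditionalStrategy and
# MultiConditionalStrategy above: evaluate predicates against the context and run the
# first branch that matches, falling back to a default. The predicates and handlers here
# are illustrative; the package's Condition/ConditionEvaluator objects are more structured.
from typing import Any, Callable

Handler = Callable[[dict[str, Any]], str]
Predicate = Callable[[dict[str, Any]], bool]


def dispatch(context: dict[str, Any], cases: list[tuple[Predicate, Handler]],
             default: Handler | None = None) -> str:
    for i, (predicate, handler) in enumerate(cases):
        if predicate(context):
            return f"case {i}: {handler(context)}"
    return f"default: {default(context)}" if default else "no branch taken"


ctx = {"tests_passed": False, "coverage": 0.91}
print(dispatch(
    ctx,
    cases=[
        (lambda c: c["tests_passed"], lambda c: "deploy"),
        (lambda c: c["coverage"] > 0.9, lambda c: "request review"),
    ],
    default=lambda c: "fix tests first",
))  # case 1: request review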
- class NestedStrategy(ExecutionStrategy):
-     """Nested workflow execution (sentences within sentences).
-
-     Enables recursive composition where workflows invoke other workflows.
-     Implements the "subordinate clause" pattern in the grammar metaphor.
-
-     Features:
-     - Reference workflows by ID or define inline
-     - Configurable max depth (default: 3)
-     - Cycle detection prevents infinite recursion
-     - Full context inheritance from parent to child
-
-     Use when:
-     - Complex multi-stage pipelines need modular sub-workflows
-     - Reusable workflow components should be shared
-     - Hierarchical team structures (teams containing sub-teams)
-
-     Example:
-         >>> # Parent workflow with nested sub-workflow
-         >>> strategy = NestedStrategy(
-         ...     workflow_ref=WorkflowReference(workflow_id="security-audit"),
-         ...     max_depth=3
-         ... )
-         >>> result = await strategy.execute([], context)
-
-     Example (inline):
-         >>> strategy = NestedStrategy(
-         ...     workflow_ref=WorkflowReference(
-         ...         inline=InlineWorkflow(
-         ...             agents=[analyzer, reviewer],
-         ...             strategy="parallel"
-         ...         )
-         ...     )
-         ... )
-     """
-
-     def __init__(
-         self,
-         workflow_ref: WorkflowReference,
-         max_depth: int = NestingContext.DEFAULT_MAX_DEPTH,
-     ):
-         """Initialize nested strategy.
-
-         Args:
-             workflow_ref: Reference to workflow (by ID or inline)
-             max_depth: Maximum nesting depth allowed
-         """
-         self.workflow_ref = workflow_ref
-         self.max_depth = max_depth
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute nested workflow.
-
-         Args:
-             agents: Ignored (workflow_ref defines agents)
-             context: Parent execution context (inherited by child)
-
-         Returns:
-             StrategyResult from nested workflow execution
-
-         Raises:
-             RecursionError: If max depth exceeded or cycle detected
-         """
-         # Get or create nesting context
-         nesting = NestingContext.from_context(context)
-
-         # Resolve workflow
-         if self.workflow_ref.workflow_id:
-             workflow_id = self.workflow_ref.workflow_id
-             workflow = get_workflow(workflow_id)
-             workflow_agents = workflow.agents
-             strategy_name = workflow.strategy
-         else:
-             workflow_id = f"inline_{id(self.workflow_ref.inline)}"
-             workflow_agents = self.workflow_ref.inline.agents
-             strategy_name = self.workflow_ref.inline.strategy
-
-         # Check nesting limits
-         if not nesting.can_nest(workflow_id):
-             if nesting.current_depth >= nesting.max_depth:
-                 error_msg = (
-                     f"Maximum nesting depth ({nesting.max_depth}) exceeded. "
-                     f"Current stack: {' → '.join(nesting.workflow_stack)}"
-                 )
-             else:
-                 error_msg = (
-                     f"Cycle detected: workflow '{workflow_id}' already in stack. "
-                     f"Stack: {' → '.join(nesting.workflow_stack)}"
-                 )
-             logger.error(error_msg)
-             raise RecursionError(error_msg)
-
-         logger.info(f"Nested: Entering '{workflow_id}' at depth {nesting.current_depth + 1}")
-
-         # Create child context with updated nesting
-         child_nesting = nesting.enter(workflow_id)
-         child_context = child_nesting.to_context(context.copy())
-
-         # Execute nested workflow
-         strategy = get_strategy(strategy_name)
-         result = await strategy.execute(workflow_agents, child_context)
-
-         # Augment result with nesting metadata
-         result.aggregated_output["_nested"] = {
-             "workflow_id": workflow_id,
-             "depth": child_nesting.current_depth,
-             "parent_stack": nesting.workflow_stack,
-         }
-
-         # Store result under specified key if provided
-         if self.workflow_ref.result_key:
-             result.aggregated_output[self.workflow_ref.result_key] = result.aggregated_output.copy()
-
-         logger.info(f"Nested: Exiting '{workflow_id}'")
-
-         return result
-
-
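# A minimal sketch of the depth/cycle guard NestedStrategy relies on: a workflow stack
# carried along in the context, a depth cap, and a membership check to detect cycles.
# NestingGuard is a simplified, illustrative analogue of the package's NestingContext.
from dataclasses import dataclass, field


@dataclass
class NestingGuard:
    max_depth: int = 3
    stack: list[str] = field(default_factory=list)

    def can_nest(self, workflow_id: str) -> bool:
        return len(self.stack) < self.max_depth and workflow_id not in self.stack

    def enter(self, workflow_id: str) -> "NestingGuard":
        return NestingGuard(self.max_depth, [*self.stack, workflow_id])


guard = NestingGuard().enter("release").enter("security-audit")
print(guard.can_nest("security-audit"))  # False -- would be a cycle
print(guard.can_nest("docs-check"))      # True  -- depth 2 of 3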
- class NestedSequentialStrategy(ExecutionStrategy):
-     """Sequential execution with nested workflow support.
-
-     Like SequentialStrategy but steps can be either agents OR workflow references.
-     Enables mixing direct agent execution with nested sub-workflows.
-
-     Example:
-         >>> strategy = NestedSequentialStrategy(
-         ...     steps=[
-         ...         StepDefinition(agent=analyzer),
-         ...         StepDefinition(workflow_ref=WorkflowReference(workflow_id="review-team")),
-         ...         StepDefinition(agent=reporter),
-         ...     ]
-         ... )
-     """
-
-     def __init__(
-         self,
-         steps: list["StepDefinition"],
-         max_depth: int = NestingContext.DEFAULT_MAX_DEPTH,
-     ):
-         """Initialize nested sequential strategy.
-
-         Args:
-             steps: List of step definitions (agents or workflow refs)
-             max_depth: Maximum nesting depth
-         """
-         self.steps = steps
-         self.max_depth = max_depth
-
-     async def execute(self, agents: list[AgentTemplate], context: dict[str, Any]) -> StrategyResult:
-         """Execute steps sequentially, handling both agents and nested workflows."""
-         if not self.steps:
-             raise ValueError("steps list cannot be empty")
-
-         logger.info(f"NestedSequential: Executing {len(self.steps)} steps")
-
-         results: list[AgentResult] = []
-         current_context = context.copy()
-         total_duration = 0.0
-
-         for i, step in enumerate(self.steps):
-             logger.info(f"NestedSequential: Step {i + 1}/{len(self.steps)}")
-
-             if step.agent:
-                 # Direct agent execution
-                 result = await self._execute_agent(step.agent, current_context)
-                 results.append(result)
-                 total_duration += result.duration_seconds
-
-                 if result.success:
-                     current_context[f"{step.agent.id}_output"] = result.output
-             else:
-                 # Nested workflow execution
-                 nested_strategy = NestedStrategy(
-                     workflow_ref=step.workflow_ref,
-                     max_depth=self.max_depth,
-                 )
-                 nested_result = await nested_strategy.execute([], current_context)
-                 total_duration += nested_result.total_duration
-
-                 # Convert to AgentResult for consistency
-                 results.append(
-                     AgentResult(
-                         agent_id=f"nested_{step.workflow_ref.workflow_id or 'inline'}",
-                         success=nested_result.success,
-                         output=nested_result.aggregated_output,
-                         confidence=nested_result.aggregated_output.get("avg_confidence", 0.0),
-                         duration_seconds=nested_result.total_duration,
-                     )
-                 )
-
-                 if nested_result.success:
-                     key = step.workflow_ref.result_key or f"step_{i}_output"
-                     current_context[key] = nested_result.aggregated_output
-
-         return StrategyResult(
-             success=all(r.success for r in results),
-             outputs=results,
-             aggregated_output=self._aggregate_results(results),
-             total_duration=total_duration,
-             errors=[r.error for r in results if not r.success],
-         )
-
-
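# A minimal sketch of the per-step dispatch above: a step either carries an agent or a
# workflow reference, and the loop branches on which one is set. The tuples below are
# illustrative stand-ins for StepDefinition instances.
steps = [("agent", "analyzer"), ("workflow", "review-team"), ("agent", "reporter")]

context: dict = {}
for i, (kind, name) in enumerate(steps):
    if kind == "agent":
        context[f"{name}_output"] = f"output of {name}"                 # direct agent execution
    else:
        context[f"step_{i}_output"] = f"result of sub-workflow {name}"  # nested execution

print(sorted(context))  # ['analyzer_output', 'reporter_output', 'step_1_output']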
- # =============================================================================
- # New Anthropic-Inspired Patterns (Patterns 8-10)
- # =============================================================================
-
-
- class ToolEnhancedStrategy(ExecutionStrategy):
-     """Single agent with comprehensive tool access.
-
-     Anthropic Pattern: Use tools over multiple agents when possible.
-     A single agent with rich tooling often outperforms multiple specialized agents.
-
-     Example:
-         # Instead of: FileReader → Parser → Analyzer → Writer
-         # Use: Single agent with [read, parse, analyze, write] tools
-
-     Benefits:
-     - Reduced LLM calls (1 vs 4+)
-     - Simpler coordination
-     - Lower cost
-     - Better context preservation
-
-     Security:
-     - Tool schemas validated before execution
-     - No eval() or exec() usage
-     - Tool execution sandboxed
-     """
-
-     def __init__(self, tools: list[dict[str, Any]] | None = None):
-         """Initialize with tool definitions.
-
-         Args:
-             tools: List of tool definitions in Anthropic format
-                 [
-                     {
-                         "name": "tool_name",
-                         "description": "What the tool does",
-                         "input_schema": {...}
-                     },
-                     ...
-                 ]
-         """
-         self.tools = tools or []
-
-     async def execute(
-         self, agents: list[AgentTemplate], context: dict[str, Any]
-     ) -> StrategyResult:
-         """Execute single agent with tool access.
-
-         Args:
-             agents: Single agent (others ignored)
-             context: Execution context with task
-
-         Returns:
-             Result with tool usage trace
-         """
-         if not agents:
-             return StrategyResult(
-                 success=False, outputs=[], aggregated_output={}, errors=["No agent provided"]
-             )
-
-         agent = agents[0]  # Use first agent only
-         start_time = asyncio.get_event_loop().time()
-
-         # Execute with tool access
-         try:
-             result = await self._execute_with_tools(agent=agent, context=context, tools=self.tools)
-
-             duration = asyncio.get_event_loop().time() - start_time
-
-             return StrategyResult(
-                 success=result["success"],
-                 outputs=[
-                     AgentResult(
-                         agent_id=agent.agent_id,
-                         success=result["success"],
-                         output=result["output"],
-                         confidence=result.get("confidence", 1.0),
-                         duration_seconds=duration,
-                     )
-                 ],
-                 aggregated_output=result["output"],
-                 total_duration=duration,
-             )
-         except Exception as e:
-             logger.exception(f"Tool-enhanced execution failed: {e}")
-             duration = asyncio.get_event_loop().time() - start_time
-             return StrategyResult(
-                 success=False,
-                 outputs=[],
-                 aggregated_output={},
-                 total_duration=duration,
-                 errors=[str(e)],
-             )
-
-     async def _execute_with_tools(
-         self, agent: AgentTemplate, context: dict[str, Any], tools: list[dict[str, Any]]
-     ) -> dict[str, Any]:
-         """Execute agent with tool use enabled."""
-         from empathy_os.models import LLMClient
-
-         client = LLMClient()
-
-         # Agent makes autonomous tool use decisions
-         response = await client.call(
-             prompt=context.get("task", ""),
-             system_prompt=agent.system_prompt,
-             tools=tools if tools else None,
-             tier=agent.tier,
-             workflow_id=f"tool-enhanced:{agent.agent_id}",
-         )
-
-         return {"success": True, "output": response, "confidence": 1.0}
-
-
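# A minimal sketch of a tool definition in the shape the docstring above describes
# (name / description / input_schema as JSON Schema). The read_file tool itself is
# illustrative; how it is executed is up to the caller.
read_file_tool = {
    "name": "read_file",
    "description": "Read a UTF-8 text file from the workspace and return its contents.",
    "input_schema": {
        "type": "object",
        "properties": {
            "path": {"type": "string", "description": "Workspace-relative file path"},
        },
        "required": ["path"],
    },
}

# Would be supplied as, e.g., ToolEnhancedStrategy(tools=[read_file_tool, ...]).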
- class PromptCachedSequentialStrategy(ExecutionStrategy):
-     """Sequential execution with shared cached context.
-
-     Anthropic Pattern: Cache large unchanging contexts across agent calls.
-     Saves 90%+ on prompt tokens for repeated workflows.
-
-     Example:
-         # All agents share cached codebase context
-         # Only task-specific prompts vary
-         # Massive token savings on subsequent calls
-
-     Benefits:
-     - 90%+ token cost reduction
-     - Faster response times (cache hits)
-     - Consistent context across agents
-
-     Security:
-     - Cached content validated once
-     - No executable code in cache
-     - Cache size limits enforced
-     """
-
-     def __init__(self, cached_context: str | None = None, cache_ttl: int = 3600):
-         """Initialize with optional cached context.
-
-         Args:
-             cached_context: Large unchanging context to cache
-                 (e.g., documentation, code files, guidelines)
-             cache_ttl: Cache time-to-live in seconds (default: 1 hour)
-         """
-         self.cached_context = cached_context
-         self.cache_ttl = cache_ttl
-
-     async def execute(
-         self, agents: list[AgentTemplate], context: dict[str, Any]
-     ) -> StrategyResult:
-         """Execute agents sequentially with shared cache.
-
-         Args:
-             agents: List of agents to execute in order
-             context: Execution context with task
-
-         Returns:
-             Result with cumulative outputs
-         """
-         from empathy_os.models import LLMClient
-
-         client = LLMClient()
-         outputs = []
-         current_output = context.get("input", {})
-         start_time = asyncio.get_event_loop().time()
-
-         for agent in agents:
-             try:
-                 # Build prompt with cached context
-                 if self.cached_context:
-                     full_prompt = f"""{self.cached_context}
-
- ---
-
- Current task: {context.get('task', '')}
- Previous output: {current_output}
- Your role: {agent.role}"""
-                 else:
-                     full_prompt = f"{context.get('task', '')}\n\nPrevious: {current_output}"
-
-                 # Execute with caching enabled
-                 response = await client.call(
-                     prompt=full_prompt,
-                     system_prompt=agent.system_prompt,
-                     tier=agent.tier,
-                     workflow_id=f"cached-seq:{agent.agent_id}",
-                     enable_caching=True,  # Anthropic prompt caching
-                 )
-
-                 result = AgentResult(
-                     agent_id=agent.agent_id,
-                     success=True,
-                     output=response,
-                     confidence=1.0,
-                     duration_seconds=response.get("duration", 0.0),
-                 )
-
-                 outputs.append(result)
-                 current_output = response.get("content", "")
-
-             except Exception as e:
-                 logger.exception(f"Agent {agent.agent_id} failed: {e}")
-                 result = AgentResult(
-                     agent_id=agent.agent_id,
-                     success=False,
-                     output={},
-                     confidence=0.0,
-                     duration_seconds=0.0,
-                     error=str(e),
-                 )
-                 outputs.append(result)
-
-         duration = asyncio.get_event_loop().time() - start_time
-
-         return StrategyResult(
-             success=all(r.success for r in outputs),
-             outputs=outputs,
-             aggregated_output={"final_output": current_output},
-             total_duration=duration,
-             errors=[r.error for r in outputs if not r.success],
-         )
-
-
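# A minimal sketch of why the shared prefix matters for the caching above: every agent
# call reuses the same large, byte-identical context block and only the short tail varies,
# so a provider-side prompt cache (such as Anthropic's) can serve the prefix from cache.
# The placeholder context and the savings implied are illustrative, not measured.
CACHED_CONTEXT = "<large block of codebase/docs text that never changes between calls>"


def build_prompt(task: str, previous_output: str, role: str) -> str:
    # Identical prefix first, varying tail last: the prefix is cacheable, the tail is not.
    return (
        f"{CACHED_CONTEXT}\n\n---\n\n"
        f"Current task: {task}\nPrevious output: {previous_output}\nYour role: {role}"
    )


p1 = build_prompt("draft release notes", "", "drafter")
p2 = build_prompt("review release notes", "draft v1", "reviewer")
shared = len(CACHED_CONTEXT)
print(f"shared prefix: {shared} chars; varying tails: {len(p1) - shared} and {len(p2) - shared} chars")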
- class DelegationChainStrategy(ExecutionStrategy):
-     """Hierarchical delegation with max depth enforcement.
-
-     Anthropic Pattern: Keep agent hierarchies shallow (≤3 levels).
-     Coordinator delegates to specialists, specialists can delegate further.
-
-     Example:
-         Level 1: Coordinator (analyzes task)
-         Level 2: Domain specialists (security, performance, quality)
-         Level 3: Sub-specialists (SQL injection, XSS, etc.)
-         Level 4: ❌ NOT ALLOWED (too deep)
-
-     Benefits:
-     - Complex specialization within depth limits
-     - Clear delegation hierarchy
-     - Prevents runaway recursion
-
-     Security:
-     - Max depth enforced (default: 3)
-     - Delegation trace logged
-     - Circular delegation prevented
-     """
-
-     MAX_DEPTH = 3
-
-     def __init__(self, max_depth: int = 3):
-         """Initialize with depth limit.
-
-         Args:
-             max_depth: Maximum delegation depth (default: 3, max: 3)
-         """
-         self.max_depth = min(max_depth, self.MAX_DEPTH)
-
-     async def execute(
-         self, agents: list[AgentTemplate], context: dict[str, Any]
-     ) -> StrategyResult:
-         """Execute delegation chain with depth tracking.
-
-         Args:
-             agents: Hierarchical agent structure [coordinator, specialist1, specialist2, ...]
-             context: Execution context with task
-
-         Returns:
-             Result with delegation trace
-         """
-         current_depth = context.get("_delegation_depth", 0)
-
-         if current_depth >= self.max_depth:
-             return StrategyResult(
-                 success=False,
-                 outputs=[],
-                 aggregated_output={},
-                 errors=[f"Max delegation depth ({self.max_depth}) exceeded at depth {current_depth}"],
-             )
-
-         if not agents:
-             return StrategyResult(
-                 success=False,
-                 outputs=[],
-                 aggregated_output={},
-                 errors=["No agents provided for delegation"],
-             )
-
-         start_time = asyncio.get_event_loop().time()
-
-         # Execute coordinator (first agent)
-         coordinator = agents[0]
-         specialists = agents[1:]
-
-         try:
-             # Coordinator analyzes and plans delegation
-             delegation_plan = await self._plan_delegation(
-                 coordinator=coordinator, task=context.get("task", ""), specialists=specialists
-             )
-
-             # Execute delegated tasks
-             results = []
-             for sub_task in delegation_plan.get("sub_tasks", []):
-                 specialist_id = sub_task.get("specialist_id")
-                 specialist = self._find_specialist(specialist_id, specialists)
-
-                 if specialist:
-                     # Recursive delegation (with depth tracking)
-                     sub_context = {
-                         **context,
-                         "task": sub_task.get("task", ""),
-                         "_delegation_depth": current_depth + 1,
-                     }
-
-                     sub_result = await self._execute_specialist(
-                         specialist=specialist, context=sub_context
-                     )
-
-                     results.append(sub_result)
-
-             # Synthesize results
-             final_output = await self._synthesize_results(
-                 coordinator=coordinator, results=results, original_task=context.get("task", "")
-             )
-
-             duration = asyncio.get_event_loop().time() - start_time
-
-             return StrategyResult(
-                 success=True,
-                 outputs=results,
-                 aggregated_output=final_output,
-                 total_duration=duration,
-             )
-
-         except Exception as e:
-             logger.exception(f"Delegation chain failed: {e}")
-             duration = asyncio.get_event_loop().time() - start_time
-             return StrategyResult(
-                 success=False,
-                 outputs=[],
-                 aggregated_output={},
-                 total_duration=duration,
-                 errors=[str(e)],
-             )
-
-     async def _plan_delegation(
-         self, coordinator: AgentTemplate, task: str, specialists: list[AgentTemplate]
-     ) -> dict[str, Any]:
-         """Coordinator plans delegation strategy."""
-         import json
-
-         from empathy_os.models import LLMClient
-
-         client = LLMClient()
-
-         specialist_descriptions = "\n".join(
-             [f"- {s.agent_id}: {s.role}" for s in specialists]
-         )
-
-         prompt = f"""Break down this task and assign to specialists:
-
- Task: {task}
-
- Available specialists:
- {specialist_descriptions}
-
- Return JSON:
- {{
- "sub_tasks": [
- {{"specialist_id": "...", "task": "..."}},
- ...
- ]
- }}"""
-
-         response = await client.call(
-             prompt=prompt,
-             system_prompt=coordinator.system_prompt or "You are a task coordinator.",
-             tier=coordinator.tier,
-             workflow_id=f"delegation:{coordinator.agent_id}",
-         )
-
-         try:
-             return json.loads(response.get("content", "{}"))
-         except json.JSONDecodeError:
-             logger.warning("Failed to parse delegation plan, using fallback")
-             return {"sub_tasks": [{"specialist_id": specialists[0].agent_id if specialists else "unknown", "task": task}]}
-
-     async def _execute_specialist(
-         self, specialist: AgentTemplate, context: dict[str, Any]
-     ) -> AgentResult:
-         """Execute specialist agent."""
-         from empathy_os.models import LLMClient
-
-         client = LLMClient()
-         start_time = asyncio.get_event_loop().time()
-
-         try:
-             response = await client.call(
-                 prompt=context.get("task", ""),
-                 system_prompt=specialist.system_prompt,
-                 tier=specialist.tier,
-                 workflow_id=f"specialist:{specialist.agent_id}",
-             )
-
-             duration = asyncio.get_event_loop().time() - start_time
-
-             return AgentResult(
-                 agent_id=specialist.agent_id,
-                 success=True,
-                 output=response,
-                 confidence=1.0,
-                 duration_seconds=duration,
-             )
-         except Exception as e:
-             logger.exception(f"Specialist {specialist.agent_id} failed: {e}")
-             duration = asyncio.get_event_loop().time() - start_time
-             return AgentResult(
-                 agent_id=specialist.agent_id,
-                 success=False,
-                 output={},
-                 confidence=0.0,
-                 duration_seconds=duration,
-                 error=str(e),
-             )
-
-     def _find_specialist(
-         self, specialist_id: str, agents: list[AgentTemplate]
-     ) -> AgentTemplate | None:
-         """Find specialist by ID."""
-         for agent in agents:
-             if agent.agent_id == specialist_id:
-                 return agent
-         return None
-
-     async def _synthesize_results(
-         self, coordinator: AgentTemplate, results: list[AgentResult], original_task: str
-     ) -> dict[str, Any]:
-         """Coordinator synthesizes specialist results."""
-         from empathy_os.models import LLMClient
-
-         client = LLMClient()
-
-         specialist_reports = "\n\n".join(
-             [f"## {r.agent_id}\n{r.output.get('content', '')}" for r in results]
-         )
-
-         prompt = f"""Synthesize these specialist reports:
-
- Original task: {original_task}
-
- {specialist_reports}
-
- Provide cohesive final analysis."""
-
-         try:
-             response = await client.call(
-                 prompt=prompt,
-                 system_prompt=coordinator.system_prompt or "You are a synthesis coordinator.",
-                 tier=coordinator.tier,
-                 workflow_id=f"synthesis:{coordinator.agent_id}",
-             )
-
-             return {
-                 "synthesis": response.get("content", ""),
-                 "specialist_reports": [r.output for r in results],
-                 "delegation_depth": len(results),
-             }
-         except Exception as e:
-             logger.exception(f"Synthesis failed: {e}")
-             return {
-                 "synthesis": "Synthesis failed",
-                 "specialist_reports": [r.output for r in results],
-                 "delegation_depth": len(results),
-                 "error": str(e),
-             }
-
-
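# A minimal sketch of the depth cap above: the current depth rides along in the context
# under "_delegation_depth" and each hand-off increments it, so a chain of delegations
# stops after max_depth hops. delegate() is an illustrative stand-in for a coordinator call.
def delegate(context: dict, max_depth: int = 3) -> str:
    depth = context.get("_delegation_depth", 0)
    if depth >= max_depth:
        return f"refused at depth {depth}"
    sub_context = {**context, "_delegation_depth": depth + 1}
    return delegate(sub_context, max_depth)


print(delegate({"task": "audit the payment service"}))  # refused at depth 3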
- @dataclass
- class StepDefinition:
-     """Definition of a step in NestedSequentialStrategy.
-
-     Either agent OR workflow_ref must be provided (mutually exclusive).
-
-     Attributes:
-         agent: Agent to execute directly
-         workflow_ref: Nested workflow to execute
-     """
-
-     agent: AgentTemplate | None = None
-     workflow_ref: WorkflowReference | None = None
-
-     def __post_init__(self):
-         """Validate that exactly one step type is provided."""
-         if bool(self.agent) == bool(self.workflow_ref):
-             raise ValueError("StepDefinition must have exactly one of: agent or workflow_ref")
-
-
- # Strategy registry for lookup by name
- STRATEGY_REGISTRY: dict[str, type[ExecutionStrategy]] = {
-     # Original 7 patterns
-     "sequential": SequentialStrategy,
-     "parallel": ParallelStrategy,
-     "debate": DebateStrategy,
-     "teaching": TeachingStrategy,
-     "refinement": RefinementStrategy,
-     "adaptive": AdaptiveStrategy,
-     "conditional": ConditionalStrategy,
-     # Additional patterns
-     "multi_conditional": MultiConditionalStrategy,
-     "nested": NestedStrategy,
-     "nested_sequential": NestedSequentialStrategy,
-     # New Anthropic-inspired patterns (8-10)
-     "tool_enhanced": ToolEnhancedStrategy,
-     "prompt_cached_sequential": PromptCachedSequentialStrategy,
-     "delegation_chain": DelegationChainStrategy,
- }
-
-
- def get_strategy(strategy_name: str) -> ExecutionStrategy:
-     """Get strategy instance by name.
-
-     Args:
-         strategy_name: Strategy name (e.g., "sequential", "parallel")
-
-     Returns:
-         ExecutionStrategy instance
-
-     Raises:
-         ValueError: If strategy name is invalid
-
-     Example:
-         >>> strategy = get_strategy("sequential")
-         >>> isinstance(strategy, SequentialStrategy)
-         True
-     """
-     if strategy_name not in STRATEGY_REGISTRY:
-         raise ValueError(
-             f"Unknown strategy: {strategy_name}. Available: {list(STRATEGY_REGISTRY.keys())}"
-         )
-
-     strategy_class = STRATEGY_REGISTRY[strategy_name]
-     return strategy_class()
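# A minimal standalone sketch of the registry-lookup pattern get_strategy() implements
# above: map a name to a class, validate, and instantiate with no arguments. Note that the
# parameterized strategies (conditional, multi_conditional, nested, nested_sequential)
# take required constructor arguments and are constructed directly, as the classes above
# do internally; the two toy classes here are illustrative placeholders.
class Sequential:
    pass


class Parallel:
    pass


REGISTRY: dict[str, type] = {"sequential": Sequential, "parallel": Parallel}


def lookup(name: str):
    if name not in REGISTRY:
        raise ValueError(f"Unknown strategy: {name}. Available: {list(REGISTRY)}")
    return REGISTRY[name]()


print(type(lookup("parallel")).__name__)  # Parallel
try:
    lookup("debate")
except ValueError as err:
    print(err)  # Unknown strategy: debate. Available: ['sequential', 'parallel']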