aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +399 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3870 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1435 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +884 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +364 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +224 -36
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +324 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +457 -0
  199. aiecs/llm/clients/googleai_client.py +241 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +897 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1323 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1011 -0
  271. aiecs/tools/docs/document_writer_tool.py +1829 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +175 -131
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
  321. aiecs-1.7.6.dist-info/RECORD +337 -0
  322. aiecs-1.7.6.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,3870 @@
+ """
+ Base AI Agent
+
+ Abstract base class for all AI agents in the AIECS system.
+ """
+
+ from abc import ABC, abstractmethod
+ from datetime import datetime
+ from typing import Dict, List, Any, Optional, Callable, Union, TYPE_CHECKING, AsyncIterator, Set
+ from dataclasses import dataclass
+ import logging
+ import time
+ import asyncio
+ import json
+
+ from .models import (
+     AgentState,
+     AgentType,
+     AgentConfiguration,
+     AgentGoal,
+     AgentMetrics,
+     AgentCapabilityDeclaration,
+     GoalStatus,
+     GoalPriority,
+     MemoryType,
+ )
+ from .exceptions import (
+     InvalidStateTransitionError,
+     ConfigurationError,
+     AgentInitializationError,
+     SerializationError,
+ )
+
+ # Import protocols for type hints
+ if TYPE_CHECKING:
+     from aiecs.llm.protocols import LLMClientProtocol
+     from aiecs.domain.agent.integration.protocols import (
+         ConfigManagerProtocol,
+         CheckpointerProtocol,
+     )
+     from aiecs.tools.base_tool import BaseTool
+     from aiecs.domain.context.context_engine import ContextEngine
+
+ logger = logging.getLogger(__name__)
+
+
47
+ class OperationTimer:
+     """
+     Context manager for timing operations and tracking metrics.
+
+     Automatically records operation duration and can be used to track
+     operation-level performance metrics.
+
+     Example:
+         with agent.track_operation_time("llm_call") as timer:
+             result = llm.generate(prompt)
+         # timer.duration contains the elapsed time in seconds
+     """
+
+     def __init__(self, operation_name: str, agent: Optional["BaseAIAgent"] = None):
+         """
+         Initialize operation timer.
+
+         Args:
+             operation_name: Name of the operation being timed
+             agent: Optional agent instance for automatic metrics recording
+         """
+         self.operation_name = operation_name
+         self.agent = agent
+         self.start_time: Optional[float] = None
+         self.end_time: Optional[float] = None
+         self.duration: Optional[float] = None
+         self.error: Optional[Exception] = None
+
+     def __enter__(self) -> "OperationTimer":
+         """Start timing."""
+         self.start_time = time.time()
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+         """
+         Stop timing and record metrics.
+
+         Args:
+             exc_type: Exception type if an error occurred
+             exc_val: Exception value if an error occurred
+             exc_tb: Exception traceback if an error occurred
+
+         Returns:
+             None (falsy), so exceptions are propagated
+         """
+         self.end_time = time.time()
+         if self.start_time is not None:
+             self.duration = self.end_time - self.start_time
+
+         # Track error if one occurred
+         if exc_val is not None:
+             self.error = exc_val
+
+         # Record metrics if agent is provided
+         if self.agent and self.duration is not None:
+             self.agent._record_operation_metrics(
+                 operation_name=self.operation_name,
+                 duration=self.duration,
+                 success=exc_val is None,
+             )
+
+         # Don't suppress exceptions
+         return None
+
+     def get_duration_ms(self) -> Optional[float]:
+         """Get duration in milliseconds."""
+         return self.duration * 1000 if self.duration is not None else None
+
+
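OperationTimer can also be used directly, without an attached agent, in which case nothing is recorded automatically and only the timing fields are populated. A minimal illustrative sketch (not part of the packaged file; time.sleep stands in for real work):

import time

from aiecs.domain.agent.base_agent import OperationTimer

with OperationTimer("parse_document") as timer:  # agent=None, so no metrics are auto-recorded
    time.sleep(0.05)  # placeholder workload

print(f"{timer.operation_name} took {timer.get_duration_ms():.1f} ms")
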
116
+ @dataclass
+ class CacheConfig:
+     """
+     Configuration for tool result caching.
+
+     Provides control over caching behavior to improve performance and reduce costs
+     by avoiding redundant tool executions. Supports TTL-based expiration, size limits,
+     and automatic cleanup.
+
+     **Key Features:**
+     - TTL-based cache expiration (default and per-tool)
+     - Size limits to prevent memory exhaustion
+     - Automatic cleanup when capacity threshold reached
+     - Configurable cache key generation
+     - Input hashing for large parameters
+
+     Attributes:
+         enabled: Enable/disable caching globally
+         default_ttl: Default time-to-live in seconds for cached entries (default: 300 = 5 minutes)
+         tool_specific_ttl: Dictionary mapping tool names to custom TTL values (overrides default_ttl)
+         max_cache_size: Maximum number of cached entries before cleanup (default: 1000)
+         max_memory_mb: Maximum cache memory usage in MB (approximate, default: 100)
+         cleanup_interval: Interval in seconds between cleanup checks (default: 60)
+         cleanup_threshold: Capacity threshold (0.0-1.0) to trigger cleanup (default: 0.9 = 90%)
+         include_timestamp_in_key: Whether to include timestamp in cache key (default: False)
+         hash_large_inputs: Whether to hash inputs larger than 1KB for cache keys (default: True)
+
+     Examples:
+         # Example 1: Basic caching configuration
+         config = CacheConfig(
+             enabled=True,
+             default_ttl=300,  # 5 minutes
+             max_cache_size=1000
+         )
+
+         # Example 2: Per-tool TTL overrides
+         config = CacheConfig(
+             enabled=True,
+             default_ttl=300,
+             tool_specific_ttl={
+                 "search": 600,       # Search results cached for 10 minutes
+                 "calculator": 3600,  # Calculator results cached for 1 hour
+                 "weather": 1800      # Weather data cached for 30 minutes
+             }
+         )
+
+         # Example 3: Aggressive caching for expensive operations
+         config = CacheConfig(
+             enabled=True,
+             default_ttl=3600,  # 1 hour default
+             max_cache_size=5000,
+             max_memory_mb=500,
+             cleanup_threshold=0.95  # Cleanup at 95% capacity
+         )
+
+         # Example 4: Disable caching for time-sensitive tools
+         config = CacheConfig(
+             enabled=False  # Disable caching entirely
+         )
+
+         # Example 5: Cache with timestamp-aware keys
+         config = CacheConfig(
+             enabled=True,
+             default_ttl=300,
+             include_timestamp_in_key=True  # Include timestamp for time-sensitive caching
+         )
+     """
+
+     # Cache enablement
+     enabled: bool = True  # Enable/disable caching
+
+     # TTL settings
+     default_ttl: int = 300  # Default TTL in seconds (5 minutes)
+     tool_specific_ttl: Optional[Dict[str, int]] = None  # Per-tool TTL overrides
+
+     # Size limits
+     max_cache_size: int = 1000  # Maximum number of cached entries
+     max_memory_mb: int = 100  # Maximum cache memory in MB (approximate)
+
+     # Cleanup settings
+     cleanup_interval: int = 60  # Cleanup interval in seconds
+     cleanup_threshold: float = 0.9  # Trigger cleanup at 90% capacity
+
+     # Cache key settings
+     include_timestamp_in_key: bool = False  # Include timestamp in cache key
+     hash_large_inputs: bool = True  # Hash inputs larger than 1KB
+
+     def __post_init__(self):
+         """Initialize defaults."""
+         if self.tool_specific_ttl is None:
+             self.tool_specific_ttl = {}
+
+     def get_ttl(self, tool_name: str) -> int:
+         """
+         Get TTL for a specific tool.
+
+         Args:
+             tool_name: Name of the tool
+
+         Returns:
+             TTL in seconds
+         """
+         if self.tool_specific_ttl is None:
+             return self.default_ttl
+         return self.tool_specific_ttl.get(tool_name, self.default_ttl)
+
+
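A minimal sketch (illustrative only, not part of the packaged file) of how get_ttl resolves per-tool overrides against the default TTL:

from aiecs.domain.agent.base_agent import CacheConfig

cache_config = CacheConfig(
    default_ttl=300,
    tool_specific_ttl={"search": 600},
)

assert cache_config.get_ttl("search") == 600      # per-tool override wins
assert cache_config.get_ttl("calculator") == 300  # falls back to default_ttl
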
223
+ class BaseAIAgent(ABC):
+     """
+     Abstract base class for AI agents.
+
+     Provides common functionality for agent lifecycle management,
+     state management, memory, goals, and metrics tracking.
+
+     This base class supports extensive flexibility and advanced features:
+
+     **Tool Flexibility:**
+     - Accept tool names (List[str]) for backward compatibility
+     - Accept pre-configured tool instances (Dict[str, BaseTool]) with preserved state
+     - Automatic tool loading and validation
+
+     **LLM Client Flexibility:**
+     - Accept any object implementing LLMClientProtocol (duck typing)
+     - No requirement for BaseLLMClient inheritance
+     - Custom LLM client wrappers fully supported
+
+     **Advanced Features:**
+     - ContextEngine integration for persistent conversation history
+     - Custom config managers for dynamic configuration
+     - Checkpointers for state persistence (LangGraph compatible)
+     - Agent collaboration (delegation, peer review, consensus)
+     - Agent learning from experiences
+     - Resource management (rate limiting, quotas)
+     - Performance tracking and health monitoring
+     - Tool result caching
+     - Parallel tool execution
+     - Streaming responses
+     - Error recovery strategies
+
+     Examples:
+         # Example 1: Basic agent with tool names (backward compatible)
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search", "calculator"]  # Tool names loaded by subclass
+         )
+
+         # Example 2: Agent with tool instances (preserves tool state)
+         from aiecs.tools import BaseTool
+
+         class StatefulSearchTool(BaseTool):
+             def __init__(self, api_key: str):
+                 self.api_key = api_key
+                 self.call_count = 0  # State preserved
+
+             async def run_async(self, query: str):
+                 self.call_count += 1
+                 return f"Search results for: {query}"
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools={
+                 "search": StatefulSearchTool(api_key="..."),
+                 "calculator": CalculatorTool()
+             },
+             llm_client=OpenAIClient()
+         )
+         # Tool state (call_count) is preserved across agent operations
+
+         # Example 3: Agent with custom LLM client (no BaseLLMClient inheritance)
+         class CustomLLMClient:
+             provider_name = "custom"
+
+             async def generate_text(self, messages, **kwargs):
+                 # Custom implementation
+                 return LLMResponse(content="...", provider="custom")
+
+             async def stream_text(self, messages, **kwargs):
+                 async for token in self._custom_stream():
+                     yield token
+
+             async def close(self):
+                 # Cleanup
+                 pass
+
+         agent = LLMAgent(
+             agent_id="agent1",
+             name="My LLM Agent",
+             agent_type=AgentType.CONVERSATIONAL,
+             config=config,
+             llm_client=CustomLLMClient()  # Works without BaseLLMClient!
+         )
+
+         # Example 4: Agent with ContextEngine for persistent storage
+         from aiecs.domain.context import ContextEngine
+
+         context_engine = ContextEngine()
+         await context_engine.initialize()
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             context_engine=context_engine  # Enables persistent conversation history
+         )
+         # Conversation history persists across agent restarts
+
+         # Example 5: Agent with custom config manager
+         class DatabaseConfigManager:
+             async def get_config(self, key: str):
+                 # Load from database
+                 return await db.get_config(key)
+
+             async def update_config(self, key: str, value: Any):
+                 # Update in database
+                 await db.update_config(key, value)
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             config_manager=DatabaseConfigManager()  # Dynamic config loading
+         )
+
+         # Example 6: Agent with checkpointer for LangGraph integration
+         class RedisCheckpointer:
+             async def save(self, agent_id: str, state: Dict[str, Any]):
+                 await redis.set(f"checkpoint:{agent_id}", json.dumps(state))
+
+             async def load(self, agent_id: str) -> Optional[Dict[str, Any]]:
+                 data = await redis.get(f"checkpoint:{agent_id}")
+                 return json.loads(data) if data else None
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             checkpointer=RedisCheckpointer()  # LangGraph-compatible checkpointing
+         )
+
+         # Example 7: Agent with collaboration features
+         agent_registry = {
+             "agent2": other_agent_instance,
+             "agent3": another_agent_instance
+         }
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             collaboration_enabled=True,
+             agent_registry=agent_registry  # Enable delegation and peer review
+         )
+
+         # Delegate task to another agent
+         result = await agent.delegate_task(
+             task_description="Analyze this data",
+             target_agent_id="agent2"
+         )
+
+         # Example 8: Agent with learning enabled
+         from aiecs.domain.agent.models import ResourceLimits
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             learning_enabled=True  # Learn from past experiences
+         )
+
+         # Record experience
+         await agent.record_experience(
+             task_type="data_analysis",
+             approach="parallel_tools",
+             success=True,
+             execution_time=2.5
+         )
+
+         # Get recommended approach based on history
+         approach = await agent.get_recommended_approach("data_analysis")
+         print(f"Recommended: {approach}")
+
+         # Example 9: Agent with resource limits
+         from aiecs.domain.agent.models import ResourceLimits
+
+         resource_limits = ResourceLimits(
+             max_concurrent_tasks=5,
+             max_tokens_per_minute=10000,
+             max_tool_calls_per_minute=100
+         )
+
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient(),
+             resource_limits=resource_limits  # Rate limiting and quotas
+         )
+
+         # Check resource availability before executing
+         if await agent.check_resource_availability():
+             result = await agent.execute_task(task, context)
+         else:
+             await agent.wait_for_resources(timeout=30.0)
+
+         # Example 10: Agent with performance tracking
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient()
+         )
+
+         # Track operation performance
+         with agent.track_operation_time("data_processing"):
+             result = await agent.execute_task(task, context)
+
+         # Get performance metrics
+         metrics = agent.get_performance_metrics()
+         print(f"Average response time: {metrics['avg_response_time']}s")
+         print(f"P95 response time: {metrics['p95_response_time']}s")
+
+         # Get health status
+         health = agent.get_health_status()
+         print(f"Health score: {health['score']}")
+         print(f"Status: {health['status']}")
+
+         # Example 11: Agent with tool caching
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient()
+         )
+
+         # Execute tool with caching (30 second TTL)
+         result1 = await agent.execute_tool_with_cache(
+             tool_name="search",
+             operation="query",
+             parameters={"q": "AI"},
+             cache_ttl=30
+         )
+
+         # Second call uses cache (no API call)
+         result2 = await agent.execute_tool_with_cache(
+             tool_name="search",
+             operation="query",
+             parameters={"q": "AI"},
+             cache_ttl=30
+         )
+
+         # Get cache statistics
+         stats = agent.get_cache_stats()
+         print(f"Cache hit rate: {stats['hit_rate']:.1%}")
+
+         # Example 12: Agent with parallel tool execution
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search", "calculator", "translator"],
+             llm_client=OpenAIClient()
+         )
+
+         # Execute multiple independent tools in parallel (3-5x faster)
+         results = await agent.execute_tools_parallel([
+             {"tool": "search", "operation": "query", "parameters": {"q": "AI"}},
+             {"tool": "calculator", "operation": "add", "parameters": {"a": 1, "b": 2}},
+             {"tool": "translator", "operation": "translate", "parameters": {"text": "Hello"}}
+         ], max_concurrency=3)
+
+         # Example 13: Agent with streaming responses
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient()
+         )
+
+         # Stream task execution (tokens + tool calls)
+         async for event in agent.execute_task_streaming(task, context):
+             if event['type'] == 'token':
+                 print(event['content'], end='', flush=True)
+             elif event['type'] == 'tool_call':
+                 print(f"\\nCalling {event['tool_name']}...")
+             elif event['type'] == 'result':
+                 print(f"\\nFinal result: {event['output']}")
+
+         # Example 14: Agent with error recovery
+         agent = HybridAgent(
+             agent_id="agent1",
+             name="My Agent",
+             agent_type=AgentType.HYBRID,
+             config=config,
+             tools=["search"],
+             llm_client=OpenAIClient()
+         )
+
+         # Execute with automatic recovery strategies
+         result = await agent.execute_with_recovery(
+             task=task,
+             context=context,
+             strategies=["retry", "simplify", "fallback", "delegate"]
+         )
+         # Automatically tries retry → simplify → fallback → delegate if errors occur
+     """
+
552
+     def __init__(
+         self,
+         agent_id: str,
+         name: str,
+         agent_type: AgentType,
+         config: AgentConfiguration,
+         description: Optional[str] = None,
+         version: str = "1.0.0",
+         tools: Optional[Union[List[str], Dict[str, "BaseTool"]]] = None,
+         llm_client: Optional["LLMClientProtocol"] = None,
+         config_manager: Optional["ConfigManagerProtocol"] = None,
+         checkpointer: Optional["CheckpointerProtocol"] = None,
+         context_engine: Optional["ContextEngine"] = None,
+         collaboration_enabled: bool = False,
+         agent_registry: Optional[Dict[str, Any]] = None,
+         learning_enabled: bool = False,
+         resource_limits: Optional[Any] = None,
+     ):
+         """
+         Initialize the base agent.
+
+         Args:
+             agent_id: Unique identifier for the agent
+             name: Agent name
+             agent_type: Type of agent
+             config: Agent configuration
+             description: Optional agent description
+             version: Agent version
+             tools: Optional tools - either list of tool names or dict of tool instances.
+                 List[str]: Tool names to be loaded by subclass
+                 Dict[str, BaseTool]: Pre-configured tool instances with state
+             llm_client: Optional LLM client (any object implementing LLMClientProtocol).
+                 Supports custom LLM clients without BaseLLMClient inheritance.
+             config_manager: Optional configuration manager for dynamic config loading
+             checkpointer: Optional checkpointer for state persistence (LangGraph compatible)
+             context_engine: Optional ContextEngine instance for persistent conversation history
+                 and session management. If provided, enables persistent storage
+                 across agent restarts.
+             collaboration_enabled: Enable agent collaboration features (delegation, peer review)
+             agent_registry: Registry of other agents for collaboration (agent_id -> agent instance)
+             learning_enabled: Enable agent learning from experiences
+             resource_limits: Optional resource limits configuration
+
+         Example:
+             # With tool instances and ContextEngine
+             from aiecs.domain.context import ContextEngine
+
+             context_engine = ContextEngine()
+             await context_engine.initialize()
+
+             agent = HybridAgent(
+                 agent_id="agent1",
+                 name="My Agent",
+                 agent_type=AgentType.HYBRID,
+                 config=config,
+                 tools={
+                     "search": SearchTool(api_key="..."),
+                     "calculator": CalculatorTool()
+                 },
+                 llm_client=CustomLLMClient(),  # Custom client, no inheritance needed
+                 config_manager=DatabaseConfigManager(),
+                 checkpointer=RedisCheckpointer(),
+                 context_engine=context_engine  # Enables persistent storage
+             )
+
+             # With tool names (backward compatible)
+             agent = HybridAgent(
+                 agent_id="agent1",
+                 name="My Agent",
+                 agent_type=AgentType.HYBRID,
+                 config=config,
+                 tools=["search", "calculator"]  # Loaded by subclass
+             )
+         """
+         # Identity
+         self.agent_id = agent_id
+         self.name = name
+         self.agent_type = agent_type
+         self.description = description or f"{agent_type.value} agent"
+         self.version = version
+
+         # Configuration
+         self._config = config
+         self._config_manager = config_manager
+
+         # State
+         self._state = AgentState.CREATED
+         self._previous_state: Optional[AgentState] = None
+
+         # Memory storage (in-memory dict, can be replaced with sophisticated
+         # storage)
+         self._memory: Dict[str, Any] = {}
+         self._memory_metadata: Dict[str, Dict[str, Any]] = {}
+
+         # Goals
+         self._goals: Dict[str, AgentGoal] = {}
+
+         # Capabilities
+         self._capabilities: Dict[str, AgentCapabilityDeclaration] = {}
+
+         # Metrics
+         self._metrics = AgentMetrics()  # type: ignore[call-arg]
+
+         # Timestamps
+         self.created_at = datetime.utcnow()
+         self.updated_at = datetime.utcnow()
+         self.last_active_at: Optional[datetime] = None
+
+         # Current task tracking
+         self._current_task_id: Optional[str] = None
+
+         # Tools (optional - only set if tools provided)
+         self._tools_input = tools  # Store original input
+         self._available_tools: Optional[List[str]] = None
+         self._tool_instances: Optional[Dict[str, "BaseTool"]] = None
+
+         # LLM client (optional)
+         self._llm_client = llm_client
+
+         # Checkpointer (optional)
+         self._checkpointer = checkpointer
+
+         # ContextEngine (optional - Phase 4 enhancement)
+         self._context_engine = context_engine
+
+         # Tool result cache (Phase 7 enhancement)
+         self._cache_config = CacheConfig()
+         self._tool_cache: Dict[str, Any] = {}  # Cache key -> result
+         self._cache_timestamps: Dict[str, float] = {}  # Cache key -> timestamp
+         self._cache_access_count: Dict[str, int] = {}  # Cache key -> access count
+         self._last_cleanup_time = time.time()
+
+         # Agent collaboration (Phase 7 enhancement - tasks 1.15.15-1.15.22)
+         self._collaboration_enabled = collaboration_enabled
+         self._agent_registry = agent_registry or {}
+
+         # Agent learning (Phase 8 enhancement - tasks 1.16.4-1.16.10)
+         self._learning_enabled = learning_enabled
+         self._experiences: List[Any] = []  # List of Experience objects
+         self._max_experiences = 1000  # Limit stored experiences
+
+         # Resource management (Phase 8 enhancement - tasks 1.16.11-1.16.17)
+         from .models import ResourceLimits
+
+         self._resource_limits = resource_limits or ResourceLimits()  # type: ignore[call-arg]
+         self._active_tasks: set = set()  # Set of active task IDs
+         self._token_usage_window: List[tuple] = []  # List of (timestamp, token_count)
+         self._tool_call_window: List[float] = []  # List of timestamps
+
+         features = []
+         if context_engine:
+             features.append("ContextEngine")
+         if collaboration_enabled:
+             features.append("collaboration")
+         if learning_enabled:
+             features.append("learning")
+         if resource_limits:
+             features.append("resource limits")
+
+         feature_str = f" with {', '.join(features)}" if features else ""
+         logger.info(f"Agent initialized: {self.agent_id} ({self.name}, {self.agent_type.value}){feature_str}")
+
714
+     # ==================== State Management ====================
+
+     @property
+     def state(self) -> AgentState:
+         """Get current agent state."""
+         return self._state
+
+     def get_state(self) -> AgentState:
+         """Get current agent state."""
+         return self._state
+
+     def _transition_state(self, new_state: AgentState) -> None:
+         """
+         Transition to a new state with validation.
+
+         Args:
+             new_state: Target state
+
+         Raises:
+             InvalidStateTransitionError: If transition is invalid
+         """
+         # Define valid transitions
+         valid_transitions = {
+             AgentState.CREATED: {AgentState.INITIALIZING},
+             AgentState.INITIALIZING: {AgentState.ACTIVE, AgentState.ERROR},
+             AgentState.ACTIVE: {
+                 AgentState.BUSY,
+                 AgentState.IDLE,
+                 AgentState.STOPPED,
+                 AgentState.ERROR,
+             },
+             AgentState.BUSY: {AgentState.ACTIVE, AgentState.ERROR},
+             AgentState.IDLE: {AgentState.ACTIVE, AgentState.STOPPED},
+             AgentState.ERROR: {AgentState.ACTIVE, AgentState.STOPPED},
+             AgentState.STOPPED: set(),  # Terminal state
+         }
+
+         if new_state not in valid_transitions.get(self._state, set()):
+             raise InvalidStateTransitionError(
+                 agent_id=self.agent_id,
+                 current_state=self._state.value,
+                 attempted_state=new_state.value,
+             )
+
+         self._previous_state = self._state
+         self._state = new_state
+         self.updated_at = datetime.utcnow()
+
+         logger.info(f"Agent {self.agent_id} state: {self._previous_state.value} → {new_state.value}")
+
764
+     # ==================== Lifecycle Methods ====================
+
+     async def initialize(self) -> None:
+         """
+         Initialize the agent.
+
+         This method should be called before the agent can be used.
+         Override in subclasses to add initialization logic.
+
+         Raises:
+             AgentInitializationError: If initialization fails
+         """
+         try:
+             self._transition_state(AgentState.INITIALIZING)
+             logger.info(f"Initializing agent {self.agent_id}...")
+
+             # Subclass initialization
+             await self._initialize()
+
+             self._transition_state(AgentState.ACTIVE)
+             self.last_active_at = datetime.utcnow()
+             logger.info(f"Agent {self.agent_id} initialized successfully")
+
+         except Exception as e:
+             self._transition_state(AgentState.ERROR)
+             logger.error(f"Agent {self.agent_id} initialization failed: {e}")
+             raise AgentInitializationError(
+                 f"Failed to initialize agent {self.agent_id}: {str(e)}",
+                 agent_id=self.agent_id,
+             )
+
+     @abstractmethod
+     async def _initialize(self) -> None:
+         """
+         Subclass-specific initialization logic.
+
+         Override this method in subclasses to implement
+         custom initialization.
+         """
+
+     async def activate(self) -> None:
+         """Activate the agent."""
+         if self._state == AgentState.IDLE:
+             self._transition_state(AgentState.ACTIVE)
+             self.last_active_at = datetime.utcnow()
+             logger.info(f"Agent {self.agent_id} activated")
+         else:
+             logger.warning(f"Agent {self.agent_id} cannot be activated from state {self._state.value}")
+
+     async def deactivate(self) -> None:
+         """Deactivate the agent (enter idle state)."""
+         if self._state == AgentState.ACTIVE:
+             self._transition_state(AgentState.IDLE)
+             logger.info(f"Agent {self.agent_id} deactivated")
+         else:
+             logger.warning(f"Agent {self.agent_id} cannot be deactivated from state {self._state.value}")
+
+     async def shutdown(self) -> None:
+         """
+         Shutdown the agent.
+
+         Override in subclasses to add cleanup logic.
+         """
+         logger.info(f"Shutting down agent {self.agent_id}...")
+         await self._shutdown()
+         self._transition_state(AgentState.STOPPED)
+         logger.info(f"Agent {self.agent_id} shut down")
+
+     @abstractmethod
+     async def _shutdown(self) -> None:
+         """
+         Subclass-specific shutdown logic.
+
+         Override this method in subclasses to implement
+         custom cleanup.
+         """
+
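Combined with the transition table in _transition_state, the lifecycle methods above describe the path CREATED → INITIALIZING → ACTIVE → IDLE → ACTIVE → STOPPED. An illustrative sketch of a concrete subclass walking that path (not part of the packaged file; constructing AgentConfiguration() with no arguments is an assumption here):

from aiecs.domain.agent.base_agent import BaseAIAgent
from aiecs.domain.agent.models import AgentConfiguration, AgentType


class EchoAgent(BaseAIAgent):
    async def _initialize(self) -> None:
        pass  # no extra setup needed for this sketch

    async def _shutdown(self) -> None:
        pass  # no cleanup needed for this sketch

    async def execute_task(self, task, context):
        return {"echo": task}

    async def process_message(self, message, sender_id=None):
        return {"reply": message}


async def lifecycle_demo() -> None:
    agent = EchoAgent("demo", "Demo Agent", AgentType.HYBRID, AgentConfiguration())
    await agent.initialize()   # CREATED -> INITIALIZING -> ACTIVE
    await agent.deactivate()   # ACTIVE -> IDLE
    await agent.activate()     # IDLE -> ACTIVE
    await agent.shutdown()     # ACTIVE -> STOPPED (terminal state)
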
841
+     # ==================== Tool and LLM Client Helper Methods ====================
+
+     def _load_tools(self) -> None:
+         """
+         Load tools from the tools input parameter.
+
+         Handles both List[str] (tool names) and Dict[str, BaseTool] (tool instances).
+         Sets _available_tools and _tool_instances appropriately.
+
+         This helper method should be called by subclasses during initialization
+         if they want to use BaseAIAgent's tool management.
+
+         Raises:
+             ConfigurationError: If tools input is invalid
+         """
+         if self._tools_input is None:
+             # No tools provided
+             return
+
+         if isinstance(self._tools_input, list):
+             # Tool names - store for subclass to load
+             self._available_tools = self._tools_input
+             logger.debug(f"Agent {self.agent_id}: Registered {len(self._tools_input)} tool names")
+
+         elif isinstance(self._tools_input, dict):
+             # Tool instances - validate and store
+             from aiecs.tools.base_tool import BaseTool
+
+             for tool_name, tool_instance in self._tools_input.items():
+                 if not isinstance(tool_instance, BaseTool):
+                     raise ConfigurationError(f"Tool '{tool_name}' must be a BaseTool instance, got {type(tool_instance)}")
+
+             self._tool_instances = self._tools_input
+             self._available_tools = list(self._tools_input.keys())
+             logger.debug(f"Agent {self.agent_id}: Registered {len(self._tools_input)} tool instances")
+
+         else:
+             raise ConfigurationError(f"Tools must be List[str] or Dict[str, BaseTool], got {type(self._tools_input)}")
+
+     def _validate_llm_client(self) -> None:
+         """
+         Validate that the LLM client implements the required protocol.
+
+         Checks that the LLM client has the required methods:
+         - generate_text
+         - stream_text
+         - close
+         - provider_name (property)
+
+         This helper method should be called by subclasses during initialization
+         if they want to use BaseAIAgent's LLM client validation.
+
+         Raises:
+             ConfigurationError: If LLM client doesn't implement required methods
+         """
+         if self._llm_client is None:
+             return
+
+         required_methods = ["generate_text", "stream_text", "close"]
+         required_properties = ["provider_name"]
+
+         for method_name in required_methods:
+             if not hasattr(self._llm_client, method_name):
+                 raise ConfigurationError(f"LLM client must implement '{method_name}' method")
+             if not callable(getattr(self._llm_client, method_name)):
+                 raise ConfigurationError(f"LLM client '{method_name}' must be callable")
+
+         for prop_name in required_properties:
+             if not hasattr(self._llm_client, prop_name):
+                 raise ConfigurationError(f"LLM client must have '{prop_name}' property")
+
+         logger.debug(f"Agent {self.agent_id}: LLM client validated successfully")
+
+     def _get_tool_instances(self) -> Optional[Dict[str, "BaseTool"]]:
+         """
+         Get tool instances dictionary.
+
+         Returns:
+             Dictionary of tool instances, or None if no tool instances available
+         """
+         return self._tool_instances
+
+     def get_config_manager(self) -> Optional["ConfigManagerProtocol"]:
+         """
+         Get the configuration manager.
+
+         Returns:
+             Configuration manager instance, or None if not configured
+         """
+         return self._config_manager
+
932
+     # ==================== Abstract Execution Methods ====================
+
+     @abstractmethod
+     async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Execute a task.
+
+         Args:
+             task: Task specification
+             context: Execution context
+
+         Returns:
+             Task execution result
+
+         Raises:
+             TaskExecutionError: If task execution fails
+
+         Note:
+             Subclasses can use `_execute_with_retry()` to wrap task execution
+             with automatic retry logic based on agent configuration.
+         """
+
+     @abstractmethod
+     async def process_message(self, message: str, sender_id: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Process an incoming message.
+
+         Args:
+             message: Message content
+             sender_id: Optional sender identifier
+
+         Returns:
+             Response dictionary
+
+         Note:
+             Subclasses can use `_execute_with_retry()` to wrap message processing
+             with automatic retry logic based on agent configuration.
+         """
+
971
+     # ==================== Retry Logic Integration ====================
+
+     async def _execute_with_retry(self, func: Callable, *args, **kwargs) -> Any:
+         """
+         Execute a function with retry logic using agent's retry policy.
+
+         This helper method wraps function execution with automatic retry based on
+         the agent's configuration. It uses EnhancedRetryPolicy for sophisticated
+         error handling with exponential backoff and error classification.
+
+         Args:
+             func: Async function to execute
+             *args: Function positional arguments
+             **kwargs: Function keyword arguments
+
+         Returns:
+             Function result
+
+         Raises:
+             Exception: If all retries are exhausted
+
+         Example:
+             ```python
+             async def _execute_task_internal(self, task, context):
+                 # Actual task execution logic
+                 return result
+
+             async def execute_task(self, task, context):
+                 return await self._execute_with_retry(
+                     self._execute_task_internal,
+                     task,
+                     context
+                 )
+             ```
+         """
+         from .integration.retry_policy import EnhancedRetryPolicy
+
+         # Get retry policy from configuration
+         retry_config = self._config.retry_policy
+
+         # Create retry policy instance
+         retry_policy = EnhancedRetryPolicy(
+             max_retries=retry_config.max_retries,
+             base_delay=retry_config.base_delay,
+             max_delay=retry_config.max_delay,
+             exponential_base=retry_config.exponential_factor,
+             jitter=retry_config.jitter_factor > 0,
+         )
+
+         # Execute with retry
+         return await retry_policy.execute_with_retry(func, *args, **kwargs)
+
1023
+ # ==================== Memory Management ====================
1024
+
1025
+ async def add_to_memory(
1026
+ self,
1027
+ key: str,
1028
+ value: Any,
1029
+ memory_type: MemoryType = MemoryType.SHORT_TERM,
1030
+ metadata: Optional[Dict[str, Any]] = None,
1031
+ ) -> None:
1032
+ """
1033
+ Add an item to agent memory.
1034
+
1035
+ Args:
1036
+ key: Memory key
1037
+ value: Memory value
1038
+ memory_type: Type of memory (short_term or long_term)
1039
+ metadata: Optional metadata
1040
+ """
1041
+ self._memory[key] = value
1042
+ self._memory_metadata[key] = {
1043
+ "type": memory_type.value,
1044
+ "timestamp": datetime.utcnow(),
1045
+ "metadata": metadata or {},
1046
+ }
1047
+ logger.debug(f"Agent {self.agent_id} added memory: {key} ({memory_type.value})")
1048
+
1049
+ async def retrieve_memory(self, key: str, default: Any = None) -> Any:
1050
+ """
1051
+ Retrieve an item from memory.
1052
+
1053
+ Args:
1054
+ key: Memory key
1055
+ default: Default value if key not found
1056
+
1057
+ Returns:
1058
+ Memory value or default
1059
+ """
1060
+ return self._memory.get(key, default)
1061
+
1062
+ async def clear_memory(self, memory_type: Optional[MemoryType] = None) -> None:
1063
+ """
1064
+ Clear agent memory.
1065
+
1066
+ Args:
1067
+ memory_type: If specified, clear only this type of memory
1068
+ """
1069
+ if memory_type is None:
1070
+ self._memory.clear()
1071
+ self._memory_metadata.clear()
1072
+ logger.info(f"Agent {self.agent_id} cleared all memory")
1073
+ else:
1074
+ keys_to_remove = [k for k, v in self._memory_metadata.items() if v.get("type") == memory_type.value]
1075
+ for key in keys_to_remove:
1076
+ del self._memory[key]
1077
+ del self._memory_metadata[key]
1078
+ logger.info(f"Agent {self.agent_id} cleared {memory_type.value} memory")
1079
+
1080
+ def get_memory_summary(self) -> Dict[str, Any]:
1081
+ """Get a summary of agent memory."""
1082
+ return {
1083
+ "total_items": len(self._memory),
1084
+ "short_term_count": sum(1 for v in self._memory_metadata.values() if v.get("type") == MemoryType.SHORT_TERM.value),
1085
+ "long_term_count": sum(1 for v in self._memory_metadata.values() if v.get("type") == MemoryType.LONG_TERM.value),
1086
+ }
1087
+
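A self-contained mirror of the memory bookkeeping above, purely for illustration: values live in one dict while a parallel metadata dict records the memory type and timestamp, which is what makes clearing by type and the summary counts possible. `MemoryType` here is a local stand-in for the enum used by the agent models.

```python
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, Optional

class MemoryType(Enum):  # stand-in for the real enum
    SHORT_TERM = "short_term"
    LONG_TERM = "long_term"

memory: Dict[str, Any] = {}
memory_metadata: Dict[str, Dict[str, Any]] = {}

def add(key: str, value: Any, memory_type: MemoryType = MemoryType.SHORT_TERM,
        metadata: Optional[Dict[str, Any]] = None) -> None:
    memory[key] = value
    memory_metadata[key] = {
        "type": memory_type.value,
        "timestamp": datetime.now(timezone.utc),
        "metadata": metadata or {},
    }

def clear(memory_type: Optional[MemoryType] = None) -> None:
    keys = list(memory) if memory_type is None else [
        k for k, m in memory_metadata.items() if m["type"] == memory_type.value
    ]
    for k in keys:
        memory.pop(k, None)
        memory_metadata.pop(k, None)

add("last_query", "quarterly revenue", MemoryType.SHORT_TERM)
add("user_preference", "concise answers", MemoryType.LONG_TERM)
clear(MemoryType.SHORT_TERM)
print(sorted(memory))  # -> ['user_preference']
```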
1088
+ # ==================== Goal Management ====================
1089
+
1090
+ def set_goal(
1091
+ self,
1092
+ description: str,
1093
+ priority: GoalPriority = GoalPriority.MEDIUM,
1094
+ success_criteria: Optional[str] = None,
1095
+ deadline: Optional[datetime] = None,
1096
+ ) -> str:
1097
+ """
1098
+ Set a new goal for the agent.
1099
+
1100
+ Args:
1101
+ description: Goal description
1102
+ priority: Goal priority
1103
+ success_criteria: Success criteria
1104
+ deadline: Goal deadline
1105
+
1106
+ Returns:
1107
+ Goal ID
1108
+ """
1109
+ goal = AgentGoal( # type: ignore[call-arg]
1110
+ description=description,
1111
+ priority=priority,
1112
+ success_criteria=success_criteria,
1113
+ deadline=deadline,
1114
+ )
1115
+ self._goals[goal.goal_id] = goal
1116
+ logger.info(f"Agent {self.agent_id} set goal: {goal.goal_id} ({priority.value})")
1117
+ return goal.goal_id
1118
+
1119
+ def get_goals(self, status: Optional[GoalStatus] = None) -> List[AgentGoal]:
1120
+ """
1121
+ Get agent goals.
1122
+
1123
+ Args:
1124
+ status: Filter by status (optional)
1125
+
1126
+ Returns:
1127
+ List of goals
1128
+ """
1129
+ if status is None:
1130
+ return list(self._goals.values())
1131
+ return [g for g in self._goals.values() if g.status == status]
1132
+
1133
+ def get_goal(self, goal_id: str) -> Optional[AgentGoal]:
1134
+ """Get a specific goal by ID."""
1135
+ return self._goals.get(goal_id)
1136
+
1137
+ def update_goal_status(
1138
+ self,
1139
+ goal_id: str,
1140
+ status: GoalStatus,
1141
+ progress: Optional[float] = None,
1142
+ ) -> None:
1143
+ """
1144
+ Update goal status.
1145
+
1146
+ Args:
1147
+ goal_id: Goal ID
1148
+ status: New status
1149
+ progress: Optional progress percentage
1150
+ """
1151
+ if goal_id not in self._goals:
1152
+ logger.warning(f"Goal {goal_id} not found for agent {self.agent_id}")
1153
+ return
1154
+
1155
+ goal = self._goals[goal_id]
1156
+ goal.status = status
1157
+
1158
+ if progress is not None:
1159
+ goal.progress = progress
1160
+
1161
+ if status == GoalStatus.IN_PROGRESS and goal.started_at is None:
1162
+ goal.started_at = datetime.utcnow()
1163
+ elif status == GoalStatus.ACHIEVED:
1164
+ goal.achieved_at = datetime.utcnow()
1165
+
1166
+ logger.info(f"Agent {self.agent_id} updated goal {goal_id}: {status.value}")
1167
+
1168
+ # ==================== Configuration Management ====================
1169
+
1170
+ def get_config(self) -> AgentConfiguration:
1171
+ """Get agent configuration."""
1172
+ return self._config
1173
+
1174
+ def update_config(self, updates: Dict[str, Any]) -> None:
1175
+ """
1176
+ Update agent configuration.
1177
+
1178
+ Args:
1179
+ updates: Configuration updates
1180
+
1181
+ Raises:
1182
+ ConfigurationError: If configuration is invalid
1183
+ """
1184
+ try:
1185
+ # Update configuration
1186
+ for key, value in updates.items():
1187
+ if hasattr(self._config, key):
1188
+ setattr(self._config, key, value)
1189
+ else:
1190
+ logger.warning(f"Unknown config key: {key}")
1191
+
1192
+ self.updated_at = datetime.utcnow()
1193
+ logger.info(f"Agent {self.agent_id} configuration updated")
1194
+
1195
+ except Exception as e:
1196
+ raise ConfigurationError(
1197
+ f"Failed to update configuration: {str(e)}",
1198
+ agent_id=self.agent_id,
1199
+ )
1200
+
1201
+ # ==================== Capability Management ====================
1202
+
1203
+ def declare_capability(
1204
+ self,
1205
+ capability_type: str,
1206
+ level: str,
1207
+ description: Optional[str] = None,
1208
+ constraints: Optional[Dict[str, Any]] = None,
1209
+ ) -> None:
1210
+ """
1211
+ Declare an agent capability.
1212
+
1213
+ Args:
1214
+ capability_type: Type of capability
1215
+ level: Proficiency level
1216
+ description: Capability description
1217
+ constraints: Capability constraints
1218
+ """
1219
+ from .models import CapabilityLevel
1220
+
1221
+ capability = AgentCapabilityDeclaration(
1222
+ capability_type=capability_type,
1223
+ level=CapabilityLevel(level),
1224
+ description=description,
1225
+ constraints=constraints or {},
1226
+ )
1227
+ self._capabilities[capability_type] = capability
1228
+ logger.info(f"Agent {self.agent_id} declared capability: {capability_type} ({level})")
1229
+
1230
+ def has_capability(self, capability_type: str) -> bool:
1231
+ """Check if agent has a capability."""
1232
+ return capability_type in self._capabilities
1233
+
1234
+ def get_capabilities(self) -> List[AgentCapabilityDeclaration]:
1235
+ """Get all agent capabilities."""
1236
+ return list(self._capabilities.values())
1237
+
1238
+ # ==================== Metrics Tracking ====================
1239
+
1240
+ def get_metrics(self) -> AgentMetrics:
1241
+ """Get agent metrics."""
1242
+ return self._metrics
1243
+
1244
+ def update_metrics(
1245
+ self,
1246
+ execution_time: Optional[float] = None,
1247
+ success: bool = True,
1248
+ quality_score: Optional[float] = None,
1249
+ tokens_used: Optional[int] = None,
1250
+ tool_calls: Optional[int] = None,
1251
+ ) -> None:
1252
+ """
1253
+ Update agent metrics.
1254
+
1255
+ Args:
1256
+ execution_time: Task execution time
1257
+ success: Whether task succeeded
1258
+ quality_score: Quality score (0-1)
1259
+ tokens_used: Tokens used
1260
+ tool_calls: Number of tool calls
1261
+ """
1262
+ self._metrics.total_tasks_executed += 1
1263
+
1264
+ if success:
1265
+ self._metrics.successful_tasks += 1
1266
+ else:
1267
+ self._metrics.failed_tasks += 1
1268
+
1269
+ # Update success rate
1270
+ self._metrics.success_rate = self._metrics.successful_tasks / self._metrics.total_tasks_executed * 100
1271
+
1272
+ # Update execution time
1273
+ if execution_time is not None:
1274
+ self._metrics.total_execution_time += execution_time
1275
+ self._metrics.average_execution_time = self._metrics.total_execution_time / self._metrics.total_tasks_executed
1276
+
1277
+ if self._metrics.min_execution_time is None or execution_time < self._metrics.min_execution_time:
1278
+ self._metrics.min_execution_time = execution_time
1279
+ if self._metrics.max_execution_time is None or execution_time > self._metrics.max_execution_time:
1280
+ self._metrics.max_execution_time = execution_time
1281
+
1282
+ # Update quality score
1283
+ if quality_score is not None:
1284
+ if self._metrics.average_quality_score is None:
1285
+ self._metrics.average_quality_score = quality_score
1286
+ else:
1287
+ # Running average
1288
+ total_quality = self._metrics.average_quality_score * (self._metrics.total_tasks_executed - 1)
1289
+ self._metrics.average_quality_score = (total_quality + quality_score) / self._metrics.total_tasks_executed
1290
+
1291
+ # Update resource usage
1292
+ if tokens_used is not None:
1293
+ self._metrics.total_tokens_used += tokens_used
1294
+ if tool_calls is not None:
1295
+ self._metrics.total_tool_calls += tool_calls
1296
+
1297
+ self._metrics.updated_at = datetime.utcnow()
1298
+
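`update_metrics` keeps averages incrementally instead of storing every sample. A tiny worked illustration of the same update rule, `new_avg = (old_avg * (n - 1) + sample) / n`:

```python
# Incremental (running) average, as used for execution time and quality score above.
samples = [1.2, 0.8, 2.0]
count = 0
average = 0.0
for value in samples:
    count += 1
    average = (average * (count - 1) + value) / count
print(round(average, 4))                      # -> 1.3333
print(round(sum(samples) / len(samples), 4))  # same result, without keeping the history
```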
1299
+ def update_session_metrics(
1300
+ self,
1301
+ session_status: str,
1302
+ session_duration: Optional[float] = None,
1303
+ session_requests: int = 0,
1304
+ ) -> None:
1305
+ """
1306
+ Update session-level metrics.
1307
+
1308
+ This method should be called when a session is created, updated, or ended
1309
+ to track session-level statistics in agent metrics.
1310
+
1311
+ Args:
1312
+ session_status: Session status (active, completed, failed, expired)
1313
+ session_duration: Session duration in seconds (for ended sessions)
1314
+ session_requests: Number of requests in the session
1315
+
1316
+ Example:
1317
+ # When creating a session
1318
+ agent.update_session_metrics(session_status="active")
1319
+
1320
+ # When ending a session
1321
+ agent.update_session_metrics(
1322
+ session_status="completed",
1323
+ session_duration=300.5,
1324
+ session_requests=15
1325
+ )
1326
+ """
1327
+ # Update session counts based on status
1328
+ if session_status == "active":
1329
+ self._metrics.total_sessions += 1
1330
+ self._metrics.active_sessions += 1
1331
+ elif session_status == "completed":
1332
+ self._metrics.completed_sessions += 1
1333
+ if self._metrics.active_sessions > 0:
1334
+ self._metrics.active_sessions -= 1
1335
+ elif session_status == "failed":
1336
+ self._metrics.failed_sessions += 1
1337
+ if self._metrics.active_sessions > 0:
1338
+ self._metrics.active_sessions -= 1
1339
+ elif session_status == "expired":
1340
+ self._metrics.expired_sessions += 1
1341
+ if self._metrics.active_sessions > 0:
1342
+ self._metrics.active_sessions -= 1
1343
+
1344
+ # Update session request tracking
1345
+ if session_requests > 0:
1346
+ self._metrics.total_session_requests += session_requests
1347
+
1348
+ # Update average session duration
1349
+ if session_duration is not None and session_duration > 0:
1350
+ completed_count = self._metrics.completed_sessions + self._metrics.failed_sessions + self._metrics.expired_sessions
1351
+ if completed_count > 0:
1352
+ if self._metrics.average_session_duration is None:
1353
+ self._metrics.average_session_duration = session_duration
1354
+ else:
1355
+ # Running average
1356
+ total_duration = self._metrics.average_session_duration * (completed_count - 1)
1357
+ self._metrics.average_session_duration = (total_duration + session_duration) / completed_count
1358
+
1359
+ # Update average requests per session
1360
+ if self._metrics.total_sessions > 0:
1361
+ self._metrics.average_requests_per_session = self._metrics.total_session_requests / self._metrics.total_sessions
1362
+
1363
+ self._metrics.updated_at = datetime.utcnow()
1364
+ logger.debug(f"Agent {self.agent_id} session metrics updated: " f"status={session_status}, total_sessions={self._metrics.total_sessions}, " f"active_sessions={self._metrics.active_sessions}")
1365
+
1366
+ # ==================== Performance Tracking ====================
1367
+
1368
+ def track_operation_time(self, operation_name: str) -> OperationTimer:
1369
+ """
1370
+ Create a context manager for tracking operation time.
1371
+
1372
+ This method returns an OperationTimer that automatically records
1373
+ operation duration and updates agent metrics when the operation completes.
1374
+
1375
+ Args:
1376
+ operation_name: Name of the operation to track
1377
+
1378
+ Returns:
1379
+ OperationTimer context manager
1380
+
1381
+ Example:
1382
+ with agent.track_operation_time("llm_call") as timer:
1383
+ result = await llm.generate(prompt)
1384
+ # Metrics are automatically recorded
1385
+
1386
+ # Access duration if needed
1387
+ print(f"Operation took {timer.duration} seconds")
1388
+ """
1389
+ return OperationTimer(operation_name=operation_name, agent=self)
1390
+
1391
+ def _record_operation_metrics(self, operation_name: str, duration: float, success: bool = True) -> None:
1392
+ """
1393
+ Record operation-level metrics.
1394
+
1395
+ This method is called automatically by OperationTimer but can also
1396
+ be called manually to record operation metrics.
1397
+
1398
+ Args:
1399
+ operation_name: Name of the operation
1400
+ duration: Operation duration in seconds
1401
+ success: Whether the operation succeeded
1402
+
1403
+ Example:
1404
+ # Manual recording
1405
+ start = time.time()
1406
+ try:
1407
+ result = perform_operation()
1408
+ agent._record_operation_metrics("custom_op", time.time() - start, True)
1409
+ except Exception:
1410
+ agent._record_operation_metrics("custom_op", time.time() - start, False)
1411
+ raise
1412
+ """
1413
+ # Update operation counts
1414
+ if operation_name not in self._metrics.operation_counts:
1415
+ self._metrics.operation_counts[operation_name] = 0
1416
+ self._metrics.operation_total_time[operation_name] = 0.0
1417
+ self._metrics.operation_error_counts[operation_name] = 0
1418
+
1419
+ self._metrics.operation_counts[operation_name] += 1
1420
+ self._metrics.operation_total_time[operation_name] += duration
1421
+
1422
+ if not success:
1423
+ self._metrics.operation_error_counts[operation_name] += 1
1424
+
1425
+ # Add to operation history (keep last 100 operations)
1426
+ operation_record = {
1427
+ "operation": operation_name,
1428
+ "duration": duration,
1429
+ "success": success,
1430
+ "timestamp": datetime.utcnow().isoformat(),
1431
+ }
1432
+ self._metrics.operation_history.append(operation_record)
1433
+
1434
+ # Keep only last 100 operations
1435
+ if len(self._metrics.operation_history) > 100:
1436
+ self._metrics.operation_history = self._metrics.operation_history[-100:]
1437
+
1438
+ # Recalculate percentiles
1439
+ self._update_operation_percentiles()
1440
+
1441
+ self._metrics.updated_at = datetime.utcnow()
1442
+ logger.debug(f"Agent {self.agent_id} operation metrics recorded: " f"operation={operation_name}, duration={duration:.3f}s, success={success}")
1443
+
1444
+ def _update_operation_percentiles(self) -> None:
1445
+ """Update operation time percentiles from operation history."""
1446
+ if not self._metrics.operation_history:
1447
+ return
1448
+
1449
+ # Extract durations from operation history
1450
+ durations = [op["duration"] for op in self._metrics.operation_history]
1451
+
1452
+ # Calculate percentiles
1453
+ self._metrics.p50_operation_time = self._calculate_percentile(durations, 50)
1454
+ self._metrics.p95_operation_time = self._calculate_percentile(durations, 95)
1455
+ self._metrics.p99_operation_time = self._calculate_percentile(durations, 99)
1456
+
1457
+ def _calculate_percentile(self, values: List[float], percentile: int) -> Optional[float]:
1458
+ """
1459
+ Calculate percentile from a list of values.
1460
+
1461
+ Args:
1462
+ values: List of numeric values
1463
+ percentile: Percentile to calculate (0-100)
1464
+
1465
+ Returns:
1466
+ Percentile value or None if values is empty
1467
+
1468
+ Example:
1469
+ p95 = agent._calculate_percentile([1.0, 2.0, 3.0, 4.0, 5.0], 95)
1470
+ """
1471
+ if not values:
1472
+ return None
1473
+
1474
+ sorted_values = sorted(values)
1475
+ index = int(len(sorted_values) * percentile / 100)
1476
+
1477
+ # Handle edge cases
1478
+ if index >= len(sorted_values):
1479
+ index = len(sorted_values) - 1
1480
+
1481
+ return sorted_values[index]
1482
+
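`_calculate_percentile` is a simple index lookup into the sorted samples (nearest-rank style, no interpolation). A standalone mirror with a few sample durations:

```python
from typing import List, Optional

def calculate_percentile(values: List[float], percentile: int) -> Optional[float]:
    """Mirror of the index-based percentile used above (no interpolation)."""
    if not values:
        return None
    sorted_values = sorted(values)
    index = min(int(len(sorted_values) * percentile / 100), len(sorted_values) - 1)
    return sorted_values[index]

durations = [0.2, 0.4, 0.5, 0.9, 3.1]
print(calculate_percentile(durations, 50))  # -> 0.5
print(calculate_percentile(durations, 95))  # -> 3.1
print(calculate_percentile(durations, 99))  # -> 3.1
```

Because only the last 100 operations are kept in `operation_history`, these percentiles describe a rolling recent window rather than the agent's full lifetime.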
1483
+ def get_performance_metrics(self) -> Dict[str, Any]:
1484
+ """
1485
+ Get comprehensive performance metrics.
1486
+
1487
+ Returns detailed performance data including operation-level
1488
+ metrics, percentiles, and aggregated statistics.
1489
+
1490
+ Returns:
1491
+ Dictionary with performance metrics
1492
+
1493
+ Example:
1494
+ metrics = agent.get_performance_metrics()
1495
+ print(f"P95 latency: {metrics['p95_operation_time']}s")
1496
+ print(f"Total operations: {metrics['total_operations']}")
1497
+ for op_name, stats in metrics['operations'].items():
1498
+ print(f"{op_name}: {stats['count']} calls, avg {stats['avg_time']:.3f}s")
1499
+ """
1500
+ # Calculate per-operation statistics
1501
+ operations = {}
1502
+ for op_name, count in self._metrics.operation_counts.items():
1503
+ total_time = self._metrics.operation_total_time.get(op_name, 0.0)
1504
+ error_count = self._metrics.operation_error_counts.get(op_name, 0)
1505
+
1506
+ operations[op_name] = {
1507
+ "count": count,
1508
+ "total_time": total_time,
1509
+ "average_time": total_time / count if count > 0 else 0.0,
1510
+ "error_count": error_count,
1511
+ "error_rate": (error_count / count * 100) if count > 0 else 0.0,
1512
+ }
1513
+
1514
+ return {
1515
+ "total_operations": sum(self._metrics.operation_counts.values()),
1516
+ "operations": operations,
1517
+ "p50_operation_time": self._metrics.p50_operation_time,
1518
+ "p95_operation_time": self._metrics.p95_operation_time,
1519
+ "p99_operation_time": self._metrics.p99_operation_time,
1520
+ "recent_operations": self._metrics.operation_history[-10:], # Last 10 operations
1521
+ }
1522
+
1523
+ def get_health_status(self) -> Dict[str, Any]:
1524
+ """
1525
+ Get agent health status with health score calculation.
1526
+
1527
+ Calculates a health score (0-100) based on multiple factors:
1528
+ - Success rate (40% weight)
1529
+ - Error rate (30% weight)
1530
+ - Performance (20% weight)
1531
+ - Session health (10% weight)
1532
+
1533
+ Returns:
1534
+ Dictionary with health status and score
1535
+
1536
+ Example:
1537
+ health = agent.get_health_status()
1538
+ print(f"Health score: {health['health_score']}/100")
1539
+ print(f"Status: {health['status']}") # healthy, degraded, unhealthy
1540
+ if health['issues']:
1541
+ print(f"Issues: {', '.join(health['issues'])}")
1542
+ """
1543
+ issues = []
1544
+ health_score = 100.0
1545
+
1546
+ # Factor 1: Success rate (40% weight)
1547
+ success_rate = self._metrics.success_rate
1548
+ if success_rate < 50:
1549
+ issues.append("Low success rate")
1550
+ health_score -= 40
1551
+ elif success_rate < 80:
1552
+ issues.append("Moderate success rate")
1553
+ health_score -= 20
1554
+ elif success_rate < 95:
1555
+ health_score -= 10
1556
+
1557
+ # Factor 2: Error rate (30% weight)
1558
+ total_tasks = self._metrics.total_tasks_executed
1559
+ if total_tasks > 0:
1560
+ error_rate = (self._metrics.failed_tasks / total_tasks) * 100
1561
+ if error_rate > 50:
1562
+ issues.append("High error rate")
1563
+ health_score -= 30
1564
+ elif error_rate > 20:
1565
+ issues.append("Elevated error rate")
1566
+ health_score -= 15
1567
+ elif error_rate > 5:
1568
+ health_score -= 5
1569
+
1570
+ # Factor 3: Performance (20% weight)
1571
+ if self._metrics.p95_operation_time is not None:
1572
+ # Consider p95 > 5s as slow
1573
+ if self._metrics.p95_operation_time > 10:
1574
+ issues.append("Very slow operations (p95 > 10s)")
1575
+ health_score -= 20
1576
+ elif self._metrics.p95_operation_time > 5:
1577
+ issues.append("Slow operations (p95 > 5s)")
1578
+ health_score -= 10
1579
+
1580
+ # Factor 4: Session health (10% weight)
1581
+ if self._metrics.total_sessions > 0:
1582
+ session_failure_rate = (self._metrics.failed_sessions + self._metrics.expired_sessions) / self._metrics.total_sessions * 100
1583
+ if session_failure_rate > 30:
1584
+ issues.append("High session failure rate")
1585
+ health_score -= 10
1586
+ elif session_failure_rate > 10:
1587
+ health_score -= 5
1588
+
1589
+ # Ensure health score is in valid range
1590
+ health_score = max(0.0, min(100.0, health_score))
1591
+
1592
+ # Determine status
1593
+ if health_score >= 80:
1594
+ status = "healthy"
1595
+ elif health_score >= 50:
1596
+ status = "degraded"
1597
+ else:
1598
+ status = "unhealthy"
1599
+
1600
+ return {
1601
+ "health_score": health_score,
1602
+ "status": status,
1603
+ "issues": issues,
1604
+ "metrics_summary": {
1605
+ "success_rate": success_rate,
1606
+ "total_tasks": total_tasks,
1607
+ "total_sessions": self._metrics.total_sessions,
1608
+ "active_sessions": self._metrics.active_sessions,
1609
+ "p95_operation_time": self._metrics.p95_operation_time,
1610
+ },
1611
+ "timestamp": datetime.utcnow().isoformat(),
1612
+ }
1613
+
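To make the weighting concrete, a self-contained sketch that applies the same deductions and thresholds as `get_health_status()` to pre-computed rates; the sample numbers at the bottom are invented.

```python
from typing import Optional, Tuple

def health_score(success_rate: float, error_rate: float, p95: Optional[float],
                 session_failure_rate: float) -> Tuple[float, str]:
    """Apply the deductions from get_health_status() to pre-computed rates (all in percent/seconds)."""
    score = 100.0
    # Success rate: up to -40
    if success_rate < 50:
        score -= 40
    elif success_rate < 80:
        score -= 20
    elif success_rate < 95:
        score -= 10
    # Error rate: up to -30
    if error_rate > 50:
        score -= 30
    elif error_rate > 20:
        score -= 15
    elif error_rate > 5:
        score -= 5
    # Performance: up to -20
    if p95 is not None and p95 > 10:
        score -= 20
    elif p95 is not None and p95 > 5:
        score -= 10
    # Session health: up to -10
    if session_failure_rate > 30:
        score -= 10
    elif session_failure_rate > 10:
        score -= 5
    score = max(0.0, min(100.0, score))
    status = "healthy" if score >= 80 else "degraded" if score >= 50 else "unhealthy"
    return score, status

# Invented sample: 90% success, 10% errors, p95 of 6.2s, 5% session failures.
print(health_score(90.0, 10.0, 6.2, 5.0))  # -> (75.0, 'degraded')
```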
1614
+ def get_comprehensive_status(self) -> Dict[str, Any]:
1615
+ """
1616
+ Get comprehensive agent status combining all metrics.
1617
+
1618
+ Returns a complete view of agent state, health, performance,
1619
+ and operational metrics.
1620
+
1621
+ Returns:
1622
+ Dictionary with comprehensive status information
1623
+
1624
+ Example:
1625
+ status = agent.get_comprehensive_status()
1626
+ print(f"Agent: {status['agent_id']}")
1627
+ print(f"State: {status['state']}")
1628
+ print(f"Health: {status['health']['status']} ({status['health']['health_score']}/100)")
1629
+ print(f"Tasks: {status['metrics']['total_tasks_executed']}")
1630
+ print(f"Sessions: {status['metrics']['total_sessions']}")
1631
+ """
1632
+ return {
1633
+ "agent_id": self.agent_id,
1634
+ "name": self.name,
1635
+ "type": self.agent_type.value,
1636
+ "version": self.version,
1637
+ "state": self._state.value,
1638
+ "health": self.get_health_status(),
1639
+ "performance": self.get_performance_metrics(),
1640
+ "metrics": {
1641
+ # Task metrics
1642
+ "total_tasks_executed": self._metrics.total_tasks_executed,
1643
+ "successful_tasks": self._metrics.successful_tasks,
1644
+ "failed_tasks": self._metrics.failed_tasks,
1645
+ "success_rate": self._metrics.success_rate,
1646
+ # Execution time metrics
1647
+ "average_execution_time": self._metrics.average_execution_time,
1648
+ "total_execution_time": self._metrics.total_execution_time,
1649
+ # Session metrics
1650
+ "total_sessions": self._metrics.total_sessions,
1651
+ "active_sessions": self._metrics.active_sessions,
1652
+ "completed_sessions": self._metrics.completed_sessions,
1653
+ "failed_sessions": self._metrics.failed_sessions,
1654
+ "expired_sessions": self._metrics.expired_sessions,
1655
+ # Resource usage
1656
+ "total_tokens_used": self._metrics.total_tokens_used,
1657
+ "total_tool_calls": self._metrics.total_tool_calls,
1658
+ # Error tracking
1659
+ "error_count": self._metrics.error_count,
1660
+ "error_types": self._metrics.error_types,
1661
+ },
1662
+ "capabilities": [cap.capability_type for cap in self.get_capabilities()],
1663
+ "active_goals": len([g for g in self._goals.values() if g.status == GoalStatus.IN_PROGRESS]),
1664
+ "timestamp": datetime.utcnow().isoformat(),
1665
+ }
1666
+
1667
+ def reset_metrics(self) -> None:
1668
+ """
1669
+ Reset performance and session metrics.
1670
+
1671
+ Resets all metrics to their initial state while preserving
1672
+ agent configuration and state.
1673
+
1674
+ Example:
1675
+ # Reset metrics at the start of a new monitoring period
1676
+ agent.reset_metrics()
1677
+ """
1678
+ self._metrics = AgentMetrics(last_reset_at=datetime.utcnow()) # type: ignore[call-arg]
1679
+ logger.info(f"Agent {self.agent_id} metrics reset")
1680
+
1681
+ # ==================== Serialization ====================
1682
+
1683
+ def to_dict(self) -> Dict[str, Any]:
1684
+ """
1685
+ Serialize agent to dictionary.
1686
+
1687
+ Includes health status and performance metrics for comprehensive
1688
+ agent state representation.
1689
+
1690
+ Returns:
1691
+ Dictionary representation
1692
+
1693
+ Raises:
1694
+ SerializationError: If serialization fails
1695
+ """
1696
+ try:
1697
+ return {
1698
+ "agent_id": self.agent_id,
1699
+ "name": self.name,
1700
+ "agent_type": self.agent_type.value,
1701
+ "description": self.description,
1702
+ "version": self.version,
1703
+ "state": self._state.value,
1704
+ "config": self._config.model_dump(),
1705
+ "goals": [g.model_dump() for g in self._goals.values()],
1706
+ "capabilities": [c.model_dump() for c in self._capabilities.values()],
1707
+ "metrics": self._metrics.model_dump(),
1708
+ "health_status": self.get_health_status(), # Phase 3 enhancement
1709
+ "performance_metrics": self.get_performance_metrics(), # Phase 3 enhancement
1710
+ "memory_summary": self.get_memory_summary(),
1711
+ "created_at": self.created_at.isoformat(),
1712
+ "updated_at": self.updated_at.isoformat(),
1713
+ "last_active_at": (self.last_active_at.isoformat() if self.last_active_at else None),
1714
+ }
1715
+ except Exception as e:
1716
+ raise SerializationError(
1717
+ f"Failed to serialize agent: {str(e)}",
1718
+ agent_id=self.agent_id,
1719
+ )
1720
+
1721
+ @classmethod
1722
+ def from_dict(cls, data: Dict[str, Any]) -> "BaseAIAgent":
1723
+ """
1724
+ Deserialize agent from dictionary.
1725
+
1726
+ Args:
1727
+ data: Dictionary representation
1728
+
1729
+ Returns:
1730
+ Agent instance
1731
+
1732
+ Raises:
1733
+ SerializationError: If deserialization fails
1734
+ """
1735
+ raise NotImplementedError("from_dict must be implemented by subclasses")
1736
+
1737
+ # ==================== Checkpointer Support ====================
1738
+
1739
+ async def save_checkpoint(self, session_id: str, checkpoint_id: Optional[str] = None) -> Optional[str]:
1740
+ """
1741
+ Save agent state checkpoint.
1742
+
1743
+ This method saves the current agent state using the configured checkpointer.
1744
+ If no checkpointer is configured, logs a warning and returns None.
1745
+
1746
+ Args:
1747
+ session_id: Session identifier for the checkpoint
1748
+ checkpoint_id: Optional checkpoint identifier (auto-generated if None)
1749
+
1750
+ Returns:
1751
+ Checkpoint ID if saved successfully, None otherwise
1752
+
1753
+ Example:
1754
+ # Save checkpoint with auto-generated ID
1755
+ checkpoint_id = await agent.save_checkpoint(session_id="session-123")
1756
+
1757
+ # Save checkpoint with custom ID
1758
+ checkpoint_id = await agent.save_checkpoint(
1759
+ session_id="session-123",
1760
+ checkpoint_id="v1.0"
1761
+ )
1762
+
1763
+ Note:
1764
+ Requires a checkpointer to be configured during agent initialization.
1765
+ The checkpoint includes full agent state from to_dict().
1766
+ """
1767
+ if not self._checkpointer:
1768
+ logger.warning(f"Agent {self.agent_id}: No checkpointer configured, cannot save checkpoint")
1769
+ return None
1770
+
1771
+ try:
1772
+ # Get current agent state
1773
+ checkpoint_data = self.to_dict()
1774
+
1775
+ # Add checkpoint metadata
1776
+ checkpoint_data["checkpoint_metadata"] = {
1777
+ "session_id": session_id,
1778
+ "checkpoint_id": checkpoint_id,
1779
+ "saved_at": datetime.utcnow().isoformat(),
1780
+ "agent_version": self.version,
1781
+ }
1782
+
1783
+ # Save using checkpointer
1784
+ saved_checkpoint_id = await self._checkpointer.save_checkpoint(
1785
+ agent_id=self.agent_id,
1786
+ session_id=session_id,
1787
+ checkpoint_data=checkpoint_data,
1788
+ )
1789
+
1790
+ logger.info(f"Agent {self.agent_id}: Checkpoint saved successfully " f"(session={session_id}, checkpoint={saved_checkpoint_id})")
1791
+ return saved_checkpoint_id
1792
+
1793
+ except Exception as e:
1794
+ logger.error(f"Agent {self.agent_id}: Failed to save checkpoint " f"(session={session_id}): {e}")
1795
+ return None
1796
+
1797
+ async def load_checkpoint(self, session_id: str, checkpoint_id: Optional[str] = None) -> bool:
1798
+ """
1799
+ Load agent state from checkpoint.
1800
+
1801
+ This method loads agent state from a saved checkpoint using the configured
1802
+ checkpointer. If no checkpointer is configured, logs a warning and returns False.
1803
+
1804
+ Args:
1805
+ session_id: Session identifier for the checkpoint
1806
+ checkpoint_id: Optional checkpoint identifier (loads latest if None)
1807
+
1808
+ Returns:
1809
+ True if checkpoint loaded successfully, False otherwise
1810
+
1811
+ Example:
1812
+ # Load latest checkpoint
1813
+ success = await agent.load_checkpoint(session_id="session-123")
1814
+
1815
+ # Load specific checkpoint
1816
+ success = await agent.load_checkpoint(
1817
+ session_id="session-123",
1818
+ checkpoint_id="v1.0"
1819
+ )
1820
+
1821
+ Note:
1822
+ Requires a checkpointer to be configured during agent initialization.
1823
+ This method updates the agent's internal state from the checkpoint.
1824
+ Not all state may be restorable (e.g., runtime objects, connections).
1825
+ """
1826
+ if not self._checkpointer:
1827
+ logger.warning(f"Agent {self.agent_id}: No checkpointer configured, cannot load checkpoint")
1828
+ return False
1829
+
1830
+ try:
1831
+ # Load checkpoint data
1832
+ checkpoint_data = await self._checkpointer.load_checkpoint(
1833
+ agent_id=self.agent_id,
1834
+ session_id=session_id,
1835
+ checkpoint_id=checkpoint_id,
1836
+ )
1837
+
1838
+ if not checkpoint_data:
1839
+ logger.warning(f"Agent {self.agent_id}: No checkpoint found " f"(session={session_id}, checkpoint={checkpoint_id or 'latest'})")
1840
+ return False
1841
+
1842
+ # Restore agent state from checkpoint
1843
+ self._restore_from_checkpoint(checkpoint_data)
1844
+
1845
+ logger.info(f"Agent {self.agent_id}: Checkpoint loaded successfully " f"(session={session_id}, checkpoint={checkpoint_id or 'latest'})")
1846
+ return True
1847
+
1848
+ except Exception as e:
1849
+ logger.error(f"Agent {self.agent_id}: Failed to load checkpoint " f"(session={session_id}, checkpoint={checkpoint_id or 'latest'}): {e}")
1850
+ return False
1851
+
1852
+ def _restore_from_checkpoint(self, checkpoint_data: Dict[str, Any]) -> None:
1853
+ """
1854
+ Restore agent state from checkpoint data.
1855
+
1856
+ This is an internal method that updates the agent's state from checkpoint data.
1857
+ Subclasses can override this to customize restoration logic.
1858
+
1859
+ Args:
1860
+ checkpoint_data: Checkpoint data dictionary
1861
+
1862
+ Note:
1863
+ This method restores basic agent state. Runtime objects like
1864
+ connections, file handles, etc. are not restored.
1865
+ """
1866
+ # Restore basic state
1867
+ if "state" in checkpoint_data:
1868
+ try:
1869
+ self._state = AgentState(checkpoint_data["state"])
1870
+ except (ValueError, KeyError):
1871
+ logger.warning("Could not restore state from checkpoint")
1872
+
1873
+ # Restore metrics
1874
+ if "metrics" in checkpoint_data:
1875
+ try:
1876
+ self._metrics = AgentMetrics(**checkpoint_data["metrics"])
1877
+ except Exception as e:
1878
+ logger.warning(f"Could not restore metrics from checkpoint: {e}")
1879
+
1880
+ # Restore goals
1881
+ if "goals" in checkpoint_data:
1882
+ try:
1883
+ self._goals = {}
1884
+ for goal_data in checkpoint_data["goals"]:
1885
+ goal = AgentGoal(**goal_data)
1886
+ self._goals[goal.goal_id] = goal
1887
+ except Exception as e:
1888
+ logger.warning(f"Could not restore goals from checkpoint: {e}")
1889
+
1890
+ # Update timestamps
1891
+ self.updated_at = datetime.utcnow()
1892
+
1893
+ logger.debug(f"Agent {self.agent_id}: State restored from checkpoint")
1894
+
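The checkpointer object itself is not defined in this file; from the calls above it only needs async `save_checkpoint(agent_id, session_id, checkpoint_data)` and `load_checkpoint(agent_id, session_id, checkpoint_id)` methods. A minimal in-memory sketch of that assumed interface, purely illustrative; the real checkpointer shipped with `aiecs` may differ.

```python
import asyncio
import uuid
from typing import Any, Dict, Optional, Tuple

class InMemoryCheckpointer:
    """Hypothetical checkpointer satisfying the interface used by save/load_checkpoint above."""

    def __init__(self) -> None:
        # (agent_id, session_id) -> {checkpoint_id: checkpoint_data}
        self._store: Dict[Tuple[str, str], Dict[str, Dict[str, Any]]] = {}
        self._latest: Dict[Tuple[str, str], str] = {}

    async def save_checkpoint(self, agent_id: str, session_id: str,
                              checkpoint_data: Dict[str, Any]) -> str:
        checkpoint_id = (checkpoint_data.get("checkpoint_metadata", {}).get("checkpoint_id")
                         or uuid.uuid4().hex)
        key = (agent_id, session_id)
        self._store.setdefault(key, {})[checkpoint_id] = checkpoint_data
        self._latest[key] = checkpoint_id
        return checkpoint_id

    async def load_checkpoint(self, agent_id: str, session_id: str,
                              checkpoint_id: Optional[str] = None) -> Optional[Dict[str, Any]]:
        key = (agent_id, session_id)
        checkpoint_id = checkpoint_id or self._latest.get(key)
        if checkpoint_id is None:
            return None
        return self._store.get(key, {}).get(checkpoint_id)

cp = InMemoryCheckpointer()
asyncio.run(cp.save_checkpoint("agent-1", "session-123", {"state": "active"}))
print(asyncio.run(cp.load_checkpoint("agent-1", "session-123")))  # -> {'state': 'active'}
```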
1895
+ # ==================== Utility Methods ====================
1896
+
1897
+ def is_available(self) -> bool:
1898
+ """Check if agent is available for tasks."""
1899
+ return self._state == AgentState.ACTIVE
1900
+
1901
+ def is_busy(self) -> bool:
1902
+ """Check if agent is currently busy."""
1903
+ return self._state == AgentState.BUSY
1904
+
1905
+ async def execute_tool(self, tool_name: str, parameters: Dict[str, Any]) -> Any:
1906
+ """
1907
+ Execute a single tool with given parameters.
1908
+
1909
+ This is a default implementation that subclasses can override.
1910
+ For ToolAgent, this calls _execute_tool with operation from parameters.
1911
+
1912
+ Args:
1913
+ tool_name: Name of the tool to execute
1914
+ parameters: Tool parameters (may include 'operation' key)
1915
+
1916
+ Returns:
1917
+ Tool execution result
1918
+ """
1919
+ # Check if we have tool instances
1920
+ if hasattr(self, "_tool_instances") and self._tool_instances:
1921
+ tool = self._tool_instances.get(tool_name)
1922
+ if tool:
1923
+ # Make a copy to avoid modifying the original
1924
+ params = parameters.copy()
1925
+
1926
+ # Try to execute the tool directly (for custom tools with execute method)
1927
+ if hasattr(tool, "execute"):
1928
+ return await tool.execute(**params)
1929
+ # For standard tools with run_async
1930
+ elif hasattr(tool, "run_async"):
1931
+ # Check if operation is specified
1932
+ operation = params.pop("operation", None)
1933
+ if operation:
1934
+ return await tool.run_async(operation, **params)
1935
+ else:
1936
+ return await tool.run_async(**params)
1937
+
1938
+ raise NotImplementedError(f"execute_tool not implemented for {self.__class__.__name__}. " "Tool {tool_name} not found or doesn't have execute/run_async method.")
1939
+
1940
+ # ==================== Parallel Tool Execution (Phase 7) ====================
1941
+
1942
+ async def execute_tools_parallel(
1943
+ self,
1944
+ tool_calls: List[Dict[str, Any]],
1945
+ max_concurrency: int = 5,
1946
+ ) -> List[Dict[str, Any]]:
1947
+ """
1948
+ Execute multiple tools in parallel with concurrency limit.
1949
+
1950
+ Args:
1951
+ tool_calls: List of tool call dicts with 'tool_name' and 'parameters'
1952
+ max_concurrency: Maximum number of concurrent tool executions
1953
+
1954
+ Returns:
1955
+ List of results in same order as tool_calls
1956
+
1957
+ Example:
1958
+ tool_calls = [
1959
+ {"tool_name": "search", "parameters": {"query": "AI"}},
1960
+ {"tool_name": "calculator", "parameters": {"expression": "2+2"}},
1961
+ {"tool_name": "search", "parameters": {"query": "ML"}},
1962
+ ]
1963
+ results = await agent.execute_tools_parallel(tool_calls, max_concurrency=2)
1964
+ """
1965
+ if not tool_calls:
1966
+ return []
1967
+
1968
+ # Create semaphore for concurrency control
1969
+ semaphore = asyncio.Semaphore(max_concurrency)
1970
+
1971
+ async def execute_with_semaphore(tool_call: Dict[str, Any], index: int):
1972
+ """Execute tool with semaphore."""
1973
+ async with semaphore:
1974
+ tool_name = tool_call.get("tool_name")
1975
+ parameters = tool_call.get("parameters", {})
1976
+
1977
+ if tool_name is None:
1978
+ raise ValueError("tool_name is required in tool_call")
1979
+
1980
+ try:
1981
+ # Execute tool (subclass should implement execute_tool)
1982
+ result = await self.execute_tool(tool_name, parameters)
1983
+ return {"index": index, "success": True, "result": result}
1984
+ except Exception as e:
1985
+ logger.error(f"Tool {tool_name} failed: {e}")
1986
+ return {
1987
+ "index": index,
1988
+ "success": False,
1989
+ "error": str(e),
1990
+ "tool_name": tool_name,
1991
+ }
1992
+
1993
+ # Execute all tools in parallel
1994
+ tasks = [execute_with_semaphore(tool_call, i) for i, tool_call in enumerate(tool_calls)]
1995
+
1996
+ results_unordered = await asyncio.gather(*tasks, return_exceptions=True)
1997
+
1998
+ # Sort results by index to maintain order
1999
+ valid_results = [r for r in results_unordered if not isinstance(r, Exception) and isinstance(r, dict) and "index" in r]
2000
+ results_sorted = sorted(
2001
+ valid_results,
2002
+ key=lambda x: x["index"], # type: ignore[index]
2003
+ )
2004
+
2005
+ # Remove index from results
2006
+ return [{k: v for k, v in r.items() if k != "index"} for r in results_sorted]
2007
+
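The concurrency cap in `execute_tools_parallel` is just an `asyncio.Semaphore` wrapped around each call. A standalone illustration with an invented `fake_tool` in place of `execute_tool`, showing that no more than `max_concurrency` calls run at once:

```python
import asyncio

async def run_parallel(calls, max_concurrency=2):
    semaphore = asyncio.Semaphore(max_concurrency)
    running = 0
    peak = 0

    async def fake_tool(name, params):  # invented stand-in for execute_tool()
        nonlocal running, peak
        async with semaphore:
            running += 1
            peak = max(peak, running)
            await asyncio.sleep(0.05)   # simulated work
            running -= 1
            return {"tool": name, "echo": params}

    results = await asyncio.gather(*(fake_tool(c["tool_name"], c["parameters"]) for c in calls))
    return results, peak

calls = [{"tool_name": "search", "parameters": {"query": q}} for q in ("AI", "ML", "KG", "NLP")]
results, peak = asyncio.run(run_parallel(calls, max_concurrency=2))
print(len(results), peak)  # -> 4 2  (four results, never more than two in flight)
```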
2008
+ async def analyze_tool_dependencies(self, tool_calls: List[Dict[str, Any]]) -> Dict[str, List[str]]:
2009
+ """
2010
+ Analyze dependencies between tool calls.
2011
+
2012
+ Detects if one tool's output is used as input to another tool.
2013
+
2014
+ Args:
2015
+ tool_calls: List of tool call dicts
2016
+
2017
+ Returns:
2018
+ Dict mapping tool index to list of dependency indices
2019
+
2020
+ Example:
2021
+ tool_calls = [
2022
+ {"tool_name": "search", "parameters": {"query": "AI"}},
2023
+ {"tool_name": "summarize", "parameters": {"text": "${0.result}"}},
2024
+ ]
2025
+ deps = await agent.analyze_tool_dependencies(tool_calls)
2026
+ # deps = {"1": ["0"]} # Tool 1 depends on tool 0
2027
+ """
2028
+ dependencies: Dict[str, List[str]] = {}
2029
+
2030
+ for i, tool_call in enumerate(tool_calls):
2031
+ deps = []
2032
+ parameters = tool_call.get("parameters", {})
2033
+
2034
+ # Check if parameters reference other tool results
2035
+ param_str = json.dumps(parameters)
2036
+
2037
+ # Look for ${index.field} patterns
2038
+ import re
2039
+
2040
+ matches = re.findall(r"\$\{(\d+)\.", param_str)
2041
+ deps = list(set(matches)) # Remove duplicates
2042
+
2043
+ if deps:
2044
+ dependencies[str(i)] = deps
2045
+
2046
+ return dependencies
2047
+
2048
+ async def execute_tools_with_dependencies(self, tool_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
2049
+ """
2050
+ Execute tools respecting dependencies using topological sort.
2051
+
2052
+ Args:
2053
+ tool_calls: List of tool call dicts
2054
+
2055
+ Returns:
2056
+ List of results in same order as tool_calls
2057
+
2058
+ Example:
2059
+ tool_calls = [
2060
+ {"tool_name": "search", "parameters": {"query": "AI"}},
2061
+ {"tool_name": "summarize", "parameters": {"text": "${0.result}"}},
2062
+ ]
2063
+ results = await agent.execute_tools_with_dependencies(tool_calls)
2064
+ """
2065
+ # Analyze dependencies
2066
+ dependencies = await self.analyze_tool_dependencies(tool_calls)
2067
+
2068
+ # Topological sort
2069
+ executed: Set[int] = set()
2070
+ results: List[Optional[Dict[str, Any]]] = [None] * len(tool_calls)
2071
+
2072
+ def can_execute(index: int) -> bool:
2073
+ """Check if tool can be executed."""
2074
+ deps = dependencies.get(str(index), [])
2075
+ return all(int(dep) in executed for dep in deps)
2076
+
2077
+ # Execute tools in dependency order
2078
+ while len(executed) < len(tool_calls):
2079
+ # Find tools that can be executed
2080
+ ready = [i for i in range(len(tool_calls)) if i not in executed and can_execute(i)]
2081
+
2082
+ if not ready:
2083
+ # Circular dependency or error
2084
+ logger.error("Circular dependency detected or no tools ready")
2085
+ break
2086
+
2087
+ # Execute ready tools in parallel
2088
+ ready_calls = [tool_calls[i] for i in ready]
2089
+ ready_results = await self.execute_tools_parallel(ready_calls)
2090
+
2091
+ # Store results and mark as executed
2092
+ for i, result in zip(ready, ready_results):
2093
+ if result is not None:
2094
+ results[i] = result
2095
+ executed.add(i)
2096
+
2097
+ # Substitute results in dependent tool calls
2098
+ for j in range(len(tool_calls)):
2099
+ if j not in executed:
2100
+ tool_calls[j] = self._substitute_tool_result(tool_calls[j], i, result)
2101
+
2102
+ # Filter out None values and return
2103
+ return [r for r in results if r is not None]
2104
+
2105
+ def _substitute_tool_result(self, tool_call: Dict[str, Any], source_index: int, source_result: Dict[str, Any]) -> Dict[str, Any]:
2106
+ """
2107
+ Substitute tool result references in parameters.
2108
+
2109
+ Args:
2110
+ tool_call: Tool call dict
2111
+ source_index: Index of source tool
2112
+ source_result: Result from source tool
2113
+
2114
+ Returns:
2115
+ Updated tool call dict
2116
+ """
2117
+ import re
2118
+
2119
+ param_str = json.dumps(tool_call.get("parameters", {}))
2120
+
2121
+ # Replace ${index.field} with actual values
2122
+ pattern = rf"\$\{{{source_index}\.(\w+)\}}"
2123
+
2124
+ def replacer(match):
2125
+ field = match.group(1)
2126
+ value = source_result.get(field)
2127
+ return json.dumps(value)[1:-1] if isinstance(value, str) else (json.dumps(value) if value is not None else "null")  # strings lose their outer JSON quotes because the placeholder sits inside an already-quoted string
2128
+
2129
+ param_str = re.sub(pattern, replacer, param_str)
2130
+
2131
+ tool_call["parameters"] = json.loads(param_str)
2132
+ return tool_call
2133
+
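The dependency mechanism hinges on `${index.field}` placeholders inside parameter values. A self-contained illustration of both halves, detection and substitution, using the same regex shapes as above; the payloads are invented, and values are spliced in as plain text here rather than re-serialised.

```python
import json
import re

tool_calls = [
    {"tool_name": "search", "parameters": {"query": "AI"}},
    {"tool_name": "summarize", "parameters": {"text": "${0.result}"}},
]

# Detection: which calls reference which earlier results?
deps = {}
for i, call in enumerate(tool_calls):
    refs = sorted(set(re.findall(r"\$\{(\d+)\.", json.dumps(call["parameters"]))))
    if refs:
        deps[str(i)] = refs
print(deps)  # -> {'1': ['0']}

# Substitution: splice the result of call 0 into call 1's parameters.
source_index, source_result = 0, {"success": True, "result": "ten relevant papers"}
param_str = json.dumps(tool_calls[1]["parameters"])
pattern = rf"\$\{{{source_index}\.(\w+)\}}"
param_str = re.sub(pattern,
                   lambda m: str(source_result.get(m.group(1), "null")),
                   param_str)
print(json.loads(param_str))  # -> {'text': 'ten relevant papers'}
```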
2134
+ # ==================== Tool Result Caching (Phase 7) ====================
2135
+
2136
+ def _generate_cache_key(self, tool_name: str, parameters: Dict[str, Any]) -> str:
2137
+ """
2138
+ Generate cache key for tool result.
2139
+
2140
+ Args:
2141
+ tool_name: Name of the tool
2142
+ parameters: Tool parameters
2143
+
2144
+ Returns:
2145
+ Cache key string
2146
+
2147
+ Example:
2148
+ key = agent._generate_cache_key("search", {"query": "AI"})
2149
+ """
2150
+ # Sort parameters for consistent keys
2151
+ param_str = json.dumps(parameters, sort_keys=True)
2152
+
2153
+ # Hash large inputs
2154
+ if self._cache_config.hash_large_inputs and len(param_str) > 1024:
2155
+ import hashlib
2156
+
2157
+ param_hash = hashlib.md5(param_str.encode()).hexdigest()
2158
+ cache_key = f"{tool_name}:{param_hash}"
2159
+ else:
2160
+ cache_key = f"{tool_name}:{param_str}"
2161
+
2162
+ # Include timestamp if configured
2163
+ if self._cache_config.include_timestamp_in_key:
2164
+ timestamp = int(time.time() / 60) # Minute-level granularity
2165
+ cache_key = f"{cache_key}:{timestamp}"
2166
+
2167
+ return cache_key
2168
+
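The cache key is the tool name plus the canonically serialised parameters, with an MD5 digest substituted once the serialised form exceeds 1 KiB, and an optional minute-level time bucket. A standalone mirror of that scheme (configuration is shown as plain arguments rather than the `_cache_config` object):

```python
import hashlib
import json
import time
from typing import Any, Dict

def make_cache_key(tool_name: str, parameters: Dict[str, Any],
                   hash_large_inputs: bool = True, include_timestamp: bool = False) -> str:
    param_str = json.dumps(parameters, sort_keys=True)  # canonical key ordering
    if hash_large_inputs and len(param_str) > 1024:
        key = f"{tool_name}:{hashlib.md5(param_str.encode()).hexdigest()}"
    else:
        key = f"{tool_name}:{param_str}"
    if include_timestamp:
        key = f"{key}:{int(time.time() / 60)}"  # minute-level bucket
    return key

print(make_cache_key("search", {"query": "AI", "limit": 10}))
# -> search:{"limit": 10, "query": "AI"}
print(make_cache_key("search", {"query": "x" * 2000}))
# -> search:<32-character md5 digest>
```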
2169
+ async def execute_tool_with_cache(self, tool_name: str, parameters: Dict[str, Any]) -> Any:
2170
+ """
2171
+ Execute tool with caching support.
2172
+
2173
+ Args:
2174
+ tool_name: Name of the tool
2175
+ parameters: Tool parameters
2176
+
2177
+ Returns:
2178
+ Tool result (from cache or fresh execution)
2179
+
2180
+ Example:
2181
+ result = await agent.execute_tool_with_cache("search", {"query": "AI"})
2182
+ """
2183
+ if not self._cache_config.enabled:
2184
+ # Cache disabled, execute directly
2185
+ return await self.execute_tool(tool_name, parameters)
2186
+
2187
+ # Generate cache key
2188
+ cache_key = self._generate_cache_key(tool_name, parameters)
2189
+
2190
+ # Check cache
2191
+ if cache_key in self._tool_cache:
2192
+ # Check TTL
2193
+ cached_time = self._cache_timestamps.get(cache_key, 0)
2194
+ ttl = self._cache_config.get_ttl(tool_name)
2195
+ age = time.time() - cached_time
2196
+
2197
+ if age < ttl:
2198
+ # Cache hit
2199
+ self._cache_access_count[cache_key] = self._cache_access_count.get(cache_key, 0) + 1
2200
+ logger.debug(f"Cache hit for {tool_name} (age: {age:.1f}s)")
2201
+ return self._tool_cache[cache_key]
2202
+ else:
2203
+ # Cache expired
2204
+ logger.debug(f"Cache expired for {tool_name} (age: {age:.1f}s)")
2205
+ del self._tool_cache[cache_key]
2206
+ del self._cache_timestamps[cache_key]
2207
+ if cache_key in self._cache_access_count:
2208
+ del self._cache_access_count[cache_key]
2209
+
2210
+ # Cache miss - execute tool
2211
+ logger.debug(f"Cache miss for {tool_name}")
2212
+ result = await self.execute_tool(tool_name, parameters)
2213
+
2214
+ # Store in cache
2215
+ self._tool_cache[cache_key] = result
2216
+ self._cache_timestamps[cache_key] = time.time()
2217
+ self._cache_access_count[cache_key] = 0
2218
+
2219
+ # Cleanup if needed
2220
+ await self._cleanup_cache()
2221
+
2222
+ return result
2223
+
2224
+ def invalidate_cache(self, tool_name: Optional[str] = None, pattern: Optional[str] = None) -> int:
2225
+ """
2226
+ Invalidate cache entries.
2227
+
2228
+ Args:
2229
+ tool_name: Invalidate all entries for this tool (optional)
2230
+ pattern: Invalidate entries matching pattern (optional)
2231
+
2232
+ Returns:
2233
+ Number of entries invalidated
2234
+
2235
+ Example:
2236
+ # Invalidate all search results
2237
+ count = agent.invalidate_cache(tool_name="search")
2238
+
2239
+ # Invalidate all cache
2240
+ count = agent.invalidate_cache()
2241
+ """
2242
+ if tool_name is None and pattern is None:
2243
+ # Invalidate all
2244
+ count = len(self._tool_cache)
2245
+ self._tool_cache.clear()
2246
+ self._cache_timestamps.clear()
2247
+ self._cache_access_count.clear()
2248
+ logger.info(f"Invalidated all cache ({count} entries)")
2249
+ return count
2250
+
2251
+ # Invalidate matching entries
2252
+ keys_to_delete = []
2253
+
2254
+ for key in list(self._tool_cache.keys()):
2255
+ if tool_name and key.startswith(f"{tool_name}:"):
2256
+ keys_to_delete.append(key)
2257
+ elif pattern and pattern in key:
2258
+ keys_to_delete.append(key)
2259
+
2260
+ for key in keys_to_delete:
2261
+ del self._tool_cache[key]
2262
+ del self._cache_timestamps[key]
2263
+ if key in self._cache_access_count:
2264
+ del self._cache_access_count[key]
2265
+
2266
+ logger.info(f"Invalidated {len(keys_to_delete)} cache entries")
2267
+ return len(keys_to_delete)
2268
+
2269
+ def get_cache_stats(self) -> Dict[str, Any]:
2270
+ """
2271
+ Get cache statistics.
2272
+
2273
+ Returns:
2274
+ Dictionary with cache statistics
2275
+
2276
+ Example:
2277
+ stats = agent.get_cache_stats()
2278
+ print(f"Cache size: {stats['size']}")
2279
+ print(f"Hit rate: {stats['hit_rate']:.1%}")
2280
+ """
2281
+ total_entries = len(self._tool_cache)
2282
+ total_accesses = sum(self._cache_access_count.values())
2283
+
2284
+ # Calculate hit rate (approximate)
2285
+ cache_hits = total_accesses  # every recorded access is a hit; each cached entry implies one initial miss
2286
+ hit_rate = cache_hits / (cache_hits + total_entries) if (cache_hits + total_entries) > 0 else 0.0
2287
+
2288
+ # Calculate memory usage (approximate)
2289
+ import sys
2290
+
2291
+ memory_bytes = sum(sys.getsizeof(v) for v in self._tool_cache.values())
2292
+ memory_mb = memory_bytes / (1024 * 1024)
2293
+
2294
+ # Per-tool stats
2295
+ tool_stats = {}
2296
+ for key in self._tool_cache.keys():
2297
+ tool_name = key.split(":")[0]
2298
+ if tool_name not in tool_stats:
2299
+ tool_stats[tool_name] = {"count": 0, "accesses": 0}
2300
+ tool_stats[tool_name]["count"] += 1
2301
+ tool_stats[tool_name]["accesses"] += self._cache_access_count.get(key, 0)
2302
+
2303
+ return {
2304
+ "enabled": self._cache_config.enabled,
2305
+ "size": total_entries,
2306
+ "max_size": self._cache_config.max_cache_size,
2307
+ "memory_mb": memory_mb,
2308
+ "max_memory_mb": self._cache_config.max_memory_mb,
2309
+ "total_accesses": total_accesses,
2310
+ "hit_rate": hit_rate,
2311
+ "tool_stats": tool_stats,
2312
+ }
2313
+
2314
+ async def _cleanup_cache(self) -> None:
2315
+ """
2316
+ Cleanup cache based on size and memory limits.
2317
+
2318
+ Removes the oldest entries (by cache timestamp) when limits are exceeded.
2319
+ """
2320
+ # Check if cleanup needed
2321
+ current_time = time.time()
2322
+ if current_time - self._last_cleanup_time < self._cache_config.cleanup_interval:
2323
+ return
2324
+
2325
+ self._last_cleanup_time = current_time
2326
+
2327
+ # Check size limit
2328
+ if len(self._tool_cache) > self._cache_config.max_cache_size * self._cache_config.cleanup_threshold:
2329
+ # Remove oldest entries
2330
+ entries_to_remove = int(len(self._tool_cache) - self._cache_config.max_cache_size * 0.8)
2331
+
2332
+ # Sort by timestamp (oldest first)
2333
+ sorted_keys = sorted(self._cache_timestamps.items(), key=lambda x: x[1])
2334
+
2335
+ for key, _ in sorted_keys[:entries_to_remove]:
2336
+ del self._tool_cache[key]
2337
+ del self._cache_timestamps[key]
2338
+ if key in self._cache_access_count:
2339
+ del self._cache_access_count[key]
2340
+
2341
+ logger.debug(f"Cleaned up {entries_to_remove} cache entries (size limit)")
2342
+
2343
+ # ==================== Streaming Support (Phase 7 - Tasks 1.15.11-1.15.12) ====================
2344
+
2345
+ async def execute_task_streaming(self, task: Dict[str, Any], context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
2346
+ """
2347
+ Execute a task with streaming results.
2348
+
2349
+ This method streams task execution events as they occur, including:
2350
+ - Status updates (started, thinking, acting, completed)
2351
+ - LLM tokens (for agents with LLM clients)
2352
+ - Tool calls and results (for agents with tools)
2353
+ - Final result
2354
+
2355
+ Args:
2356
+ task: Task specification
2357
+ context: Execution context
2358
+
2359
+ Yields:
2360
+ Dict[str, Any]: Event dictionaries with 'type' and event-specific data
2361
+
2362
+ Event types:
2363
+ - 'status': Status update (e.g., started, thinking, completed)
2364
+ - 'token': LLM token (for streaming text generation)
2365
+ - 'tool_call': Tool execution started
2366
+ - 'tool_result': Tool execution completed
2367
+ - 'result': Final task result
2368
+ - 'error': Error occurred
2369
+
2370
+ Example:
2371
+ ```python
2372
+ async for event in agent.execute_task_streaming(task, context):
2373
+ if event['type'] == 'token':
2374
+ print(event['content'], end='', flush=True)
2375
+ elif event['type'] == 'tool_call':
2376
+ print(f"\\nCalling tool: {event['tool_name']}")
2377
+ elif event['type'] == 'tool_result':
2378
+ print(f"Tool result: {event['result']}")
2379
+ elif event['type'] == 'result':
2380
+ print(f"\\nFinal result: {event['output']}")
2381
+ ```
2382
+
2383
+ Note:
2384
+ Subclasses should override this method to provide streaming support.
2385
+ Default implementation falls back to non-streaming execute_task.
2386
+ """
2387
+ # Default implementation: execute task and yield result
2388
+ yield {"type": "status", "status": "started", "timestamp": datetime.utcnow().isoformat()}
2389
+
2390
+ try:
2391
+ result = await self.execute_task(task, context)
2392
+ yield {"type": "result", **result}
2393
+ except Exception as e:
2394
+ yield {
2395
+ "type": "error",
2396
+ "error": str(e),
2397
+ "timestamp": datetime.utcnow().isoformat(),
2398
+ }
2399
+ raise
2400
+
2401
+ async def process_message_streaming(self, message: str, sender_id: Optional[str] = None) -> AsyncIterator[str]:
2402
+ """
2403
+ Process a message with streaming response.
2404
+
2405
+ This method streams the response text as it's generated, providing
2406
+ a better user experience for long responses.
2407
+
2408
+ Args:
2409
+ message: Message content
2410
+ sender_id: Optional sender identifier
2411
+
2412
+ Yields:
2413
+ str: Response text tokens/chunks
2414
+
2415
+ Example:
2416
+ ```python
2417
+ async for token in agent.process_message_streaming("Hello!"):
2418
+ print(token, end='', flush=True)
2419
+ ```
2420
+
2421
+ Note:
2422
+ Subclasses should override this method to provide streaming support.
2423
+ Default implementation falls back to non-streaming process_message.
2424
+ """
2425
+ # Default implementation: process message and yield result
2426
+ try:
2427
+ result = await self.process_message(message, sender_id)
2428
+ response = result.get("response", "")
2429
+ yield response
2430
+ except Exception as e:
2431
+ logger.error(f"Streaming message processing failed: {e}")
2432
+ raise
2433
+
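A subclass that really streams would yield `status`, `token`, and `result` events from an async generator instead of falling back to `execute_task`. A self-contained sketch of that event shape with an invented token source and no LLM client:

```python
import asyncio
from datetime import datetime, timezone
from typing import Any, AsyncIterator, Dict

async def fake_streaming_task(prompt: str) -> AsyncIterator[Dict[str, Any]]:
    """Invented stand-in for an overridden execute_task_streaming()."""
    yield {"type": "status", "status": "started",
           "timestamp": datetime.now(timezone.utc).isoformat()}
    text = f"Echoing: {prompt}"
    for word in text.split():
        await asyncio.sleep(0)          # a real agent would await LLM chunks here
        yield {"type": "token", "content": word + " "}
    yield {"type": "result", "success": True, "output": text}

async def main() -> None:
    async for event in fake_streaming_task("hello"):
        if event["type"] == "token":
            print(event["content"], end="", flush=True)
        elif event["type"] == "result":
            print(f"\n[final] {event['output']}")

asyncio.run(main())
```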
2434
+ # ==================== Agent Collaboration (Phase 7 - Tasks 1.15.15-1.15.22) ====================
2435
+
2436
+ async def delegate_task(
2437
+ self,
2438
+ task: Dict[str, Any],
2439
+ required_capabilities: Optional[List[str]] = None,
2440
+ target_agent_id: Optional[str] = None,
2441
+ ) -> Dict[str, Any]:
2442
+ """
2443
+ Delegate a task to another capable agent.
2444
+
2445
+ Args:
2446
+ task: Task specification to delegate
2447
+ required_capabilities: Required capabilities for the task
2448
+ target_agent_id: Specific agent to delegate to (if None, finds capable agent)
2449
+
2450
+ Returns:
2451
+ Task execution result from delegated agent
2452
+
2453
+ Raises:
2454
+ ValueError: If collaboration not enabled or no capable agent found
2455
+
2456
+ Example:
2457
+ ```python
2458
+ # Delegate to specific agent
2459
+ result = await agent.delegate_task(
2460
+ task={"description": "Search for AI papers"},
2461
+ target_agent_id="search_agent"
2462
+ )
2463
+
2464
+ # Delegate to any capable agent
2465
+ result = await agent.delegate_task(
2466
+ task={"description": "Analyze data"},
2467
+ required_capabilities=["data_analysis", "statistics"]
2468
+ )
2469
+ ```
2470
+ """
2471
+ if not self._collaboration_enabled:
2472
+ raise ValueError("Agent collaboration is not enabled")
2473
+
2474
+ # Find target agent
2475
+ if target_agent_id:
2476
+ target_agent = self._agent_registry.get(target_agent_id)
2477
+ if not target_agent:
2478
+ raise ValueError(f"Agent {target_agent_id} not found in registry")
2479
+ elif required_capabilities:
2480
+ capable_agents = await self.find_capable_agents(required_capabilities)
2481
+ if not capable_agents:
2482
+ raise ValueError(f"No capable agents found for capabilities: {required_capabilities}")
2483
+ target_agent = capable_agents[0] # Use first capable agent
2484
+ else:
2485
+ raise ValueError("Either target_agent_id or required_capabilities must be provided")
2486
+
2487
+ logger.info(f"Agent {self.agent_id} delegating task to {target_agent.agent_id}")
2488
+
2489
+ # Delegate task
2490
+ try:
2491
+ result = await target_agent.execute_task(task, context={"delegated_by": self.agent_id})
2492
+ logger.info(f"Task delegation successful: {self.agent_id} -> {target_agent.agent_id}")
2493
+ return result
2494
+ except Exception as e:
2495
+ logger.error(f"Task delegation failed: {e}")
2496
+ raise
2497
+
2498
+ async def find_capable_agents(self, required_capabilities: List[str]) -> List[Any]:
2499
+ """
2500
+ Find agents with required capabilities.
2501
+
2502
+ Args:
2503
+ required_capabilities: List of required capability names
2504
+
2505
+ Returns:
2506
+ List of agents that have all required capabilities
2507
+
2508
+ Example:
2509
+ ```python
2510
+ agents = await agent.find_capable_agents(["search", "summarize"])
2511
+ for capable_agent in agents:
2512
+ print(f"Found: {capable_agent.name}")
2513
+ ```
2514
+ """
2515
+ if not self._collaboration_enabled:
2516
+ return []
2517
+
2518
+ capable_agents = []
2519
+ for agent_id, agent in self._agent_registry.items():
2520
+ # Skip self
2521
+ if agent_id == self.agent_id:
2522
+ continue
2523
+
2524
+ # Check if agent has all required capabilities
2525
+ agent_capabilities = getattr(agent, "capabilities", [])
2526
+ if all(cap in agent_capabilities for cap in required_capabilities):
2527
+ capable_agents.append(agent)
2528
+
2529
+ logger.debug(f"Found {len(capable_agents)} capable agents for {required_capabilities}")
2530
+ return capable_agents
2531
+
2532
+ async def request_peer_review(
2533
+ self,
2534
+ task: Dict[str, Any],
2535
+ result: Dict[str, Any],
2536
+ reviewer_id: Optional[str] = None,
2537
+ ) -> Dict[str, Any]:
2538
+ """
2539
+ Request peer review of a task result.
2540
+
2541
+ Args:
2542
+ task: Original task specification
2543
+ result: Task execution result to review
2544
+ reviewer_id: Specific reviewer agent ID (if None, selects automatically)
2545
+
2546
+ Returns:
2547
+ Review result with 'approved' (bool), 'feedback' (str), 'reviewer_id' (str)
2548
+
2549
+ Example:
2550
+ ```python
2551
+ result = await agent.execute_task(task, context)
2552
+ review = await agent.request_peer_review(task, result)
2553
+ if review['approved']:
2554
+ print(f"Approved: {review['feedback']}")
2555
+ else:
2556
+ print(f"Needs revision: {review['feedback']}")
2557
+ ```
2558
+ """
2559
+ if not self._collaboration_enabled:
2560
+ raise ValueError("Agent collaboration is not enabled")
2561
+
2562
+ # Find reviewer
2563
+ if reviewer_id:
2564
+ reviewer = self._agent_registry.get(reviewer_id)
2565
+ if not reviewer:
2566
+ raise ValueError(f"Reviewer {reviewer_id} not found in registry")
2567
+ else:
2568
+ # Select first available agent (excluding self)
2569
+ available_reviewers = [agent for agent_id, agent in self._agent_registry.items() if agent_id != self.agent_id]
2570
+ if not available_reviewers:
2571
+ raise ValueError("No reviewers available")
2572
+ reviewer = available_reviewers[0]
2573
+
2574
+ logger.info(f"Agent {self.agent_id} requesting review from {reviewer.agent_id}")
2575
+
2576
+ # Request review
2577
+ try:
2578
+ if hasattr(reviewer, "review_result"):
2579
+ review = await reviewer.review_result(task, result)
2580
+ else:
2581
+ # Fallback: use execute_task with review prompt
2582
+ task_desc = task.get("description", "")
2583
+ task_result = result.get("output", "")
2584
+ review_task = {
2585
+ "description": (f"Review this task result:\nTask: {task_desc}\nResult: {task_result}"),
2586
+ "task_id": f"review_{task.get('task_id', 'unknown')}",
2587
+ }
2588
+ review_result = await reviewer.execute_task(review_task, context={})
2589
+ review = {
2590
+ "approved": True, # Assume approved if no explicit review method
2591
+ "feedback": review_result.get("output", ""),
2592
+ "reviewer_id": reviewer.agent_id,
2593
+ }
2594
+
2595
+ logger.info(f"Review received from {reviewer.agent_id}")
2596
+ return review
2597
+ except Exception as e:
2598
+ logger.error(f"Peer review failed: {e}")
2599
+ raise
2600
+
2601
+ async def collaborate_on_task(
2602
+ self,
2603
+ task: Dict[str, Any],
2604
+ collaborator_ids: List[str],
2605
+ strategy: str = "parallel",
2606
+ ) -> Dict[str, Any]:
2607
+ """
2608
+ Collaborate with other agents on a task.
2609
+
2610
+ Args:
2611
+ task: Task specification
2612
+ collaborator_ids: List of agent IDs to collaborate with
2613
+ strategy: Collaboration strategy - 'parallel', 'sequential', or 'consensus'
2614
+
2615
+ Returns:
2616
+ Aggregated result based on strategy
2617
+
2618
+ Strategies:
2619
+ - parallel: All agents work simultaneously, results aggregated
2620
+ - sequential: Agents work in order, each building on previous results
2621
+ - consensus: All agents work independently, best result selected by voting
2622
+
2623
+ Example:
2624
+ ```python
2625
+ # Parallel collaboration
2626
+ result = await agent.collaborate_on_task(
2627
+ task={"description": "Analyze market trends"},
2628
+ collaborator_ids=["analyst1", "analyst2", "analyst3"],
2629
+ strategy="parallel"
2630
+ )
2631
+
2632
+ # Sequential collaboration (pipeline)
2633
+ result = await agent.collaborate_on_task(
2634
+ task={"description": "Research and summarize"},
2635
+ collaborator_ids=["researcher", "summarizer"],
2636
+ strategy="sequential"
2637
+ )
2638
+
2639
+ # Consensus collaboration
2640
+ result = await agent.collaborate_on_task(
2641
+ task={"description": "Make recommendation"},
2642
+ collaborator_ids=["expert1", "expert2", "expert3"],
2643
+ strategy="consensus"
2644
+ )
2645
+ ```
2646
+ """
2647
+ if not self._collaboration_enabled:
2648
+ raise ValueError("Agent collaboration is not enabled")
2649
+
2650
+ # Get collaborator agents
2651
+ collaborators = []
2652
+ for agent_id in collaborator_ids:
2653
+ agent = self._agent_registry.get(agent_id)
2654
+ if not agent:
2655
+ logger.warning(f"Collaborator {agent_id} not found, skipping")
2656
+ continue
2657
+ collaborators.append(agent)
2658
+
2659
+ if not collaborators:
2660
+ raise ValueError("No valid collaborators found")
2661
+
2662
+ logger.info(f"Agent {self.agent_id} collaborating with {len(collaborators)} agents " f"using {strategy} strategy")
2663
+
2664
+ # Execute based on strategy
2665
+ if strategy == "parallel":
2666
+ return await self._collaborate_parallel(task, collaborators)
2667
+ elif strategy == "sequential":
2668
+ return await self._collaborate_sequential(task, collaborators)
2669
+ elif strategy == "consensus":
2670
+ return await self._collaborate_consensus(task, collaborators)
2671
+ else:
2672
+ raise ValueError(f"Unknown collaboration strategy: {strategy}")
2673
+
2674
+ async def _collaborate_parallel(self, task: Dict[str, Any], collaborators: List[Any]) -> Dict[str, Any]:
2675
+ """
2676
+ Parallel collaboration: all agents work simultaneously.
2677
+
2678
+ Args:
2679
+ task: Task specification
2680
+ collaborators: List of collaborator agents
2681
+
2682
+ Returns:
2683
+ Aggregated result
2684
+ """
2685
+ # Execute task on all agents in parallel
2686
+ tasks = [agent.execute_task(task, context={"collaboration": "parallel"}) for agent in collaborators]
2687
+
2688
+ results = await asyncio.gather(*tasks, return_exceptions=True)
2689
+
2690
+ # Aggregate results
2691
+ return await self._aggregate_results(task, results, collaborators)
2692
+
2693
+ async def _collaborate_sequential(self, task: Dict[str, Any], collaborators: List[Any]) -> Dict[str, Any]:
2694
+ """
2695
+ Sequential collaboration: agents work in order, building on previous results.
2696
+
2697
+ Args:
2698
+ task: Task specification
2699
+ collaborators: List of collaborator agents (in execution order)
2700
+
2701
+ Returns:
2702
+ Final result from last agent
2703
+ """
2704
+ current_task = task.copy()
2705
+ results = []
2706
+
2707
+ for i, agent in enumerate(collaborators):
2708
+ logger.debug(f"Sequential step {i + 1}/{len(collaborators)}: {agent.agent_id}")
2709
+
2710
+ # Execute task
2711
+ result = await agent.execute_task(current_task, context={"collaboration": "sequential", "step": i + 1})
2712
+ results.append(result)
2713
+
2714
+ # Update task for next agent with previous result
2715
+ if i < len(collaborators) - 1:
2716
+ current_task = {
2717
+ "description": f"{task.get('description')}\n\nPrevious result: {result.get('output')}",
2718
+ "task_id": f"{task.get('task_id', 'unknown')}_step_{i + 2}",
2719
+ }
2720
+
2721
+ # Return final result
2722
+ return {
2723
+ "success": True,
2724
+ "output": results[-1].get("output") if results else "",
2725
+ "collaboration_strategy": "sequential",
2726
+ "steps": len(results),
2727
+ "all_results": results,
2728
+ "timestamp": datetime.utcnow().isoformat(),
2729
+ }
2730
+
2731
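The sequential strategy threads state by embedding each step's output into the next step's task description. A standalone sketch of that threading with stub agents (the agent classes here are stand-ins, not the package's real agents):

```python
# Standalone sketch of the sequential threading used above: each step's task
# description embeds the previous step's output. StubAgent is illustrative only.
import asyncio
from typing import Any, Dict, List


class StubAgent:
    def __init__(self, agent_id: str, suffix: str) -> None:
        self.agent_id = agent_id
        self.suffix = suffix

    async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        return {"success": True, "output": f"{task['description']} -> {self.suffix}"}


async def run_pipeline(task: Dict[str, Any], agents: List[StubAgent]) -> str:
    current_task = dict(task)
    output = ""
    for i, agent in enumerate(agents):
        result = await agent.execute_task(current_task, {"collaboration": "sequential", "step": i + 1})
        output = result["output"]
        if i < len(agents) - 1:
            # Next agent sees the original description plus the previous result.
            current_task = {
                "description": f"{task['description']}\n\nPrevious result: {output}",
                "task_id": f"{task.get('task_id', 'unknown')}_step_{i + 2}",
            }
    return output


final = asyncio.run(
    run_pipeline(
        {"description": "Research topic", "task_id": "t1"},
        [StubAgent("researcher", "notes"), StubAgent("summarizer", "summary")],
    )
)
print(final)
```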
+ async def _collaborate_consensus(self, task: Dict[str, Any], collaborators: List[Any]) -> Dict[str, Any]:
2732
+ """
2733
+ Consensus collaboration: all agents work independently, best result selected.
2734
+
2735
+ Args:
2736
+ task: Task specification
2737
+ collaborators: List of collaborator agents
2738
+
2739
+ Returns:
2740
+ Best result selected by consensus
2741
+ """
2742
+ # Execute task on all agents in parallel
2743
+ tasks = [agent.execute_task(task, context={"collaboration": "consensus"}) for agent in collaborators]
2744
+
2745
+ results = await asyncio.gather(*tasks, return_exceptions=True)
2746
+
2747
+ # Select best result by consensus
2748
+ return await self._select_consensus_result(task, results, collaborators)
2749
+
2750
+ async def _aggregate_results(self, task: Dict[str, Any], results: List[Any], collaborators: List[Any]) -> Dict[str, Any]:
2751
+ """
2752
+ Aggregate results from parallel collaboration.
2753
+
2754
+ Args:
2755
+ task: Original task
2756
+ results: List of results from collaborators
2757
+ collaborators: List of collaborator agents
2758
+
2759
+ Returns:
2760
+ Aggregated result
2761
+ """
2762
+ successful_results = []
2763
+ errors = []
2764
+
2765
+ for i, result in enumerate(results):
2766
+ if isinstance(result, Exception):
2767
+ errors.append({"agent": collaborators[i].agent_id, "error": str(result)})
2768
+ else:
2769
+ successful_results.append({"agent": collaborators[i].agent_id, "result": result})
2770
+
2771
+ # Combine outputs
2772
+ combined_output = "\n\n".join([f"[{r['agent']}]: {r['result'].get('output', '')}" for r in successful_results])
2773
+
2774
+ return {
2775
+ "success": len(successful_results) > 0,
2776
+ "output": combined_output,
2777
+ "collaboration_strategy": "parallel",
2778
+ "successful_agents": len(successful_results),
2779
+ "failed_agents": len(errors),
2780
+ "results": successful_results,
2781
+ "errors": errors if errors else None,
2782
+ "timestamp": datetime.utcnow().isoformat(),
2783
+ }
2784
+
2785
+ async def _select_consensus_result(self, task: Dict[str, Any], results: List[Any], collaborators: List[Any]) -> Dict[str, Any]:
2786
+ """
2787
+ Select best result by consensus voting.
2788
+
2789
+ Args:
2790
+ task: Original task
2791
+ results: List of results from collaborators
2792
+ collaborators: List of collaborator agents
2793
+
2794
+ Returns:
2795
+ Best result selected by consensus
2796
+ """
2797
+ successful_results = []
2798
+
2799
+ for i, result in enumerate(results):
2800
+ if not isinstance(result, Exception):
2801
+ successful_results.append({"agent": collaborators[i].agent_id, "result": result, "votes": 0})
2802
+
2803
+ if not successful_results:
2804
+ return {
2805
+ "success": False,
2806
+ "output": "All collaborators failed",
2807
+ "collaboration_strategy": "consensus",
2808
+ "timestamp": datetime.utcnow().isoformat(),
2809
+ }
2810
+
2811
+ # Simple voting: each agent votes for best result (excluding their own)
2812
+ # In a real implementation, this could use an LLM to evaluate quality
2813
+ for voter_idx, voter_result in enumerate(successful_results):
2814
+ # For now, use a simple heuristic: longest output is "best"
2815
+ # In production, use LLM-based evaluation
2816
+ best_idx = max(
2817
+ range(len(successful_results)),
2818
+ key=lambda i: (len(successful_results[i]["result"].get("output", "")) if i != voter_idx else 0),
2819
+ )
2820
+ successful_results[best_idx]["votes"] += 1
2821
+
2822
+ # Select result with most votes
2823
+ best_result = max(successful_results, key=lambda r: r["votes"])
2824
+
2825
+ return {
2826
+ "success": True,
2827
+ "output": best_result["result"].get("output", ""),
2828
+ "collaboration_strategy": "consensus",
2829
+ "selected_agent": best_result["agent"],
2830
+ "votes": best_result["votes"],
2831
+ "total_agents": len(successful_results),
2832
+ "all_results": successful_results,
2833
+ "timestamp": datetime.utcnow().isoformat(),
2834
+ }
2835
+
2836
+ # ==================== Smart Context Management (Phase 8 - Tasks 1.16.1-1.16.3) ====================
2837
+
2838
+ async def get_relevant_context(
2839
+ self,
2840
+ query: str,
2841
+ context_items: List[Dict[str, Any]],
2842
+ max_items: Optional[int] = None,
2843
+ min_relevance_score: float = 0.5,
2844
+ ) -> List[Dict[str, Any]]:
2845
+ """
2846
+ Get relevant context items using semantic search and relevance scoring.
2847
+
2848
+ This method filters and ranks context items based on their relevance to
2849
+ the query, helping agents stay within token limits while maintaining
2850
+ the most important context.
2851
+
2852
+ Args:
2853
+ query: Query or task description to match against
2854
+ context_items: List of context items (dicts with 'content' field)
2855
+ max_items: Maximum number of items to return (None = no limit)
2856
+ min_relevance_score: Minimum relevance score (0.0-1.0)
2857
+
2858
+ Returns:
2859
+ List of relevant context items, sorted by relevance (highest first)
2860
+
2861
+ Example:
2862
+ ```python
2863
+ context_items = [
2864
+ {"content": "User prefers concise answers", "type": "preference"},
2865
+ {"content": "Previous task: data analysis", "type": "history"},
2866
+ {"content": "System configuration: prod", "type": "config"},
2867
+ ]
2868
+
2869
+ relevant = await agent.get_relevant_context(
2870
+ query="Analyze sales data",
2871
+ context_items=context_items,
2872
+ max_items=2,
2873
+ min_relevance_score=0.6
2874
+ )
2875
+ # Returns top 2 most relevant items with score >= 0.6
2876
+ ```
2877
+ """
2878
+ if not context_items:
2879
+ return []
2880
+
2881
+ # Score all items
2882
+ scored_items = []
2883
+ for item in context_items:
2884
+ score = await self.score_context_relevance(query, item)
2885
+ if score >= min_relevance_score:
2886
+ scored_items.append({**item, "_relevance_score": score})
2887
+
2888
+ # Sort by relevance (highest first)
2889
+ scored_items.sort(key=lambda x: x["_relevance_score"], reverse=True)
2890
+
2891
+ # Limit number of items
2892
+ if max_items is not None:
2893
+ scored_items = scored_items[:max_items]
2894
+
2895
+ logger.debug(f"Selected {len(scored_items)}/{len(context_items)} relevant context items " f"(min_score={min_relevance_score})")
2896
+
2897
+ return scored_items
2898
+
2899
+ async def score_context_relevance(self, query: str, context_item: Dict[str, Any]) -> float:
2900
+ """
2901
+ Score the relevance of a context item to a query.
2902
+
2903
+ Uses multiple signals to determine relevance:
2904
+ - Keyword overlap (basic)
2905
+ - Semantic similarity (if LLM client with embeddings available)
2906
+ - Recency (if timestamp available)
2907
+ - Type priority (if type specified)
2908
+
2909
+ Args:
2910
+ query: Query or task description
2911
+ context_item: Context item to score (dict with 'content' field)
2912
+
2913
+ Returns:
2914
+ Relevance score between 0.0 (not relevant) and 1.0 (highly relevant)
2915
+
2916
+ Example:
2917
+ ```python
2918
+ score = await agent.score_context_relevance(
2919
+ query="Analyze sales data",
2920
+ context_item={"content": "Previous analysis results", "type": "history"}
2921
+ )
2922
+ print(f"Relevance: {score:.2f}")
2923
+ ```
2924
+ """
2925
+ content = context_item.get("content", "")
2926
+ if not content:
2927
+ return 0.0
2928
+
2929
+ # Convert to lowercase for comparison
2930
+ query_lower = query.lower()
2931
+ content_lower = content.lower()
2932
+
2933
+ # 1. Keyword overlap score (0.0-0.5)
2934
+ query_words = set(query_lower.split())
2935
+ content_words = set(content_lower.split())
2936
+ if not query_words:
2937
+ keyword_score = 0.0
2938
+ else:
2939
+ overlap = len(query_words & content_words)
2940
+ keyword_score = min(0.5, (overlap / len(query_words)) * 0.5)
2941
+
2942
+ # 2. Semantic similarity score (0.0-0.3)
2943
+ # If LLM client with embeddings is available, use it
2944
+ semantic_score = 0.0
2945
+ if self._llm_client and hasattr(self._llm_client, "get_embeddings"):
2946
+ try:
2947
+ embeddings = await self._llm_client.get_embeddings([query, content])
2948
+ if len(embeddings) == 2:
2949
+ # Calculate cosine similarity
2950
+ import math
2951
+
2952
+ vec1, vec2 = embeddings[0], embeddings[1]
2953
+ dot_product = sum(a * b for a, b in zip(vec1, vec2))
2954
+ mag1 = math.sqrt(sum(a * a for a in vec1))
2955
+ mag2 = math.sqrt(sum(b * b for b in vec2))
2956
+ if mag1 > 0 and mag2 > 0:
2957
+ similarity = dot_product / (mag1 * mag2)
2958
+ semantic_score = max(0.0, similarity) * 0.3
2959
+ except Exception as e:
2960
+ logger.debug(f"Semantic similarity calculation failed: {e}")
2961
+
2962
+ # 3. Recency score (0.0-0.1)
2963
+ recency_score = 0.0
2964
+ if "timestamp" in context_item:
2965
+ try:
2966
+ from datetime import datetime
2967
+
2968
+ timestamp = context_item["timestamp"]
2969
+ if isinstance(timestamp, str):
2970
+ timestamp = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
2971
+ age_seconds = (datetime.utcnow() - timestamp).total_seconds()
2972
+ # Decay over 24 hours
2973
+ recency_score = max(0.0, 0.1 * (1.0 - min(1.0, age_seconds / 86400)))
2974
+ except Exception as e:
2975
+ logger.debug(f"Recency calculation failed: {e}")
2976
+
2977
+ # 4. Type priority score (0.0-0.1)
2978
+ type_score = 0.0
2979
+ item_type = context_item.get("type", "")
2980
+ priority_types = {"preference": 0.1, "constraint": 0.1, "requirement": 0.09}
2981
+ type_score = priority_types.get(item_type, 0.05)
2982
+
2983
+ # Combine scores
2984
+ total_score = keyword_score + semantic_score + recency_score + type_score
2985
+
2986
+ return min(1.0, total_score)
2987
+
2988
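The relevance score is a weighted sum: keyword overlap contributes up to 0.5, embedding similarity up to 0.3, recency up to 0.1 (linear decay over 24 hours), and type priority up to 0.1. A worked sketch of the non-embedding components under those weights (so scores here top out at 0.7); the sample item is made up:

```python
# Worked sketch of the scoring weights described above, omitting the optional
# embedding-based component. Item contents and timestamps are illustrative.
from datetime import datetime, timedelta
from typing import Any, Dict


def score(query: str, item: Dict[str, Any], now: datetime) -> float:
    content = str(item.get("content", ""))
    query_words, content_words = set(query.lower().split()), set(content.lower().split())
    keyword = min(0.5, (len(query_words & content_words) / len(query_words)) * 0.5) if query_words else 0.0

    recency = 0.0
    if "timestamp" in item:
        age = (now - item["timestamp"]).total_seconds()
        recency = max(0.0, 0.1 * (1.0 - min(1.0, age / 86400)))  # linear decay over 24h

    type_score = {"preference": 0.1, "constraint": 0.1, "requirement": 0.09}.get(item.get("type", ""), 0.05)
    return min(1.0, keyword + recency + type_score)


now = datetime.utcnow()
item = {"content": "Previous sales data analysis", "type": "history", "timestamp": now - timedelta(hours=6)}
print(round(score("Analyze sales data", item, now), 3))  # ~0.458
```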
+ async def prune_context(
2989
+ self,
2990
+ context_items: List[Dict[str, Any]],
2991
+ max_tokens: int,
2992
+ query: Optional[str] = None,
2993
+ preserve_types: Optional[List[str]] = None,
2994
+ ) -> List[Dict[str, Any]]:
2995
+ """
2996
+ Prune context items to fit within token limit.
2997
+
2998
+ Uses relevance scoring to keep the most important context while
2999
+ staying within token limits. Optionally preserves certain types
3000
+ of context regardless of relevance.
3001
+
3002
+ Args:
3003
+ context_items: List of context items to prune
3004
+ max_tokens: Maximum total tokens allowed
3005
+ query: Optional query for relevance scoring
3006
+ preserve_types: Optional list of types to always preserve
3007
+
3008
+ Returns:
3009
+ Pruned list of context items that fit within token limit
3010
+
3011
+ Example:
3012
+ ```python
3013
+ pruned = await agent.prune_context(
3014
+ context_items=all_context,
3015
+ max_tokens=2000,
3016
+ query="Analyze data",
3017
+ preserve_types=["constraint", "requirement"]
3018
+ )
3019
+ print(f"Pruned from {len(all_context)} to {len(pruned)} items")
3020
+ ```
3021
+ """
3022
+ if not context_items:
3023
+ return []
3024
+
3025
+ preserve_types = preserve_types or []
3026
+
3027
+ # Separate preserved and regular items
3028
+ preserved_items = []
3029
+ regular_items = []
3030
+
3031
+ for item in context_items:
3032
+ if item.get("type") in preserve_types:
3033
+ preserved_items.append(item)
3034
+ else:
3035
+ regular_items.append(item)
3036
+
3037
+ # Score regular items if query provided
3038
+ if query and regular_items:
3039
+ scored_items = []
3040
+ for item in regular_items:
3041
+ score = await self.score_context_relevance(query, item)
3042
+ scored_items.append({**item, "_relevance_score": score})
3043
+ # Sort by relevance
3044
+ scored_items.sort(key=lambda x: x["_relevance_score"], reverse=True)
3045
+ regular_items = scored_items
3046
+
3047
+ # Estimate tokens (rough approximation: 1 token ≈ 4 characters)
3048
+ def estimate_tokens(item: Dict[str, Any]) -> int:
3049
+ content = str(item.get("content", ""))
3050
+ return len(content) // 4
3051
+
3052
+ # Add preserved items first
3053
+ result = []
3054
+ current_tokens = 0
3055
+
3056
+ for item in preserved_items:
3057
+ item_tokens = estimate_tokens(item)
3058
+ if current_tokens + item_tokens <= max_tokens:
3059
+ result.append(item)
3060
+ current_tokens += item_tokens
3061
+ else:
3062
+ logger.warning(f"Preserved item exceeds token limit, skipping: {item.get('type')}")
3063
+
3064
+ # Add regular items until token limit
3065
+ for item in regular_items:
3066
+ item_tokens = estimate_tokens(item)
3067
+ if current_tokens + item_tokens <= max_tokens:
3068
+ result.append(item)
3069
+ current_tokens += item_tokens
3070
+ else:
3071
+ break
3072
+
3073
+ logger.info(f"Pruned context from {len(context_items)} to {len(result)} items " f"({current_tokens}/{max_tokens} tokens)")
3074
+
3075
+ return result
3076
+
3077
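Pruning is greedy against a token budget estimated at roughly four characters per token: preserved types are admitted first, then the remaining items in relevance order until the budget is exhausted. A compressed sketch of that loop (data and budget are made up, and the early-exit details of the original are simplified):

```python
# Sketch of the greedy pruning above: preserved types go in first, remaining
# items fill the budget in order, with tokens estimated as len(content) // 4.
from typing import Any, Dict, List


def prune(items: List[Dict[str, Any]], max_tokens: int, preserve_types: List[str]) -> List[Dict[str, Any]]:
    def estimate(it: Dict[str, Any]) -> int:
        return len(str(it.get("content", ""))) // 4

    preserved = [it for it in items if it.get("type") in preserve_types]
    regular = [it for it in items if it.get("type") not in preserve_types]

    kept, used = [], 0
    for it in preserved + regular:
        cost = estimate(it)
        if used + cost <= max_tokens:
            kept.append(it)
            used += cost
    return kept


items = [
    {"type": "constraint", "content": "Never expose customer PII in any output."},
    {"type": "history", "content": "x" * 400},   # ~100 tokens
    {"type": "history", "content": "y" * 4000},  # ~1000 tokens, dropped under a 200-token budget
]
print([it["type"] for it in prune(items, max_tokens=200, preserve_types=["constraint"])])
# ['constraint', 'history']
```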
+ # ==================== Agent Learning (Phase 8 - Tasks 1.16.4-1.16.10) ====================
3078
+
3079
+ async def record_experience(
3080
+ self,
3081
+ task: Dict[str, Any],
3082
+ result: Dict[str, Any],
3083
+ approach: str,
3084
+ tools_used: Optional[List[str]] = None,
3085
+ ) -> None:
3086
+ """
3087
+ Record an experience for learning and adaptation.
3088
+
3089
+ Args:
3090
+ task: Task specification
3091
+ result: Task execution result
3092
+ approach: Approach/strategy used
3093
+ tools_used: List of tools used (if any)
3094
+
3095
+ Example:
3096
+ ```python
3097
+ await agent.record_experience(
3098
+ task={"description": "Analyze data", "type": "analysis"},
3099
+ result={"success": True, "execution_time": 5.2},
3100
+ approach="statistical_analysis",
3101
+ tools_used=["pandas", "numpy"]
3102
+ )
3103
+ ```
3104
+ """
3105
+ if not self._learning_enabled:
3106
+ return
3107
+
3108
+ from .models import Experience
3109
+
3110
+ # Classify task
3111
+ task_type = await self._classify_task(task)
3112
+
3113
+ # Create experience record
3114
+ experience = Experience( # type: ignore[call-arg]
3115
+ agent_id=self.agent_id,
3116
+ task_type=task_type,
3117
+ task_description=task.get("description", ""),
3118
+ task_complexity=task.get("complexity"),
3119
+ approach=approach,
3120
+ tools_used=tools_used or [],
3121
+ execution_time=result.get("execution_time", 0.0),
3122
+ success=result.get("success", False),
3123
+ quality_score=result.get("quality_score"),
3124
+ error_type=result.get("error_type"),
3125
+ error_message=result.get("error"),
3126
+ context_size=result.get("context_size"),
3127
+ iterations=result.get("iterations"),
3128
+ metadata={"task_id": task.get("task_id")},
3129
+ )
3130
+
3131
+ # Add to experiences
3132
+ self._experiences.append(experience)
3133
+
3134
+ # Limit stored experiences
3135
+ if len(self._experiences) > self._max_experiences:
3136
+ self._experiences = self._experiences[-self._max_experiences :]
3137
+
3138
+ logger.debug(f"Recorded experience: {task_type} - " f"{'success' if experience.success else 'failure'} " f"({experience.execution_time:.2f}s)")
3139
+
3140
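The `Experience` model is imported from `.models` and is not part of this diff; the constructor call above only hints at its fields. Purely as a guess at that shape for readers following along, a hypothetical dataclass (every field name and default here is an assumption, not the package's definition):

```python
# Hypothetical approximation of the Experience model, inferred from the keyword
# arguments passed above. The real model lives in .models and may differ.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class ExperienceSketch:
    agent_id: str
    task_type: str
    task_description: str
    approach: str
    success: bool
    execution_time: float = 0.0
    task_complexity: Optional[str] = None
    tools_used: List[str] = field(default_factory=list)
    quality_score: Optional[float] = None
    error_type: Optional[str] = None
    error_message: Optional[str] = None
    context_size: Optional[int] = None
    iterations: Optional[int] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


exp = ExperienceSketch(
    agent_id="analyst-1",
    task_type="analysis",
    task_description="Analyze data",
    approach="statistical_analysis",
    success=True,
    execution_time=5.2,
    tools_used=["pandas", "numpy"],
)
print(exp.task_type, exp.success)  # analysis True
```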
+ async def get_recommended_approach(self, task: Dict[str, Any]) -> Optional[Dict[str, Any]]:
3141
+ """
3142
+ Get recommended approach based on past experiences.
3143
+
3144
+ Analyzes similar past experiences to recommend the best approach
3145
+ for the current task.
3146
+
3147
+ Args:
3148
+ task: Task specification
3149
+
3150
+ Returns:
3151
+ Recommended approach dict with 'approach', 'confidence', 'reasoning'
3152
+ or None if no relevant experiences
3153
+
3154
+ Example:
3155
+ ```python
3156
+ recommendation = await agent.get_recommended_approach(
3157
+ task={"description": "Analyze sales data", "type": "analysis"}
3158
+ )
3159
+ if recommendation:
3160
+ print(f"Recommended: {recommendation['approach']}")
3161
+ print(f"Confidence: {recommendation['confidence']:.2f}")
3162
+ print(f"Reasoning: {recommendation['reasoning']}")
3163
+ ```
3164
+ """
3165
+ if not self._learning_enabled or not self._experiences:
3166
+ return None
3167
+
3168
+ # Classify current task
3169
+ task_type = await self._classify_task(task)
3170
+
3171
+ # Find similar experiences
3172
+ similar_experiences = [exp for exp in self._experiences if exp.task_type == task_type]
3173
+
3174
+ if not similar_experiences:
3175
+ return None
3176
+
3177
+ # Analyze successful experiences
3178
+ successful = [exp for exp in similar_experiences if exp.success]
3179
+ if not successful:
3180
+ return None
3181
+
3182
+ # Count approaches
3183
+ approach_stats: Dict[str, Dict[str, Any]] = {}
3184
+ for exp in successful:
3185
+ if exp.approach not in approach_stats:
3186
+ approach_stats[exp.approach] = {
3187
+ "count": 0,
3188
+ "total_time": 0.0,
3189
+ "avg_quality": 0.0,
3190
+ "quality_count": 0,
3191
+ }
3192
+ stats = approach_stats[exp.approach]
3193
+ stats["count"] += 1
3194
+ stats["total_time"] += exp.execution_time
3195
+ if exp.quality_score is not None:
3196
+ stats["avg_quality"] += exp.quality_score
3197
+ stats["quality_count"] += 1
3198
+
3199
+ # Calculate averages and scores
3200
+ for approach, stats in approach_stats.items():
3201
+ stats["avg_time"] = stats["total_time"] / stats["count"]
3202
+ if stats["quality_count"] > 0:
3203
+ stats["avg_quality"] = stats["avg_quality"] / stats["quality_count"]
3204
+ else:
3205
+ stats["avg_quality"] = 0.5 # Default
3206
+
3207
+ # Select best approach (balance success rate, quality, speed)
3208
+ best_approach = max(
3209
+ approach_stats.items(),
3210
+ key=lambda x: (
3211
+ x[1]["count"] / len(similar_experiences), # Success rate
3212
+ x[1]["avg_quality"], # Quality
3213
+ -x[1]["avg_time"], # Speed (negative for faster is better)
3214
+ ),
3215
+ )
3216
+
3217
+ approach_name, stats = best_approach
3218
+ confidence = min(1.0, stats["count"] / max(5, len(similar_experiences)))
3219
+
3220
+ return {
3221
+ "approach": approach_name,
3222
+ "confidence": confidence,
3223
+ "reasoning": (
3224
+ f"Based on {stats['count']} successful experiences with {task_type} tasks. " f"Average execution time: {stats['avg_time']:.2f}s, " f"Average quality: {stats['avg_quality']:.2f}"
3225
+ ),
3226
+ "stats": stats,
3227
+ }
3228
+
3229
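The recommendation logic groups successful experiences by approach, ranks approaches by (success share, average quality, negative average time), and reports confidence as `count / max(5, total similar experiences)`. A standalone sketch of that aggregation on plain dicts (history data is illustrative):

```python
# Standalone sketch of the aggregation above: group successful experiences by
# approach, rank by success share, quality, then speed, and derive confidence.
from typing import Any, Dict, List


def recommend(experiences: List[Dict[str, Any]]) -> Dict[str, Any]:
    successful = [e for e in experiences if e["success"]]
    stats: Dict[str, Dict[str, float]] = {}
    for e in successful:
        s = stats.setdefault(e["approach"], {"count": 0, "total_time": 0.0, "quality_sum": 0.0})
        s["count"] += 1
        s["total_time"] += e["execution_time"]
        s["quality_sum"] += e.get("quality_score", 0.5)

    def rank(item):
        _, s = item
        return (s["count"] / len(experiences), s["quality_sum"] / s["count"], -s["total_time"] / s["count"])

    name, s = max(stats.items(), key=rank)
    return {"approach": name, "confidence": min(1.0, s["count"] / max(5, len(experiences)))}


history = [
    {"approach": "statistical_analysis", "success": True, "execution_time": 5.0, "quality_score": 0.8},
    {"approach": "statistical_analysis", "success": True, "execution_time": 6.0, "quality_score": 0.7},
    {"approach": "llm_summary", "success": True, "execution_time": 2.0, "quality_score": 0.6},
    {"approach": "llm_summary", "success": False, "execution_time": 2.0},
]
print(recommend(history))  # statistical_analysis, confidence 0.4 (2 / max(5, 4))
```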
+ async def get_learning_insights(self) -> Dict[str, Any]:
3230
+ """
3231
+ Get learning insights and analytics.
3232
+
3233
+ Provides analytics about agent learning including success rates,
3234
+ common patterns, and areas for improvement.
3235
+
3236
+ Returns:
3237
+ Dict with learning insights and statistics
3238
+
3239
+ Example:
3240
+ ```python
3241
+ insights = await agent.get_learning_insights()
3242
+ print(f"Total experiences: {insights['total_experiences']}")
3243
+ print(f"Success rate: {insights['overall_success_rate']:.2%}")
3244
+ print(f"Most common task: {insights['most_common_task_type']}")
3245
+ ```
3246
+ """
3247
+ if not self._learning_enabled or not self._experiences:
3248
+ return {
3249
+ "total_experiences": 0,
3250
+ "learning_enabled": self._learning_enabled,
3251
+ }
3252
+
3253
+ total = len(self._experiences)
3254
+ successful = sum(1 for exp in self._experiences if exp.success)
3255
+ failed = total - successful
3256
+
3257
+ # Task type distribution
3258
+ task_types: Dict[str, int] = {}
3259
+ for exp in self._experiences:
3260
+ task_types[exp.task_type] = task_types.get(exp.task_type, 0) + 1
3261
+
3262
+ # Approach effectiveness
3263
+ approach_success: Dict[str, Dict[str, int]] = {}
3264
+ for exp in self._experiences:
3265
+ if exp.approach not in approach_success:
3266
+ approach_success[exp.approach] = {"success": 0, "failure": 0}
3267
+ if exp.success:
3268
+ approach_success[exp.approach]["success"] += 1
3269
+ else:
3270
+ approach_success[exp.approach]["failure"] += 1
3271
+
3272
+ # Calculate success rates
3273
+ approach_rates = {approach: stats["success"] / (stats["success"] + stats["failure"]) for approach, stats in approach_success.items()}
3274
+
3275
+ # Error patterns
3276
+ error_types: Dict[str, int] = {}
3277
+ for exp in self._experiences:
3278
+ if not exp.success and exp.error_type:
3279
+ error_types[exp.error_type] = error_types.get(exp.error_type, 0) + 1
3280
+
3281
+ return {
3282
+ "total_experiences": total,
3283
+ "successful_experiences": successful,
3284
+ "failed_experiences": failed,
3285
+ "overall_success_rate": successful / total if total > 0 else 0.0,
3286
+ "task_type_distribution": task_types,
3287
+ "most_common_task_type": (max(task_types.items(), key=lambda x: x[1])[0] if task_types else None),
3288
+ "approach_effectiveness": approach_rates,
3289
+ "best_approach": (max(approach_rates.items(), key=lambda x: x[1])[0] if approach_rates else None),
3290
+ "error_patterns": error_types,
3291
+ "most_common_error": (max(error_types.items(), key=lambda x: x[1])[0] if error_types else None),
3292
+ "learning_enabled": self._learning_enabled,
3293
+ }
3294
+
3295
+ async def adapt_strategy(self, task: Dict[str, Any]) -> Dict[str, Any]:
3296
+ """
3297
+ Adapt strategy based on learning insights.
3298
+
3299
+ Analyzes past experiences to suggest strategy adaptations for
3300
+ the current task.
3301
+
3302
+ Args:
3303
+ task: Task specification
3304
+
3305
+ Returns:
3306
+ Dict with strategy adaptations and recommendations
3307
+
3308
+ Example:
3309
+ ```python
3310
+ adaptations = await agent.adapt_strategy(
3311
+ task={"description": "Complex analysis", "type": "analysis"}
3312
+ )
3313
+ print(f"Recommended approach: {adaptations['recommended_approach']}")
3314
+ print(f"Suggested tools: {adaptations['suggested_tools']}")
3315
+ ```
3316
+ """
3317
+ if not self._learning_enabled:
3318
+ return {"adapted": False, "reason": "Learning not enabled"}
3319
+
3320
+ # Get recommended approach
3321
+ recommendation = await self.get_recommended_approach(task)
3322
+
3323
+ if not recommendation:
3324
+ return {
3325
+ "adapted": False,
3326
+ "reason": "No relevant experiences found",
3327
+ }
3328
+
3329
+ # Classify task
3330
+ task_type = await self._classify_task(task)
3331
+
3332
+ # Find similar successful experiences
3333
+ similar_successful = [exp for exp in self._experiences if exp.task_type == task_type and exp.success]
3334
+
3335
+ # Analyze tool usage patterns
3336
+ tool_usage: Dict[str, int] = {}
3337
+ for exp in similar_successful:
3338
+ for tool in exp.tools_used:
3339
+ tool_usage[tool] = tool_usage.get(tool, 0) + 1
3340
+
3341
+ # Get most commonly used tools
3342
+ suggested_tools = sorted(tool_usage.items(), key=lambda x: x[1], reverse=True)[:5] # Top 5 tools
3343
+
3344
+ return {
3345
+ "adapted": True,
3346
+ "recommended_approach": recommendation["approach"],
3347
+ "confidence": recommendation["confidence"],
3348
+ "reasoning": recommendation["reasoning"],
3349
+ "suggested_tools": [tool for tool, _ in suggested_tools],
3350
+ "tool_usage_stats": dict(suggested_tools),
3351
+ "based_on_experiences": len(similar_successful),
3352
+ }
3353
+
3354
+ async def _classify_task(self, task: Dict[str, Any]) -> str:
3355
+ """
3356
+ Classify task into a type/category.
3357
+
3358
+ Uses simple heuristics to classify tasks. Can be overridden by
3359
+ subclasses for more sophisticated classification.
3360
+
3361
+ Args:
3362
+ task: Task specification
3363
+
3364
+ Returns:
3365
+ Task type string
3366
+
3367
+ Example:
3368
+ ```python
3369
+ task_type = await agent._classify_task(
3370
+ {"description": "Analyze sales data"}
3371
+ )
3372
+ # Returns: "analysis"
3373
+ ```
3374
+ """
3375
+ # Check explicit type
3376
+ if "type" in task:
3377
+ return task["type"]
3378
+
3379
+ # Simple keyword-based classification
3380
+ description = task.get("description", "").lower()
3381
+
3382
+ if any(word in description for word in ["analyze", "analysis", "examine"]):
3383
+ return "analysis"
3384
+ elif any(word in description for word in ["search", "find", "lookup"]):
3385
+ return "search"
3386
+ elif any(word in description for word in ["create", "generate", "write"]):
3387
+ return "generation"
3388
+ elif any(word in description for word in ["summarize", "summary"]):
3389
+ return "summarization"
3390
+ elif any(word in description for word in ["calculate", "compute"]):
3391
+ return "calculation"
3392
+ elif any(word in description for word in ["translate", "convert"]):
3393
+ return "translation"
3394
+ else:
3395
+ return "general"
3396
+
3397
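For a quick feel of the keyword classifier, here is the same rule set expressed as a plain function with a few illustrative inputs (the rules mirror the branches above; the sample descriptions are made up):

```python
# Quick sketch of the keyword classification above as a plain function.
def classify(description: str) -> str:
    d = description.lower()
    rules = [
        ("analysis", ["analyze", "analysis", "examine"]),
        ("search", ["search", "find", "lookup"]),
        ("generation", ["create", "generate", "write"]),
        ("summarization", ["summarize", "summary"]),
        ("calculation", ["calculate", "compute"]),
        ("translation", ["translate", "convert"]),
    ]
    for task_type, keywords in rules:
        if any(word in d for word in keywords):
            return task_type
    return "general"


for text in ("Analyze sales data", "Write a product brief", "Ping the staging server"):
    print(text, "->", classify(text))
# Analyze sales data -> analysis
# Write a product brief -> generation
# Ping the staging server -> general
```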
+ # ==================== Resource Management (Phase 8 - Tasks 1.16.11-1.16.17) ====================
3398
+
3399
+ async def check_resource_availability(self) -> Dict[str, Any]:
3400
+ """
3401
+ Check if resources are available for task execution.
3402
+
3403
+ Checks against configured resource limits including:
3404
+ - Concurrent task limits
3405
+ - Token rate limits
3406
+ - Tool call rate limits
3407
+
3408
+ Returns:
3409
+ Dict with 'available' (bool) and details about resource status
3410
+
3411
+ Example:
3412
+ ```python
3413
+ status = await agent.check_resource_availability()
3414
+ if status['available']:
3415
+ await agent.execute_task(task, context)
3416
+ else:
3417
+ print(f"Resources unavailable: {status['reason']}")
3418
+ ```
3419
+ """
3420
+ if not self._resource_limits.enforce_limits:
3421
+ return {"available": True, "reason": "Limits not enforced"}
3422
+
3423
+ # Check concurrent task limit
3424
+ if len(self._active_tasks) >= self._resource_limits.max_concurrent_tasks:
3425
+ return {
3426
+ "available": False,
3427
+ "reason": "Concurrent task limit reached",
3428
+ "active_tasks": len(self._active_tasks),
3429
+ "max_tasks": self._resource_limits.max_concurrent_tasks,
3430
+ }
3431
+
3432
+ # Check token rate limits
3433
+ token_check = await self._check_token_rate_limit()
3434
+ if not token_check["available"]:
3435
+ return token_check
3436
+
3437
+ # Check tool call rate limits
3438
+ tool_check = await self._check_tool_call_rate_limit()
3439
+ if not tool_check["available"]:
3440
+ return tool_check
3441
+
3442
+ return {
3443
+ "available": True,
3444
+ "active_tasks": len(self._active_tasks),
3445
+ "max_tasks": self._resource_limits.max_concurrent_tasks,
3446
+ }
3447
+
3448
+ async def wait_for_resources(self, timeout: Optional[float] = None) -> bool:
3449
+ """
3450
+ Wait for resources to become available.
3451
+
3452
+ Args:
3453
+ timeout: Maximum time to wait in seconds (uses resource_wait_timeout_seconds if None)
3454
+
3455
+ Returns:
3456
+ True if resources became available, False if timeout
3457
+
3458
+ Example:
3459
+ ```python
3460
+ if await agent.wait_for_resources(timeout=30):
3461
+ await agent.execute_task(task, context)
3462
+ else:
3463
+ print("Timeout waiting for resources")
3464
+ ```
3465
+ """
3466
+ if timeout is None:
3467
+ timeout = self._resource_limits.resource_wait_timeout_seconds
3468
+
3469
+ start_time = time.time()
3470
+ check_interval = 0.5 # Check every 500ms
3471
+
3472
+ while time.time() - start_time < timeout:
3473
+ status = await self.check_resource_availability()
3474
+ if status["available"]:
3475
+ return True
3476
+
3477
+ # Wait before next check
3478
+ await asyncio.sleep(check_interval)
3479
+
3480
+ logger.warning(f"Timeout waiting for resources after {timeout}s")
3481
+ return False
3482
+
3483
+ async def get_resource_usage(self) -> Dict[str, Any]:
3484
+ """
3485
+ Get current resource usage statistics.
3486
+
3487
+ Returns:
3488
+ Dict with resource usage information
3489
+
3490
+ Example:
3491
+ ```python
3492
+ usage = await agent.get_resource_usage()
3493
+ print(f"Active tasks: {usage['active_tasks']}")
3494
+ print(f"Tokens/min: {usage['tokens_per_minute']}")
3495
+ print(f"Tool calls/min: {usage['tool_calls_per_minute']}")
3496
+ ```
3497
+ """
3498
+ current_time = time.time()
3499
+
3500
+ # Calculate token usage rates
3501
+ tokens_last_minute = sum(count for ts, count in self._token_usage_window if current_time - ts < 60)
3502
+ tokens_last_hour = sum(count for ts, count in self._token_usage_window if current_time - ts < 3600)
3503
+
3504
+ # Calculate tool call rates
3505
+ tool_calls_last_minute = sum(1 for ts in self._tool_call_window if current_time - ts < 60)
3506
+ tool_calls_last_hour = sum(1 for ts in self._tool_call_window if current_time - ts < 3600)
3507
+
3508
+ return {
3509
+ "active_tasks": len(self._active_tasks),
3510
+ "max_concurrent_tasks": self._resource_limits.max_concurrent_tasks,
3511
+ "task_utilization": len(self._active_tasks) / self._resource_limits.max_concurrent_tasks,
3512
+ "tokens_per_minute": tokens_last_minute,
3513
+ "tokens_per_hour": tokens_last_hour,
3514
+ "max_tokens_per_minute": self._resource_limits.max_tokens_per_minute,
3515
+ "max_tokens_per_hour": self._resource_limits.max_tokens_per_hour,
3516
+ "tool_calls_per_minute": tool_calls_last_minute,
3517
+ "tool_calls_per_hour": tool_calls_last_hour,
3518
+ "max_tool_calls_per_minute": self._resource_limits.max_tool_calls_per_minute,
3519
+ "max_tool_calls_per_hour": self._resource_limits.max_tool_calls_per_hour,
3520
+ "limits_enforced": self._resource_limits.enforce_limits,
3521
+ }
3522
+
3523
+ async def _check_token_rate_limit(self) -> Dict[str, Any]:
3524
+ """
3525
+ Check token rate limits.
3526
+
3527
+ Returns:
3528
+ Dict with 'available' (bool) and limit details
3529
+ """
3530
+ if not self._resource_limits.enforce_limits:
3531
+ return {"available": True}
3532
+
3533
+ current_time = time.time()
3534
+
3535
+ # Clean old entries (older than 1 hour)
3536
+ self._token_usage_window = [(ts, count) for ts, count in self._token_usage_window if current_time - ts < 3600]
3537
+
3538
+ # Check per-minute limit
3539
+ if self._resource_limits.max_tokens_per_minute is not None:
3540
+ tokens_last_minute = sum(count for ts, count in self._token_usage_window if current_time - ts < 60)
3541
+ if tokens_last_minute >= self._resource_limits.max_tokens_per_minute:
3542
+ return {
3543
+ "available": False,
3544
+ "reason": "Token rate limit (per minute) reached",
3545
+ "tokens_used": tokens_last_minute,
3546
+ "limit": self._resource_limits.max_tokens_per_minute,
3547
+ "window": "minute",
3548
+ }
3549
+
3550
+ # Check per-hour limit
3551
+ if self._resource_limits.max_tokens_per_hour is not None:
3552
+ tokens_last_hour = sum(count for ts, count in self._token_usage_window)
3553
+ if tokens_last_hour >= self._resource_limits.max_tokens_per_hour:
3554
+ return {
3555
+ "available": False,
3556
+ "reason": "Token rate limit (per hour) reached",
3557
+ "tokens_used": tokens_last_hour,
3558
+ "limit": self._resource_limits.max_tokens_per_hour,
3559
+ "window": "hour",
3560
+ }
3561
+
3562
+ return {"available": True}
3563
+
3564
+ async def _check_tool_call_rate_limit(self) -> Dict[str, Any]:
3565
+ """
3566
+ Check tool call rate limits.
3567
+
3568
+ Returns:
3569
+ Dict with 'available' (bool) and limit details
3570
+ """
3571
+ if not self._resource_limits.enforce_limits:
3572
+ return {"available": True}
3573
+
3574
+ current_time = time.time()
3575
+
3576
+ # Clean old entries (older than 1 hour)
3577
+ self._tool_call_window = [ts for ts in self._tool_call_window if current_time - ts < 3600]
3578
+
3579
+ # Check per-minute limit
3580
+ if self._resource_limits.max_tool_calls_per_minute is not None:
3581
+ calls_last_minute = sum(1 for ts in self._tool_call_window if current_time - ts < 60)
3582
+ if calls_last_minute >= self._resource_limits.max_tool_calls_per_minute:
3583
+ return {
3584
+ "available": False,
3585
+ "reason": "Tool call rate limit (per minute) reached",
3586
+ "calls_made": calls_last_minute,
3587
+ "limit": self._resource_limits.max_tool_calls_per_minute,
3588
+ "window": "minute",
3589
+ }
3590
+
3591
+ # Check per-hour limit
3592
+ if self._resource_limits.max_tool_calls_per_hour is not None:
3593
+ calls_last_hour = len(self._tool_call_window)
3594
+ if calls_last_hour >= self._resource_limits.max_tool_calls_per_hour:
3595
+ return {
3596
+ "available": False,
3597
+ "reason": "Tool call rate limit (per hour) reached",
3598
+ "calls_made": calls_last_hour,
3599
+ "limit": self._resource_limits.max_tool_calls_per_hour,
3600
+ "window": "hour",
3601
+ }
3602
+
3603
+ return {"available": True}
3604
+
3605
+ # ==================== Error Recovery (Phase 8 - Tasks 1.16.18-1.16.22) ====================
3606
+
3607
+ async def execute_with_recovery(
3608
+ self,
3609
+ task: Dict[str, Any],
3610
+ context: Dict[str, Any],
3611
+ strategies: Optional[List[str]] = None,
3612
+ ) -> Dict[str, Any]:
3613
+ """
3614
+ Execute task with advanced error recovery strategies.
3615
+
3616
+ Tries multiple recovery strategies in sequence until one succeeds:
3617
+ 1. Retry with exponential backoff
3618
+ 2. Simplify task and retry
3619
+ 3. Use fallback approach
3620
+ 4. Delegate to another agent
3621
+
3622
+ Args:
3623
+ task: Task specification
3624
+ context: Execution context
3625
+ strategies: List of strategy names to try (uses default chain if None)
3626
+
3627
+ Returns:
3628
+ Task execution result
3629
+
3630
+ Raises:
3631
+ TaskExecutionError: If all recovery strategies fail
3632
+
3633
+ Example:
3634
+ ```python
3635
+ result = await agent.execute_with_recovery(
3636
+ task={"description": "Complex analysis"},
3637
+ context={},
3638
+ strategies=["retry", "simplify", "delegate"]
3639
+ )
3640
+ ```
3641
+ """
3642
+ from .models import RecoveryStrategy
3643
+ from .exceptions import TaskExecutionError
3644
+
3645
+ # Default strategy chain
3646
+ if strategies is None:
3647
+ strategies = [
3648
+ RecoveryStrategy.RETRY,
3649
+ RecoveryStrategy.SIMPLIFY,
3650
+ RecoveryStrategy.FALLBACK,
3651
+ RecoveryStrategy.DELEGATE,
3652
+ ]
3653
+
3654
+ errors = []
3655
+
3656
+ for strategy in strategies:
3657
+ try:
3658
+ logger.info(f"Attempting recovery strategy: {strategy}")
3659
+
3660
+ if strategy == RecoveryStrategy.RETRY:
3661
+ # Retry with exponential backoff (using existing retry mechanism)
3662
+ result = await self._execute_with_retry(self.execute_task, task, context)
3663
+ logger.info(f"Recovery successful with strategy: {strategy}")
3664
+ return result
3665
+
3666
+ elif strategy == RecoveryStrategy.SIMPLIFY:
3667
+ # Simplify task and retry
3668
+ simplified_task = await self._simplify_task(task)
3669
+ result = await self.execute_task(simplified_task, context)
3670
+ logger.info(f"Recovery successful with strategy: {strategy}")
3671
+ return result
3672
+
3673
+ elif strategy == RecoveryStrategy.FALLBACK:
3674
+ # Use fallback approach
3675
+ result = await self._execute_with_fallback(task, context)
3676
+ logger.info(f"Recovery successful with strategy: {strategy}")
3677
+ return result
3678
+
3679
+ elif strategy == RecoveryStrategy.DELEGATE:
3680
+ # Delegate to another agent
3681
+ if self._collaboration_enabled:
3682
+ result = await self._delegate_to_capable_agent(task, context)
3683
+ logger.info(f"Recovery successful with strategy: {strategy}")
3684
+ return result
3685
+ else:
3686
+ logger.warning("Delegation not available (collaboration disabled)")
3687
+ continue
3688
+
3689
+ except Exception as e:
3690
+ logger.warning(f"Recovery strategy {strategy} failed: {e}")
3691
+ errors.append({"strategy": strategy, "error": str(e)})
3692
+ continue
3693
+
3694
+ # All strategies failed
3695
+ error_summary = "; ".join([f"{e['strategy']}: {e['error']}" for e in errors])
3696
+ raise TaskExecutionError(
3697
+ f"All recovery strategies failed. Errors: {error_summary}",
3698
+ agent_id=self.agent_id,
3699
+ task_id=task.get("task_id"),
3700
+ )
3701
+
3702
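The recovery chain tries each strategy in order, records failures, and only raises once every strategy has been exhausted. A generic sketch of that control flow with throwaway strategies; the real method dispatches on `RecoveryStrategy` values from `.models`, which this diff does not show:

```python
# Generic sketch of the recovery chain above: try strategies in order, collect
# errors, raise with a summary if none succeeds. Strategy names are illustrative.
import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Tuple


async def run_with_recovery(
    strategies: List[Tuple[str, Callable[[], Awaitable[Dict[str, Any]]]]],
) -> Dict[str, Any]:
    errors = []
    for name, attempt in strategies:
        try:
            result = await attempt()
            result["recovered_with"] = name
            return result
        except Exception as exc:  # broad catch is intentional in this sketch
            errors.append(f"{name}: {exc}")
    raise RuntimeError("All recovery strategies failed. Errors: " + "; ".join(errors))


async def flaky() -> Dict[str, Any]:
    raise TimeoutError("model timed out")


async def simplified() -> Dict[str, Any]:
    return {"success": True, "output": "basic analysis"}


print(asyncio.run(run_with_recovery([("retry", flaky), ("simplify", simplified)])))
# {'success': True, 'output': 'basic analysis', 'recovered_with': 'simplify'}
```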
+ async def _simplify_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
3703
+ """
3704
+ Simplify a task to make it easier to execute.
3705
+
3706
+ Strategies:
3707
+ - Reduce complexity by breaking into smaller parts
3708
+ - Remove optional requirements
3709
+ - Use simpler language
3710
+
3711
+ Args:
3712
+ task: Original task specification
3713
+
3714
+ Returns:
3715
+ Simplified task specification
3716
+
3717
+ Example:
3718
+ ```python
3719
+ simplified = await agent._simplify_task(
3720
+ {"description": "Perform comprehensive analysis with visualizations"}
3721
+ )
3722
+ # Returns: {"description": "Perform basic analysis"}
3723
+ ```
3724
+ """
3725
+ description = task.get("description", "")
3726
+
3727
+ # Simple heuristics for simplification
3728
+ simplified_description = description
3729
+
3730
+ # Remove complexity keywords
3731
+ complexity_words = [
3732
+ "comprehensive",
3733
+ "detailed",
3734
+ "thorough",
3735
+ "extensive",
3736
+ "in-depth",
3737
+ "complete",
3738
+ "full",
3739
+ "exhaustive",
3740
+ ]
3741
+ for word in complexity_words:
3742
+ simplified_description = simplified_description.replace(word, "basic")
3743
+
3744
+ # Remove optional requirements
3745
+ optional_phrases = [
3746
+ "with visualizations",
3747
+ "with charts",
3748
+ "with graphs",
3749
+ "with examples",
3750
+ "with details",
3751
+ "with explanations",
3752
+ ]
3753
+ for phrase in optional_phrases:
3754
+ simplified_description = simplified_description.replace(phrase, "")
3755
+
3756
+ # Clean up extra spaces
3757
+ simplified_description = " ".join(simplified_description.split())
3758
+
3759
+ simplified_task = task.copy()
3760
+ simplified_task["description"] = simplified_description
3761
+ simplified_task["simplified"] = True
3762
+ simplified_task["original_description"] = description
3763
+
3764
+ logger.debug(f"Simplified task: '{description}' -> '{simplified_description}'")
3765
+
3766
+ return simplified_task
3767
+
3768
+ async def _execute_with_fallback(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
3769
+ """
3770
+ Execute task with fallback approach.
3771
+
3772
+ Uses a simpler, more reliable approach when the primary approach fails.
3773
+
3774
+ Args:
3775
+ task: Task specification
3776
+ context: Execution context
3777
+
3778
+ Returns:
3779
+ Task execution result
3780
+
3781
+ Example:
3782
+ ```python
3783
+ result = await agent._execute_with_fallback(task, context)
3784
+ ```
3785
+ """
3786
+ # Create fallback task with reduced requirements
3787
+ fallback_task = task.copy()
3788
+ fallback_task["fallback_mode"] = True
3789
+
3790
+ # Reduce max_tokens if specified
3791
+ if "max_tokens" in context:
3792
+ context = context.copy()
3793
+ context["max_tokens"] = min(context["max_tokens"], 1000)
3794
+
3795
+ # Reduce temperature for more deterministic output
3796
+ if "temperature" in context:
3797
+ context = context.copy()
3798
+ context["temperature"] = 0.3
3799
+
3800
+ logger.info("Executing with fallback approach (reduced requirements)")
3801
+
3802
+ # Execute with modified parameters
3803
+ result = await self.execute_task(fallback_task, context)
3804
+ result["fallback_used"] = True
3805
+
3806
+ return result
3807
+
3808
+ async def _delegate_to_capable_agent(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
3809
+ """
3810
+ Delegate a task to a capable agent as a recovery strategy.
3811
+
3812
+ Finds an agent capable of handling the task and delegates to it.
3813
+
3814
+ Args:
3815
+ task: Task specification
3816
+ context: Execution context
3817
+
3818
+ Returns:
3819
+ Task execution result from delegated agent
3820
+
3821
+ Raises:
3822
+ ValueError: If no capable agent found
3823
+
3824
+ Example:
3825
+ ```python
3826
+ result = await agent._delegate_to_capable_agent(task, context)
3827
+ ```
3828
+ """
3829
+ if not self._collaboration_enabled:
3830
+ raise ValueError("Collaboration not enabled, cannot delegate")
3831
+
3832
+ # Try to classify task and find capable agents
3833
+ task_type = await self._classify_task(task)
3834
+
3835
+ # Look for agents with matching capabilities
3836
+ capable_agents = []
3837
+ for agent_id, agent in self._agent_registry.items():
3838
+ if agent_id == self.agent_id:
3839
+ continue # Skip self
3840
+
3841
+ # Check if agent has relevant capabilities
3842
+ agent_capabilities = getattr(agent, "capabilities", [])
3843
+ if task_type in agent_capabilities or "general" in agent_capabilities:
3844
+ capable_agents.append(agent)
3845
+
3846
+ if not capable_agents:
3847
+ # Try any available agent as last resort
3848
+ capable_agents = [agent for agent_id, agent in self._agent_registry.items() if agent_id != self.agent_id]
3849
+
3850
+ if not capable_agents:
3851
+ raise ValueError("No capable agents available for delegation")
3852
+
3853
+ # Delegate to first capable agent
3854
+ target_agent = capable_agents[0]
3855
+ logger.info(f"Delegating task to {target_agent.agent_id} for recovery")
3856
+
3857
+ result = await target_agent.execute_task(task, context={**context, "delegated_by": self.agent_id, "recovery_delegation": True})
3858
+
3859
+ result["delegated_to"] = target_agent.agent_id
3860
+ result["recovery_delegation"] = True
3861
+
3862
+ return result
3863
+
3864
+ def __str__(self) -> str:
3865
+ """String representation."""
3866
+ return f"Agent({self.agent_id}, {self.name}, {self.agent_type.value}, {self._state.value})"
3867
+
3868
+ def __repr__(self) -> str:
3869
+ """Detailed representation."""
3870
+ return f"BaseAIAgent(agent_id='{self.agent_id}', name='{self.name}', " f"type='{self.agent_type.value}', state='{self._state.value}')"