aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +399 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3870 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1435 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +884 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +364 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +224 -36
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +324 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +457 -0
  199. aiecs/llm/clients/googleai_client.py +241 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +897 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1323 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1011 -0
  271. aiecs/tools/docs/document_writer_tool.py +1829 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +175 -131
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
  321. aiecs-1.7.6.dist-info/RECORD +337 -0
  322. aiecs-1.7.6.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
aiecs/domain/agent/llm_agent.py
@@ -0,0 +1,692 @@
+"""
+LLM Agent
+
+Agent implementation powered by LLM for text generation and reasoning.
+"""
+
+import logging
+from typing import Dict, List, Any, Optional, Union, TYPE_CHECKING, AsyncIterator
+from datetime import datetime
+
+from aiecs.llm import BaseLLMClient, CacheControl, LLMMessage
+
+from .base_agent import BaseAIAgent
+from .models import AgentType, AgentConfiguration
+from .exceptions import TaskExecutionError
+
+if TYPE_CHECKING:
+    from aiecs.llm.protocols import LLMClientProtocol
+    from aiecs.domain.agent.integration.protocols import ConfigManagerProtocol
+
+logger = logging.getLogger(__name__)
+
+
+class LLMAgent(BaseAIAgent):
+    """
+    LLM-powered agent for text generation and reasoning.
+
+    This agent uses an LLM client to process tasks and generate responses.
+
+    **LLM Client Configuration:**
+    - BaseLLMClient: Standard LLM clients (OpenAI, xAI, etc.)
+    - Custom clients: Any object implementing LLMClientProtocol (duck typing)
+    - No inheritance required: Custom clients work without BaseLLMClient
+
+    Examples:
+        # Example 1: Basic usage with standard client (backward compatible)
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=OpenAIClient(),
+            config=config
+        )
+
+        # Example 2: Custom LLM client without BaseLLMClient inheritance
+        class CustomLLMClient:
+            provider_name = "custom"
+
+            def __init__(self, api_key: str):
+                self.api_key = api_key
+                self.call_count = 0
+
+            async def generate_text(self, messages, **kwargs):
+                self.call_count += 1
+                # Custom implementation
+                return LLMResponse(
+                    content="Custom response",
+                    provider="custom",
+                    model="custom-model"
+                )
+
+            async def stream_text(self, messages, **kwargs):
+                # Custom streaming
+                tokens = ["Hello", " ", "world", "!"]
+                for token in tokens:
+                    yield token
+                    await asyncio.sleep(0.1)
+
+            async def close(self):
+                # Cleanup
+                pass
+
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=CustomLLMClient(api_key="..."),  # Works without BaseLLMClient!
+            config=config
+        )
+
+        # Example 3: LLM client wrapper with additional features
+        class RetryLLMClient:
+            provider_name = "retry_wrapper"
+
+            def __init__(self, base_client, max_retries: int = 3):
+                self.base_client = base_client
+                self.max_retries = max_retries
+                self.retry_count = 0
+
+            async def generate_text(self, messages, **kwargs):
+                for attempt in range(self.max_retries):
+                    try:
+                        return await self.base_client.generate_text(messages, **kwargs)
+                    except Exception as e:
+                        if attempt == self.max_retries - 1:
+                            raise
+                        self.retry_count += 1
+                        await asyncio.sleep(2 ** attempt)  # Exponential backoff
+
+            async def stream_text(self, messages, **kwargs):
+                async for token in self.base_client.stream_text(messages, **kwargs):
+                    yield token
+
+            async def close(self):
+                await self.base_client.close()
+
+        # Wrap existing client
+        base_client = OpenAIClient()
+        retry_client = RetryLLMClient(base_client, max_retries=3)
+
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=retry_client,  # Custom wrapper with retry logic
+            config=config
+        )
+
+        # Example 4: LLM client with caching
+        class CachedLLMClient:
+            provider_name = "cached_wrapper"
+
+            def __init__(self, base_client):
+                self.base_client = base_client
+                self.cache = {}
+
+            async def generate_text(self, messages, **kwargs):
+                # Generate cache key
+                cache_key = hash(str(messages) + str(kwargs))
+                if cache_key in self.cache:
+                    return self.cache[cache_key]
+
+                # Call base client
+                response = await self.base_client.generate_text(messages, **kwargs)
+                self.cache[cache_key] = response
+                return response
+
+            async def stream_text(self, messages, **kwargs):
+                # Streaming bypasses cache
+                async for token in self.base_client.stream_text(messages, **kwargs):
+                    yield token
+
+            async def close(self):
+                await self.base_client.close()
+
+        cached_client = CachedLLMClient(OpenAIClient())
+
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=cached_client,  # Cached client
+            config=config
+        )
+
+        # Example 5: Streaming with custom client
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=CustomLLMClient(api_key="..."),
+            config=config
+        )
+
+        # Stream task execution
+        async for event in agent.execute_task_streaming(task, context):
+            if event['type'] == 'token':
+                print(event['content'], end='', flush=True)
+            elif event['type'] == 'result':
+                print(f"\\nFinal result: {event['output']}")
+
+        # Example 6: Full-featured agent with all options
+        from aiecs.domain.context import ContextEngine
+        from aiecs.domain.agent.models import ResourceLimits
+
+        context_engine = ContextEngine()
+        await context_engine.initialize()
+
+        resource_limits = ResourceLimits(
+            max_concurrent_tasks=5,
+            max_tokens_per_minute=10000
+        )
+
+        agent = LLMAgent(
+            agent_id="agent1",
+            name="My LLM Agent",
+            llm_client=RetryLLMClient(CachedLLMClient(OpenAIClient())),
+            config=config,
+            config_manager=DatabaseConfigManager(),
+            checkpointer=RedisCheckpointer(),
+            context_engine=context_engine,
+            collaboration_enabled=True,
+            agent_registry={"agent2": other_agent},
+            learning_enabled=True,
+            resource_limits=resource_limits
+        )
+    """
+
+    def __init__(
+        self,
+        agent_id: str,
+        name: str,
+        llm_client: Union[BaseLLMClient, "LLMClientProtocol"],
+        config: AgentConfiguration,
+        description: Optional[str] = None,
+        version: str = "1.0.0",
+        config_manager: Optional["ConfigManagerProtocol"] = None,
+        checkpointer: Optional[Any] = None,
+        context_engine: Optional[Any] = None,
+        collaboration_enabled: bool = False,
+        agent_registry: Optional[Dict[str, Any]] = None,
+        learning_enabled: bool = False,
+        resource_limits: Optional[Any] = None,
+    ):
+        """
+        Initialize LLM agent.
+
+        Args:
+            agent_id: Unique agent identifier
+            name: Agent name
+            llm_client: LLM client instance (BaseLLMClient or any LLMClientProtocol)
+            config: Agent configuration
+            description: Optional description
+            version: Agent version
+            config_manager: Optional configuration manager for dynamic config
+            checkpointer: Optional checkpointer for state persistence
+            context_engine: Optional context engine for persistent storage
+            collaboration_enabled: Enable collaboration features
+            agent_registry: Registry of other agents for collaboration
+            learning_enabled: Enable learning features
+            resource_limits: Optional resource limits configuration
+
+        Example with custom LLM client:
+            ```python
+            # Custom LLM client without BaseLLMClient inheritance
+            class CustomLLMClient:
+                provider_name = "custom"
+
+                async def generate_text(self, messages, **kwargs):
+                    # Custom implementation
+                    return LLMResponse(...)
+
+                async def stream_text(self, messages, **kwargs):
+                    # Custom streaming
+                    yield "token"
+
+                async def close(self):
+                    # Cleanup
+                    pass
+
+            agent = LLMAgent(
+                agent_id="agent1",
+                name="My LLM Agent",
+                llm_client=CustomLLMClient(),  # Works without BaseLLMClient!
+                config=config
+            )
+            ```
+
+        Example with standard client (backward compatible):
+            ```python
+            agent = LLMAgent(
+                agent_id="agent1",
+                name="My LLM Agent",
+                llm_client=OpenAIClient(),
+                config=config
+            )
+            ```
+        """
+        super().__init__(
+            agent_id=agent_id,
+            name=name,
+            agent_type=AgentType.CONVERSATIONAL,
+            config=config,
+            description=description or "LLM-powered conversational agent",
+            version=version,
+            llm_client=llm_client,  # type: ignore[arg-type]
+            config_manager=config_manager,
+            checkpointer=checkpointer,
+            context_engine=context_engine,
+            collaboration_enabled=collaboration_enabled,
+            agent_registry=agent_registry,
+            learning_enabled=learning_enabled,
+            resource_limits=resource_limits,
+        )
+
+        # Store LLM client reference (from BaseAIAgent or local)
+        self.llm_client = self._llm_client if self._llm_client else llm_client
+        self._system_prompt: Optional[str] = None
+        self._conversation_history: List[LLMMessage] = []
+
+        logger.info(f"LLMAgent initialized: {agent_id} with client {self.llm_client.provider_name}")
+
+    async def _initialize(self) -> None:
+        """Initialize LLM agent - validate LLM client and build system prompt."""
+        # Validate LLM client using BaseAIAgent helper
+        self._validate_llm_client()
+
+        # Build system prompt
+        self._system_prompt = self._build_system_prompt()
+        logger.debug(f"LLMAgent {self.agent_id} initialized with system prompt")
+
+    async def _shutdown(self) -> None:
+        """Shutdown LLM agent."""
+        # Clear conversation history
+        self._conversation_history.clear()
+
+        # Close LLM client if it has a close method
+        if hasattr(self.llm_client, "close"):
+            await self.llm_client.close()
+
+        logger.info(f"LLMAgent {self.agent_id} shut down")
+
+    def _build_system_prompt(self) -> str:
+        """Build system prompt from configuration.
+
+        Precedence order:
+        1. config.system_prompt - Direct custom prompt (highest priority)
+        2. Assembled from goal/backstory/domain_knowledge/reasoning_guidance
+        3. Default fallback: "You are a helpful AI assistant."
+        """
+        # 1. Custom system_prompt takes precedence
+        if self._config.system_prompt:
+            return self._config.system_prompt
+
+        # 2. Assemble from individual fields
+        parts = []
+
+        if self._config.goal:
+            parts.append(f"Goal: {self._config.goal}")
+
+        if self._config.backstory:
+            parts.append(f"Background: {self._config.backstory}")
+
+        if self._config.domain_knowledge:
+            parts.append(f"Domain Knowledge: {self._config.domain_knowledge}")
+
+        if self._config.reasoning_guidance:
+            parts.append(f"Reasoning Approach: {self._config.reasoning_guidance}")
+
+        if parts:
+            return "\n\n".join(parts)
+
+        # 3. Default fallback
+        return "You are a helpful AI assistant."
+
+    async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Execute a task using the LLM.
+
+        Args:
+            task: Task specification with 'description' or 'prompt'
+            context: Execution context
+
+        Returns:
+            Execution result with 'output', 'reasoning', 'tokens_used'
+
+        Raises:
+            TaskExecutionError: If task execution fails
+        """
+        start_time = datetime.utcnow()
+
+        try:
+            # Extract task description
+            task_description = task.get("description") or task.get("prompt") or task.get("task")
+            if not task_description:
+                raise TaskExecutionError(
+                    "Task must contain 'description', 'prompt', or 'task' field",
+                    agent_id=self.agent_id,
+                )
+
+            # Transition to busy state
+            self._transition_state(self.state.__class__.BUSY)
+            self._current_task_id = task.get("task_id")
+
+            # Build messages
+            messages = self._build_messages(task_description, context)
+
+            # Call LLM
+            response = await self.llm_client.generate_text(
+                messages=messages,
+                model=self._config.llm_model,
+                temperature=self._config.temperature,
+                max_tokens=self._config.max_tokens,
+            )
+
+            # Extract result
+            output = response.content
+
+            # Store in conversation history if enabled
+            if self._config.memory_enabled:
+                self._conversation_history.append(LLMMessage(role="user", content=task_description))
+                self._conversation_history.append(LLMMessage(role="assistant", content=output))
+
+            # Calculate execution time
+            execution_time = (datetime.utcnow() - start_time).total_seconds()
+
+            # Update metrics
+            self.update_metrics(
+                execution_time=execution_time,
+                success=True,
+                tokens_used=getattr(response, "total_tokens", None),
+            )
+
+            # Transition back to active
+            self._transition_state(self.state.__class__.ACTIVE)
+            self._current_task_id = None
+            self.last_active_at = datetime.utcnow()
+
+            return {
+                "success": True,
+                "output": output,
+                "provider": response.provider,
+                "model": response.model,
+                "tokens_used": getattr(response, "total_tokens", None),
+                "execution_time": execution_time,
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+
+        except Exception as e:
+            logger.error(f"Task execution failed for {self.agent_id}: {e}")
+
+            # Update metrics for failure
+            execution_time = (datetime.utcnow() - start_time).total_seconds()
+            self.update_metrics(execution_time=execution_time, success=False)
+
+            # Transition to error state
+            self._transition_state(self.state.__class__.ERROR)
+            self._current_task_id = None
+
+            raise TaskExecutionError(
+                f"Task execution failed: {str(e)}",
+                agent_id=self.agent_id,
+                task_id=task.get("task_id"),
+            )
+
+    async def process_message(self, message: str, sender_id: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Process an incoming message.
+
+        Args:
+            message: Message content
+            sender_id: Optional sender identifier
+
+        Returns:
+            Response dictionary with 'response', 'tokens_used'
+        """
+        try:
+            # Build task from message
+            task = {
+                "description": message,
+                "task_id": f"msg_{datetime.utcnow().timestamp()}",
+            }
+
+            # Execute as task
+            result = await self.execute_task(task, {"sender_id": sender_id})
+
+            return {
+                "response": result.get("output"),
+                "tokens_used": result.get("tokens_used"),
+                "timestamp": result.get("timestamp"),
+            }
+
+        except Exception as e:
+            logger.error(f"Message processing failed for {self.agent_id}: {e}")
+            raise
+
+    async def execute_task_streaming(self, task: Dict[str, Any], context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
+        """
+        Execute a task with streaming LLM tokens.
+
+        Args:
+            task: Task specification with 'description' or 'prompt'
+            context: Execution context
+
+        Yields:
+            Dict[str, Any]: Event dictionaries with streaming tokens and final result
+
+        Example:
+            ```python
+            async for event in agent.execute_task_streaming(task, context):
+                if event['type'] == 'token':
+                    print(event['content'], end='', flush=True)
+                elif event['type'] == 'result':
+                    print(f"\\nTokens used: {event['tokens_used']}")
+            ```
+        """
+        start_time = datetime.utcnow()
+
+        try:
+            # Extract task description
+            task_description = task.get("description") or task.get("prompt") or task.get("task")
+            if not task_description:
+                yield {
+                    "type": "error",
+                    "error": "Task must contain 'description', 'prompt', or 'task' field",
+                    "timestamp": datetime.utcnow().isoformat(),
+                }
+                return
+
+            # Transition to busy state
+            self._transition_state(self.state.__class__.BUSY)
+            self._current_task_id = task.get("task_id")
+
+            # Yield status
+            yield {
+                "type": "status",
+                "status": "started",
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+
+            # Build messages
+            messages = self._build_messages(task_description, context)
+
+            # Stream LLM response
+            output_tokens = []
+            async for token in self.llm_client.stream_text(  # type: ignore[attr-defined]
+                messages=messages,
+                model=self._config.llm_model,
+                temperature=self._config.temperature,
+                max_tokens=self._config.max_tokens,
+            ):
+                output_tokens.append(token)
+                yield {
+                    "type": "token",
+                    "content": token,
+                    "timestamp": datetime.utcnow().isoformat(),
+                }
+
+            # Combine output
+            output = "".join(output_tokens)
+
+            # Store in conversation history if enabled
+            if self._config.memory_enabled:
+                self._conversation_history.append(LLMMessage(role="user", content=task_description))
+                self._conversation_history.append(LLMMessage(role="assistant", content=output))
+
+            # Calculate execution time
+            execution_time = (datetime.utcnow() - start_time).total_seconds()
+
+            # Update metrics
+            self.update_metrics(
+                execution_time=execution_time,
+                success=True,
+                tokens_used=None,  # Token count not available in streaming mode
+            )
+
+            # Transition back to active
+            self._transition_state(self.state.__class__.ACTIVE)
+            self._current_task_id = None
+            self.last_active_at = datetime.utcnow()
+
+            # Yield final result
+            yield {
+                "type": "result",
+                "success": True,
+                "output": output,
+                "provider": self.llm_client.provider_name,
+                "execution_time": execution_time,
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+
+        except Exception as e:
+            logger.error(f"Streaming task execution failed for {self.agent_id}: {e}")
+
+            # Update metrics for failure
+            execution_time = (datetime.utcnow() - start_time).total_seconds()
+            self.update_metrics(execution_time=execution_time, success=False)
+
+            # Transition to error state
+            self._transition_state(self.state.__class__.ERROR)
+            self._current_task_id = None
+
+            yield {
+                "type": "error",
+                "error": str(e),
+                "timestamp": datetime.utcnow().isoformat(),
+            }
+
+    async def process_message_streaming(self, message: str, sender_id: Optional[str] = None) -> AsyncIterator[str]:
+        """
+        Process a message with streaming response.
+
+        Args:
+            message: Message content
+            sender_id: Optional sender identifier
+
+        Yields:
+            str: Response text tokens
+
+        Example:
+            ```python
+            async for token in agent.process_message_streaming("Hello!"):
+                print(token, end='', flush=True)
+            ```
+        """
+        try:
+            # Build task from message
+            task = {
+                "description": message,
+                "task_id": f"msg_{datetime.utcnow().timestamp()}",
+            }
+
+            # Stream task execution
+            async for event in self.execute_task_streaming(task, {"sender_id": sender_id}):
+                if event["type"] == "token":
+                    yield event["content"]
+
+        except Exception as e:
+            logger.error(f"Streaming message processing failed for {self.agent_id}: {e}")
+            raise
+
+    def _build_messages(self, user_message: str, context: Dict[str, Any]) -> List[LLMMessage]:
+        """
+        Build LLM messages from task and context.
+
+        Args:
+            user_message: User message
+            context: Context dictionary
+
+        Returns:
+            List of LLM messages
+        """
+        messages = []
+
+        # Add system prompt with cache control if caching is enabled
+        if self._system_prompt:
+            cache_control = (
+                CacheControl(type="ephemeral")
+                if self._config.enable_prompt_caching
+                else None
+            )
+            messages.append(
+                LLMMessage(
+                    role="system",
+                    content=self._system_prompt,
+                    cache_control=cache_control,
+                )
+            )
+
+        # Add conversation history if available and memory enabled
+        if self._config.memory_enabled and self._conversation_history:
+            # Limit history to prevent token overflow
+            max_history = 10  # Keep last 10 exchanges
+            messages.extend(self._conversation_history[-max_history:])
+
+        # Add additional context if provided
+        if context:
+            context_str = self._format_context(context)
+            if context_str:
+                messages.append(
+                    LLMMessage(
+                        role="system",
+                        content=f"Additional Context:\n{context_str}",
+                    )
+                )
+
+        # Add user message
+        messages.append(LLMMessage(role="user", content=user_message))
+
+        return messages
+
+    def _format_context(self, context: Dict[str, Any]) -> str:
+        """Format context dictionary as string."""
+        relevant_fields = []
+
+        # Filter out internal fields
+        for key, value in context.items():
+            if not key.startswith("_") and value is not None:
+                relevant_fields.append(f"{key}: {value}")
+
+        return "\n".join(relevant_fields) if relevant_fields else ""
+
+    def clear_conversation_history(self) -> None:
+        """Clear conversation history."""
+        self._conversation_history.clear()
+        logger.info(f"LLMAgent {self.agent_id} conversation history cleared")
+
+    def get_conversation_history(self) -> List[Dict[str, str]]:
+        """Get conversation history."""
+        return [{"role": msg.role, "content": msg.content} for msg in self._conversation_history]
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "LLMAgent":
+        """
+        Deserialize LLMAgent from dictionary.
+
+        Note: LLM client must be provided separately as it cannot be serialized.
+
+        Args:
+            data: Dictionary representation
+
+        Returns:
+            LLMAgent instance
+        """
+        # This is a placeholder - actual implementation would require
+        # providing the LLM client separately
+        raise NotImplementedError("LLMAgent.from_dict requires LLM client to be provided separately. Use constructor instead.")
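
Taken together, the new `LLMAgent` surface above can be exercised end to end. The sketch below is a minimal, non-authoritative example assembled from the constructor and methods shown in this diff; the `AgentConfiguration` field values and the `OpenAIClient` import path are illustrative assumptions, and any lifecycle setup `BaseAIAgent` may require is elided.

```python
import asyncio

from aiecs.domain.agent.llm_agent import LLMAgent
from aiecs.domain.agent.models import AgentConfiguration
from aiecs.llm import OpenAIClient  # assumed import path; any LLMClientProtocol object works


async def main() -> None:
    # Field values are illustrative; only fields referenced by llm_agent.py are set.
    config = AgentConfiguration(
        llm_model="gpt-4o",
        temperature=0.2,
        max_tokens=1024,
        memory_enabled=True,
    )
    agent = LLMAgent(
        agent_id="agent1",
        name="Demo Agent",
        llm_client=OpenAIClient(),
        config=config,
    )
    # Depending on BaseAIAgent's lifecycle, an explicit initialize step may be needed here.

    # Blocking execution: returns a dict with 'output', 'tokens_used', etc.
    result = await agent.execute_task(
        {"task_id": "t1", "description": "Summarize what prompt caching does."}, {}
    )
    print(result["output"])

    # Streaming execution: yields response text tokens as they arrive.
    async for token in agent.process_message_streaming("Hello!"):
        print(token, end="", flush=True)


asyncio.run(main())
```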
aiecs/domain/agent/memory/__init__.py
@@ -0,0 +1,12 @@
+"""
+Agent Memory Module
+
+Conversation memory and history management.
+"""
+
+from .conversation import ConversationMemory, Session
+
+__all__ = [
+    "ConversationMemory",
+    "Session",
+]
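
The `ConversationMemory` and `Session` classes re-exported here live in `conversation.py` (entry 101 in the file list), whose API is not shown in this diff. What the diff does show is the in-process history `LLMAgent` keeps when `memory_enabled` is set; a short sketch using only those accessors, continuing from the previous example:

```python
# Inspect the per-agent history recorded when memory_enabled=True.
for msg in agent.get_conversation_history():  # [{"role": ..., "content": ...}, ...]
    print(f"{msg['role']}: {msg['content']}")

# Reset it; _build_messages() will stop prepending past exchanges.
agent.clear_conversation_history()
```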