aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiecs might be problematic. Click here for more details.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +435 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3949 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1731 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +894 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +377 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +230 -37
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +328 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +415 -0
  199. aiecs/llm/clients/googleai_client.py +314 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +1186 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1464 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1016 -0
  271. aiecs/tools/docs/document_writer_tool.py +2008 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +220 -141
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
  321. aiecs-1.7.17.dist-info/RECORD +337 -0
  322. aiecs-1.7.17.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,646 @@
1
+ """
2
+ AI Data Analysis Orchestrator - AI-powered end-to-end data analysis workflow coordination
3
+
4
+ This orchestrator coordinates multiple foundation tools to provide:
5
+ - Natural language driven analysis
6
+ - Automated workflow orchestration
7
+ - Multi-tool coordination
8
+ - Comprehensive analysis execution
9
+ - Support for various analysis modes
10
+ """
11
+
12
+ import logging
13
+ from typing import Dict, Any, List, Optional
14
+ from enum import Enum
15
+ from datetime import datetime
16
+
17
+ from pydantic import BaseModel, Field
18
+ from pydantic_settings import BaseSettings, SettingsConfigDict
19
+
20
+ from aiecs.tools.base_tool import BaseTool
21
+ from aiecs.tools import register_tool
22
+
23
+
24
class AnalysisMode(str, Enum):
    """Execution modes supported by the analysis orchestrator.

    Each member's value is the lowercase string form used in configuration
    and API payloads.
    """

    EXPLORATORY = "exploratory"
    DIAGNOSTIC = "diagnostic"
    PREDICTIVE = "predictive"
    PRESCRIPTIVE = "prescriptive"
    COMPARATIVE = "comparative"
    CAUSAL = "causal"
33
+
34
+
35
class AIProvider(str, Enum):
    """AI backends the orchestrator may integrate with in the future.

    Values are the lowercase provider identifiers used in configuration.
    """

    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
    LOCAL = "local"
42
+
43
+
44
class OrchestratorError(Exception):
    """Root exception for orchestrator failures; specific errors subclass it."""
46
+
47
+
48
class WorkflowError(OrchestratorError):
    """Signals that executing an analysis workflow failed."""
50
+
51
+
52
+ @register_tool("ai_data_analysis_orchestrator")
53
+ class AIDataAnalysisOrchestrator(BaseTool):
54
+ """
55
+ AI-powered data analysis orchestrator that can:
56
+ 1. Understand analysis requirements
57
+ 2. Automatically design analysis workflows
58
+ 3. Orchestrate multiple tools to complete analysis
59
+ 4. Generate comprehensive analysis reports
60
+
61
+ Coordinates foundation tools: data_loader, data_profiler, data_transformer,
62
+ data_visualizer, statistical_analyzer, model_trainer
63
+ """
64
+
65
+ # Configuration schema
66
+ class Config(BaseSettings):
67
+ """Configuration for the AI data analysis orchestrator tool
68
+
69
+ Automatically reads from environment variables with AI_DATA_ORCHESTRATOR_ prefix.
70
+ Example: AI_DATA_ORCHESTRATOR_DEFAULT_MODE -> default_mode
71
+ """
72
+
73
+ model_config = SettingsConfigDict(env_prefix="AI_DATA_ORCHESTRATOR_")
74
+
75
+ default_mode: str = Field(default="exploratory", description="Default analysis mode to use")
76
+ max_iterations: int = Field(default=10, description="Maximum number of analysis iterations")
77
+ enable_auto_workflow: bool = Field(
78
+ default=True,
79
+ description="Whether to enable automatic workflow generation",
80
+ )
81
+ default_ai_provider: str = Field(default="openai", description="Default AI provider to use")
82
+ enable_caching: bool = Field(default=True, description="Whether to enable result caching")
83
+
84
+ def __init__(self, config: Optional[Dict[str, Any]] = None, **kwargs):
85
+ """Initialize AI Data Analysis Orchestrator
86
+
87
+ Configuration is automatically loaded by BaseTool from:
88
+ 1. Explicit config dict (highest priority)
89
+ 2. YAML config files (config/tools/ai_data_analysis_orchestrator.yaml)
90
+ 3. Environment variables (via dotenv from .env files)
91
+ 4. Tool defaults (lowest priority)
92
+
93
+ Args:
94
+ config: Optional configuration overrides
95
+ **kwargs: Additional arguments passed to BaseTool (e.g., tool_name)
96
+ """
97
+ super().__init__(config, **kwargs)
98
+
99
+ # Configuration is automatically loaded by BaseTool into self._config_obj
100
+ # Access config via self._config_obj (BaseSettings instance)
101
+ self.config = self._config_obj if self._config_obj else self.Config()
102
+
103
+ self.logger = logging.getLogger(__name__)
104
+ if not self.logger.handlers:
105
+ handler = logging.StreamHandler()
106
+ handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
107
+ self.logger.addHandler(handler)
108
+ self.logger.setLevel(logging.INFO)
109
+
110
+ # Initialize foundation tools
111
+ self._init_foundation_tools()
112
+
113
+ # Initialize AI providers (placeholder for future implementation)
114
+ self._init_ai_providers()
115
+
116
+ # Workflow cache
117
+ self.workflow_cache: Dict[str, Any] = {}
118
+
119
+ def _init_foundation_tools(self):
120
+ """Initialize foundation data analysis tools"""
121
+ self.foundation_tools = {}
122
+
123
+ try:
124
+ from aiecs.tools.statistics.data_loader_tool import DataLoaderTool
125
+
126
+ self.foundation_tools["data_loader"] = DataLoaderTool()
127
+ self.logger.info("DataLoaderTool initialized")
128
+ except ImportError:
129
+ self.logger.warning("DataLoaderTool not available")
130
+
131
+ try:
132
+ from aiecs.tools.statistics.data_profiler_tool import (
133
+ DataProfilerTool,
134
+ )
135
+
136
+ self.foundation_tools["data_profiler"] = DataProfilerTool()
137
+ self.logger.info("DataProfilerTool initialized")
138
+ except ImportError:
139
+ self.logger.warning("DataProfilerTool not available")
140
+
141
+ try:
142
+ from aiecs.tools.statistics.data_transformer_tool import (
143
+ DataTransformerTool,
144
+ )
145
+
146
+ self.foundation_tools["data_transformer"] = DataTransformerTool()
147
+ self.logger.info("DataTransformerTool initialized")
148
+ except ImportError:
149
+ self.logger.warning("DataTransformerTool not available")
150
+
151
+ try:
152
+ from aiecs.tools.statistics.data_visualizer_tool import (
153
+ DataVisualizerTool,
154
+ )
155
+
156
+ self.foundation_tools["data_visualizer"] = DataVisualizerTool()
157
+ self.logger.info("DataVisualizerTool initialized")
158
+ except ImportError:
159
+ self.logger.warning("DataVisualizerTool not available")
160
+
161
+ try:
162
+ from aiecs.tools.statistics.statistical_analyzer_tool import (
163
+ StatisticalAnalyzerTool,
164
+ )
165
+
166
+ self.foundation_tools["statistical_analyzer"] = StatisticalAnalyzerTool()
167
+ self.logger.info("StatisticalAnalyzerTool initialized")
168
+ except ImportError:
169
+ self.logger.warning("StatisticalAnalyzerTool not available")
170
+
171
+ try:
172
+ from aiecs.tools.statistics.model_trainer_tool import (
173
+ ModelTrainerTool,
174
+ )
175
+
176
+ self.foundation_tools["model_trainer"] = ModelTrainerTool()
177
+ self.logger.info("ModelTrainerTool initialized")
178
+ except ImportError:
179
+ self.logger.warning("ModelTrainerTool not available")
180
+
181
+ def _init_ai_providers(self):
182
+ """Initialize AI providers (placeholder for future implementation)"""
183
+ self.ai_providers = {}
184
+ # Future integration point for AIECS client
185
+ # try:
186
+ # from aiecs import AIECS
187
+ # self.aiecs_client = AIECS()
188
+ # self.ai_providers['aiecs'] = self.aiecs_client
189
+ # except ImportError:
190
+ # self.logger.warning("AIECS client not available")
191
+
192
+ # Schema definitions
193
+ class AnalyzeSchema(BaseModel):
194
+ """Schema for analyze operation"""
195
+
196
+ data_source: str = Field(description="Path to data source or data itself")
197
+ question: str = Field(description="Analysis question in natural language")
198
+ mode: AnalysisMode = Field(default=AnalysisMode.EXPLORATORY, description="Analysis mode")
199
+ max_iterations: int = Field(default=10, description="Maximum workflow iterations")
200
+
201
+ class AutoAnalyzeDatasetSchema(BaseModel):
202
+ """Schema for auto_analyze_dataset operation"""
203
+
204
+ data_source: str = Field(description="Path to data source")
205
+ focus_areas: Optional[List[str]] = Field(default=None, description="Areas to focus on")
206
+ generate_report: bool = Field(default=True, description="Generate analysis report")
207
+
208
+ class OrchestrateWorkflowSchema(BaseModel):
209
+ """Schema for orchestrate_workflow operation"""
210
+
211
+ workflow_steps: List[Dict[str, Any]] = Field(description="Workflow steps to execute")
212
+ data_source: str = Field(description="Data source")
213
+
214
+ def analyze(
215
+ self,
216
+ data_source: str,
217
+ question: str,
218
+ mode: AnalysisMode = AnalysisMode.EXPLORATORY,
219
+ max_iterations: int = 10,
220
+ ) -> Dict[str, Any]:
221
+ """
222
+ Perform AI-driven data analysis based on natural language question.
223
+
224
+ Args:
225
+ data_source: Path to data source file
226
+ question: Analysis question in natural language
227
+ mode: Analysis mode to use
228
+ max_iterations: Maximum workflow iterations
229
+
230
+ Returns:
231
+ Dict containing:
232
+ - analysis_plan: Planned analysis steps
233
+ - execution_log: Log of executed steps
234
+ - findings: Analysis findings and insights
235
+ - recommendations: Recommendations based on analysis
236
+ - report: Analysis report
237
+ """
238
+ try:
239
+ self.logger.info(f"Starting analysis: {question}")
240
+
241
+ # Design analysis workflow based on question and mode
242
+ workflow = self._design_workflow(question, mode, data_source)
243
+
244
+ # Execute workflow
245
+ execution_results = self._execute_workflow(workflow, data_source, max_iterations)
246
+
247
+ # Generate findings from results
248
+ findings = self._generate_findings(execution_results)
249
+
250
+ # Generate recommendations
251
+ recommendations = self._generate_recommendations(findings)
252
+
253
+ # Generate report
254
+ report = self._generate_analysis_report(
255
+ question,
256
+ workflow,
257
+ execution_results,
258
+ findings,
259
+ recommendations,
260
+ )
261
+
262
+ return {
263
+ "analysis_plan": workflow,
264
+ "execution_log": execution_results.get("log", []),
265
+ "findings": findings,
266
+ "recommendations": recommendations,
267
+ "report": report,
268
+ "mode": mode.value,
269
+ "timestamp": datetime.now().isoformat(),
270
+ }
271
+
272
+ except Exception as e:
273
+ self.logger.error(f"Error in analysis: {e}")
274
+ raise WorkflowError(f"Analysis failed: {e}")
275
+
276
def auto_analyze_dataset(
    self,
    data_source: str,
    focus_areas: Optional[List[str]] = None,
    generate_report: bool = True,
) -> Dict[str, Any]:
    """
    Automatically analyze dataset without specific question.

    Runs a fixed pipeline: load -> profile -> (conditional) auto-transform ->
    visualize -> correlation analysis, then optionally renders a report.

    Args:
        data_source: Path to data source
        focus_areas: Specific areas to focus on
        generate_report: Whether to generate comprehensive report

    Returns:
        Dict containing comprehensive analysis results

    Raises:
        WorkflowError: If any stage of the pipeline fails.
    """
    try:
        self.logger.info(f"Auto-analyzing dataset: {data_source}")

        # Load data
        load_result = self.foundation_tools["data_loader"].load_data(source=data_source)
        data = load_result["data"]

        # Profile data
        profile_result = self.foundation_tools["data_profiler"].profile_dataset(data=data, level="comprehensive")

        # Auto-transform only when profiling surfaced quality issues.
        # Initialized up front instead of probing locals() afterwards, so the
        # "transformations_applied" field is always well-defined.
        transform_result = None
        if profile_result.get("quality_issues"):
            transform_result = self.foundation_tools["data_transformer"].auto_transform(data=data)
            data = transform_result["transformed_data"]

        # Generate visualizations
        viz_result = self.foundation_tools["data_visualizer"].auto_visualize_dataset(
            data=data,
            focus_areas=focus_areas or ["distributions", "correlations"],
        )

        # Correlation analysis needs at least two numeric columns.
        numeric_cols = data.select_dtypes(include=["number"]).columns.tolist()
        stats_result = {}
        if len(numeric_cols) >= 2:
            stats_result = self.foundation_tools["statistical_analyzer"].analyze_correlation(data=data, variables=numeric_cols)

        # Compile results
        results = {
            "data_profile": profile_result,
            "transformations_applied": transform_result,
            "visualizations": viz_result,
            "statistical_analysis": stats_result,
            "data_source": data_source,
            "timestamp": datetime.now().isoformat(),
        }

        if generate_report:
            results["report"] = self._generate_auto_analysis_report(results)

        return results

    except Exception as e:
        self.logger.error(f"Error in auto analysis: {e}")
        # Chain the original exception so the root cause stays in the traceback.
        raise WorkflowError(f"Auto analysis failed: {e}") from e
338
+
339
def orchestrate_workflow(self, workflow_steps: List[Dict[str, Any]], data_source: str) -> Dict[str, Any]:
    """
    Orchestrate a custom workflow with specified steps.

    Args:
        workflow_steps: List of workflow steps with tool and operation info
        data_source: Data source path

    Returns:
        Dict containing workflow execution results

    Raises:
        WorkflowError: If executing the steps fails.
    """
    step_count = len(workflow_steps)
    try:
        # Cap iterations at the number of supplied steps.
        execution = self._execute_workflow(
            {"steps": workflow_steps},
            data_source,
            max_iterations=step_count,
        )
    except Exception as e:
        self.logger.error(f"Error orchestrating workflow: {e}")
        raise WorkflowError(f"Workflow orchestration failed: {e}")
    return {
        "workflow_results": execution,
        "total_steps": step_count,
        "status": "completed",
    }
366
+
367
+ # Internal workflow methods
368
+
369
def _design_workflow(self, question: str, mode: AnalysisMode, data_source: str) -> Dict[str, Any]:
    """Design analysis workflow based on question and mode"""

    def step(tool: str, operation: str, **params: Any) -> Dict[str, Any]:
        # Small factory keeping the per-mode step tables below compact.
        return {"tool": tool, "operation": operation, "params": params}

    # Every workflow starts by loading the data source.
    load = step("data_loader", "load_data", source=data_source)

    if mode == AnalysisMode.EXPLORATORY:
        steps = [
            load,
            step("data_profiler", "profile_dataset", level="comprehensive"),
            step("data_visualizer", "auto_visualize_dataset", max_charts=5),
            step("statistical_analyzer", "analyze_correlation"),
        ]
    elif mode == AnalysisMode.PREDICTIVE:
        steps = [
            load,
            step("data_profiler", "profile_dataset"),
            step("data_transformer", "auto_transform"),
            step("model_trainer", "auto_select_model"),
        ]
    elif mode == AnalysisMode.DIAGNOSTIC:
        steps = [
            load,
            step("data_profiler", "detect_quality_issues"),
            step("statistical_analyzer", "test_hypothesis"),
        ]
    else:
        # Unknown modes fall back to a minimal exploratory pass.
        steps = [
            load,
            step("data_profiler", "profile_dataset"),
        ]

    return {"question": question, "mode": mode.value, "steps": steps}
454
+
455
+ def _execute_workflow(self, workflow: Dict[str, Any], data_source: str, max_iterations: int) -> Dict[str, Any]:
456
+ """Execute workflow steps"""
457
+ results: Dict[str, Any] = {"log": [], "data": None, "outputs": {}}
458
+
459
+ current_data = None
460
+
461
+ for i, step in enumerate(workflow["steps"][:max_iterations]):
462
+ try:
463
+ tool_name = step["tool"]
464
+ operation = step["operation"]
465
+ params = step.get("params", {})
466
+
467
+ self.logger.info(f"Executing step {i+1}: {tool_name}.{operation}")
468
+
469
+ # Get tool
470
+ tool = self.foundation_tools.get(tool_name)
471
+ if not tool:
472
+ self.logger.warning(f"Tool {tool_name} not available, skipping")
473
+ continue
474
+
475
+ # Prepare parameters
476
+ if current_data is not None and "data" not in params:
477
+ params["data"] = current_data
478
+
479
+ # Execute operation
480
+ result = tool.run(operation, **params)
481
+
482
+ # Update current data if result contains data
483
+ if isinstance(result, dict) and "data" in result:
484
+ current_data = result["data"]
485
+ elif isinstance(result, dict) and "transformed_data" in result:
486
+ current_data = result["transformed_data"]
487
+
488
+ # Log execution
489
+ results["log"].append(
490
+ {
491
+ "step": i + 1,
492
+ "tool": tool_name,
493
+ "operation": operation,
494
+ "status": "success",
495
+ "summary": self._summarize_result(result),
496
+ }
497
+ )
498
+
499
+ results["outputs"][f"{tool_name}_{operation}"] = result
500
+
501
+ except Exception as e:
502
+ self.logger.error(f"Error in step {i+1}: {e}")
503
+ results["log"].append(
504
+ {
505
+ "step": i + 1,
506
+ "tool": step["tool"],
507
+ "operation": step["operation"],
508
+ "status": "failed",
509
+ "error": str(e),
510
+ }
511
+ )
512
+
513
+ results["data"] = current_data
514
+ return results
515
+
516
+ def _generate_findings(self, execution_results: Dict[str, Any]) -> List[Dict[str, Any]]:
517
+ """Generate findings from execution results"""
518
+ findings = []
519
+
520
+ outputs = execution_results.get("outputs", {})
521
+
522
+ # Extract insights from profiling
523
+ if "data_profiler_profile_dataset" in outputs:
524
+ profile = outputs["data_profiler_profile_dataset"]
525
+ summary = profile.get("summary", {})
526
+ findings.append(
527
+ {
528
+ "type": "data_profile",
529
+ "title": "Dataset Overview",
530
+ "description": f"Dataset contains {summary.get('rows', 0)} rows and {summary.get('columns', 0)} columns",
531
+ "confidence": "high",
532
+ "evidence": summary,
533
+ }
534
+ )
535
+
536
+ # Extract insights from statistical analysis
537
+ if "statistical_analyzer_analyze_correlation" in outputs:
538
+ corr = outputs["statistical_analyzer_analyze_correlation"]
539
+ high_corr = corr.get("high_correlations", [])
540
+ if high_corr:
541
+ findings.append(
542
+ {
543
+ "type": "correlation",
544
+ "title": "Significant Correlations Found",
545
+ "description": f"Found {len(high_corr)} significant correlations",
546
+ "confidence": "high",
547
+ "evidence": high_corr,
548
+ }
549
+ )
550
+
551
+ return findings
552
+
553
+ def _generate_recommendations(self, findings: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
554
+ """Generate recommendations based on findings"""
555
+ recommendations = []
556
+
557
+ for finding in findings:
558
+ if finding["type"] == "data_profile":
559
+ recommendations.append(
560
+ {
561
+ "action": "data_quality_check",
562
+ "reason": "Perform comprehensive data quality assessment",
563
+ "priority": "high",
564
+ }
565
+ )
566
+ elif finding["type"] == "correlation":
567
+ recommendations.append(
568
+ {
569
+ "action": "investigate_relationships",
570
+ "reason": "Investigate significant correlations for potential insights",
571
+ "priority": "medium",
572
+ }
573
+ )
574
+
575
+ return recommendations
576
+
577
+ def _generate_analysis_report(
578
+ self,
579
+ question: str,
580
+ workflow: Dict[str, Any],
581
+ execution_results: Dict[str, Any],
582
+ findings: List[Dict[str, Any]],
583
+ recommendations: List[Dict[str, Any]],
584
+ ) -> str:
585
+ """Generate comprehensive analysis report"""
586
+ report_lines = [
587
+ "# Data Analysis Report",
588
+ "",
589
+ f"**Question:** {question}",
590
+ f"**Analysis Mode:** {workflow.get('mode', 'N/A')}",
591
+ f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
592
+ "",
593
+ "## Analysis Workflow",
594
+ "",
595
+ ]
596
+
597
+ for i, step in enumerate(workflow.get("steps", []), 1):
598
+ report_lines.append(f"{i}. {step['tool']}.{step['operation']}")
599
+
600
+ report_lines.extend(["", "## Key Findings", ""])
601
+
602
+ for i, finding in enumerate(findings, 1):
603
+ report_lines.append(f"{i}. **{finding['title']}**: {finding['description']}")
604
+
605
+ report_lines.extend(["", "## Recommendations", ""])
606
+
607
+ for i, rec in enumerate(recommendations, 1):
608
+ report_lines.append(f"{i}. {rec['action']}: {rec['reason']}")
609
+
610
+ return "\n".join(report_lines)
611
+
612
+ def _generate_auto_analysis_report(self, results: Dict[str, Any]) -> str:
613
+ """Generate report for auto analysis"""
614
+ profile = results.get("data_profile", {})
615
+ summary = profile.get("summary", {})
616
+
617
+ report_lines = [
618
+ "# Automatic Data Analysis Report",
619
+ "",
620
+ f"**Data Source:** {results.get('data_source', 'N/A')}",
621
+ f"**Generated:** {results.get('timestamp', 'N/A')}",
622
+ "",
623
+ "## Dataset Summary",
624
+ "",
625
+ f"- Rows: {summary.get('rows', 0)}",
626
+ f"- Columns: {summary.get('columns', 0)}",
627
+ f"- Missing Data: {summary.get('missing_percentage', 0):.2f}%",
628
+ f"- Duplicate Rows: {summary.get('duplicate_rows', 0)}",
629
+ "",
630
+ "## Analysis Completed",
631
+ "",
632
+ "- Data profiling",
633
+ "- Quality assessment",
634
+ "- Statistical analysis",
635
+ "- Visualization generation",
636
+ ]
637
+
638
+ return "\n".join(report_lines)
639
+
640
+ def _summarize_result(self, result: Any) -> str:
641
+ """Create summary of result"""
642
+ if isinstance(result, dict):
643
+ if "summary" in result:
644
+ return f"Summary available with {len(result)} keys"
645
+ return f"Result with {len(result)} keys"
646
+ return "Result generated"