aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +435 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3949 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1731 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +894 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +377 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +230 -37
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +328 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +415 -0
  199. aiecs/llm/clients/googleai_client.py +314 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +1186 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1464 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1016 -0
  271. aiecs/tools/docs/document_writer_tool.py +2008 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +220 -141
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
  321. aiecs-1.7.17.dist-info/RECORD +337 -0
  322. aiecs-1.7.17.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
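The most disruptive change for downstream code is the restructuring of the llm package: the flat modules aiecs/llm/base_client.py, openai_client.py, vertex_client.py, and xai_client.py are deleted (full diffs below) and replaced by the new aiecs/llm/clients/ subpackage. A minimal import-shim sketch of the migration this implies — whether the new modules keep the old class names is an assumption, not something this diff confirms:

# Hypothetical import shim for code written against aiecs 1.0.1.
# Assumes (not verified here) that the aiecs/llm/clients/ modules listed
# above export the same class names as the deleted flat modules.
try:
    # 1.7.17 layout
    from aiecs.llm.clients.base_client import BaseLLMClient, LLMMessage, LLMResponse
    from aiecs.llm.clients.openai_client import OpenAIClient
except ImportError:
    # 1.0.1 layout (deleted in this release)
    from aiecs.llm.base_client import BaseLLMClient, LLMMessage, LLMResponse
    from aiecs.llm.openai_client import OpenAIClient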
aiecs/llm/base_client.py DELETED
@@ -1,99 +0,0 @@
- from abc import ABC, abstractmethod
- from typing import Dict, Any, Optional, List, AsyncGenerator
- from dataclasses import dataclass
- import time
- import logging
-
- logger = logging.getLogger(__name__)
-
- @dataclass
- class LLMMessage:
-     role: str  # "system", "user", "assistant"
-     content: str
-
- @dataclass
- class LLMResponse:
-     content: str
-     provider: str
-     model: str
-     tokens_used: Optional[int] = None
-     prompt_tokens: Optional[int] = None
-     completion_tokens: Optional[int] = None
-     cost_estimate: Optional[float] = None
-     response_time: Optional[float] = None
-
-     def __post_init__(self):
-         """Ensure consistency of token data"""
-         # If detailed token counts are present but no total, compute the total
-         if self.prompt_tokens is not None and self.completion_tokens is not None and self.tokens_used is None:
-             self.tokens_used = self.prompt_tokens + self.completion_tokens
-
-         # If only the total is available, the prompt/completion split cannot be recovered
-         elif self.tokens_used is not None and self.prompt_tokens is None and self.completion_tokens is None:
-             # Keep as is; an accurate allocation is not possible here
-             pass
-
- class LLMClientError(Exception):
-     """Base exception for LLM client errors"""
-     pass
-
- class ProviderNotAvailableError(LLMClientError):
-     """Raised when a provider is not available or misconfigured"""
-     pass
-
- class RateLimitError(LLMClientError):
-     """Raised when rate limit is exceeded"""
-     pass
-
- class BaseLLMClient(ABC):
-     """Abstract base class for all LLM provider clients"""
-
-     def __init__(self, provider_name: str):
-         self.provider_name = provider_name
-         self.logger = logging.getLogger(f"{__name__}.{provider_name}")
-
-     @abstractmethod
-     async def generate_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> LLMResponse:
-         """Generate text using the provider's API"""
-         pass
-
-     @abstractmethod
-     async def stream_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> AsyncGenerator[str, None]:
-         """Stream text generation using the provider's API"""
-         pass
-
-     @abstractmethod
-     async def close(self):
-         """Clean up resources"""
-         pass
-
-     async def __aenter__(self):
-         return self
-
-     async def __aexit__(self, exc_type, exc_val, exc_tb):
-         await self.close()
-
-     def _count_tokens_estimate(self, text: str) -> int:
-         """Rough token count estimation (4 chars ≈ 1 token for English)"""
-         return len(text) // 4
-
-     def _estimate_cost(self, model: str, input_tokens: int, output_tokens: int, token_costs: Dict) -> float:
-         """Estimate the cost of the API call"""
-         if model in token_costs:
-             costs = token_costs[model]
-             return (input_tokens * costs["input"] + output_tokens * costs["output"]) / 1000
-         return 0.0
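For reference, the contract this deleted base class imposed on providers: subclasses implement generate_text, stream_text, and close, while async context-manager support and the token/cost helpers are inherited. A minimal sketch of a conforming subclass against the 1.0.1 layout — EchoClient is an invented name for illustration, not part of the package:

import asyncio
from typing import AsyncGenerator, List, Optional

from aiecs.llm.base_client import BaseLLMClient, LLMMessage, LLMResponse  # 1.0.1 path, deleted in 1.7.17


class EchoClient(BaseLLMClient):
    """Toy provider that echoes the last message (illustration only)."""

    async def generate_text(self, messages: List[LLMMessage], model: Optional[str] = None,
                            temperature: float = 0.7, max_tokens: Optional[int] = None,
                            **kwargs) -> LLMResponse:
        content = messages[-1].content
        # Only the per-side counts are set; LLMResponse.__post_init__ fills tokens_used
        return LLMResponse(content=content, provider=self.provider_name, model=model or "echo-1",
                           prompt_tokens=self._count_tokens_estimate(content),
                           completion_tokens=self._count_tokens_estimate(content))

    async def stream_text(self, messages: List[LLMMessage], model: Optional[str] = None,
                          temperature: float = 0.7, max_tokens: Optional[int] = None,
                          **kwargs) -> AsyncGenerator[str, None]:
        yield messages[-1].content

    async def close(self):
        pass  # nothing to release


async def main():
    # __aenter__/__aexit__ inherited from BaseLLMClient guarantee close() runs
    async with EchoClient("Echo") as client:
        resp = await client.generate_text([LLMMessage(role="user", content="ping")])
        print(resp.content, resp.tokens_used)  # tokens_used computed in __post_init__

asyncio.run(main())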
aiecs/llm/openai_client.py DELETED
@@ -1,125 +0,0 @@
- import asyncio
- import logging
- from typing import Dict, Any, Optional, List, AsyncGenerator
- from openai import AsyncOpenAI
- from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
- import httpx
-
- from .base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
- from aiecs.config.config import get_settings
-
- logger = logging.getLogger(__name__)
-
- class OpenAIClient(BaseLLMClient):
-     """OpenAI provider client"""
-
-     def __init__(self):
-         super().__init__("OpenAI")
-         self.settings = get_settings()
-         self._client: Optional[AsyncOpenAI] = None
-
-         # Token cost estimates (USD per 1K tokens)
-         self.token_costs = {
-             "gpt-4": {"input": 0.03, "output": 0.06},
-             "gpt-4-turbo": {"input": 0.01, "output": 0.03},
-             "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002},
-             "gpt-4o": {"input": 0.005, "output": 0.015},
-             "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
-         }
-
-     def _get_client(self) -> AsyncOpenAI:
-         """Lazy initialization of OpenAI client"""
-         if not self._client:
-             if not self.settings.openai_api_key:
-                 raise ProviderNotAvailableError("OpenAI API key not configured")
-             self._client = AsyncOpenAI(api_key=self.settings.openai_api_key)
-         return self._client
-
-     @retry(
-         stop=stop_after_attempt(3),
-         wait=wait_exponential(multiplier=1, min=4, max=10),
-         retry=retry_if_exception_type((httpx.RequestError, RateLimitError))
-     )
-     async def generate_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> LLMResponse:
-         """Generate text using OpenAI API"""
-         client = self._get_client()
-         model = model or "gpt-4-turbo"
-
-         # Convert to OpenAI message format
-         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
-
-         try:
-             response = await client.chat.completions.create(
-                 model=model,
-                 messages=openai_messages,
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-                 **kwargs
-             )
-
-             content = response.choices[0].message.content
-             tokens_used = response.usage.total_tokens if response.usage else None
-
-             # Estimate cost
-             input_tokens = response.usage.prompt_tokens if response.usage else 0
-             output_tokens = response.usage.completion_tokens if response.usage else 0
-             cost = self._estimate_cost(model, input_tokens, output_tokens, self.token_costs)
-
-             return LLMResponse(
-                 content=content,
-                 provider=self.provider_name,
-                 model=model,
-                 tokens_used=tokens_used,
-                 cost_estimate=cost
-             )
-
-         except Exception as e:
-             if "rate_limit" in str(e).lower():
-                 raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}")
-             raise
-
-     async def stream_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> AsyncGenerator[str, None]:
-         """Stream text using OpenAI API"""
-         client = self._get_client()
-         model = model or "gpt-4-turbo"
-
-         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
-
-         try:
-             stream = await client.chat.completions.create(
-                 model=model,
-                 messages=openai_messages,
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-                 stream=True,
-                 **kwargs
-             )
-
-             async for chunk in stream:
-                 if chunk.choices[0].delta.content:
-                     yield chunk.choices[0].delta.content
-
-         except Exception as e:
-             if "rate_limit" in str(e).lower():
-                 raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}")
-             raise
-
-     async def close(self):
-         """Clean up resources"""
-         if self._client:
-             await self._client.close()
-             self._client = None
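Calling code against this deleted client, for anyone still pinned to 1.0.1, looked roughly like the sketch below: the key comes from the aiecs settings (openai_api_key), tenacity retries transient failures transparently, and the inherited context manager closes the underlying AsyncOpenAI client. A sketch, assuming the key is configured:

import asyncio

from aiecs.llm.base_client import LLMMessage  # 1.0.1 paths, deleted in 1.7.17
from aiecs.llm.openai_client import OpenAIClient


async def main():
    async with OpenAIClient() as client:
        resp = await client.generate_text(
            [LLMMessage(role="system", content="You are terse."),
             LLMMessage(role="user", content="Name one prime number.")],
            model="gpt-4o-mini",  # omit to fall back to "gpt-4-turbo"
        )
        # cost_estimate comes from the hard-coded token_costs table above
        print(resp.content, resp.tokens_used, resp.cost_estimate)

asyncio.run(main())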
aiecs/llm/vertex_client.py DELETED
@@ -1,186 +0,0 @@
- import asyncio
- import logging
- import os
- from typing import Dict, Any, Optional, List, AsyncGenerator
- from vertexai.generative_models import GenerativeModel, HarmCategory, HarmBlockThreshold
- import vertexai
- from google.oauth2 import service_account
-
- from .base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
- from aiecs.config.config import get_settings
-
- logger = logging.getLogger(__name__)
-
- class VertexAIClient(BaseLLMClient):
-     """Vertex AI provider client"""
-
-     def __init__(self):
-         super().__init__("Vertex")
-         self.settings = get_settings()
-         self._initialized = False
-
-         # Token cost estimates (USD per 1K tokens)
-         self.token_costs = {
-             "gemini-2.5-pro": {"input": 0.00125, "output": 0.00375},
-             "gemini-2.5-flash": {"input": 0.000075, "output": 0.0003},
-         }
-
-     def _init_vertex_ai(self):
-         """Lazy initialization of Vertex AI with proper authentication"""
-         if not self._initialized:
-             if not self.settings.vertex_project_id:
-                 raise ProviderNotAvailableError("Vertex AI project ID not configured")
-
-             try:
-                 # Set up Google Cloud authentication
-                 credentials = None
-
-                 # Check if GOOGLE_APPLICATION_CREDENTIALS is configured
-                 if self.settings.google_application_credentials:
-                     credentials_path = self.settings.google_application_credentials
-                     if os.path.exists(credentials_path):
-                         # Set the environment variable for the Google Cloud SDK
-                         os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_path
-                         self.logger.info(f"Using Google Cloud credentials from: {credentials_path}")
-                     else:
-                         self.logger.warning(f"Google Cloud credentials file not found: {credentials_path}")
-                         raise ProviderNotAvailableError(f"Google Cloud credentials file not found: {credentials_path}")
-                 elif 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
-                     self.logger.info("Using Google Cloud credentials from environment variable")
-                 else:
-                     self.logger.warning("No Google Cloud credentials configured. Using default authentication.")
-
-                 # Initialize Vertex AI
-                 vertexai.init(
-                     project=self.settings.vertex_project_id,
-                     location=getattr(self.settings, 'vertex_location', 'us-central1')
-                 )
-                 self._initialized = True
-                 self.logger.info(f"Vertex AI initialized for project {self.settings.vertex_project_id}")
-
-             except Exception as e:
-                 raise ProviderNotAvailableError(f"Failed to initialize Vertex AI: {str(e)}")
-
-     async def generate_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> LLMResponse:
-         """Generate text using Vertex AI"""
-         self._init_vertex_ai()
-         model_name = model or "gemini-2.5-pro"
-
-         try:
-             # Use the stable Vertex AI API
-             model_instance = GenerativeModel(model_name)
-
-             # Convert messages to Vertex AI format
-             if len(messages) == 1 and messages[0].role == "user":
-                 prompt = messages[0].content
-             else:
-                 # For multi-turn conversations, combine messages
-                 prompt = "\n".join([f"{msg.role}: {msg.content}" for msg in messages])
-
-             response = await asyncio.get_event_loop().run_in_executor(
-                 None,
-                 lambda: model_instance.generate_content(
-                     prompt,
-                     generation_config={
-                         "temperature": temperature,
-                         "max_output_tokens": max_tokens or 8192,  # Increased to account for thinking tokens
-                         "top_p": 0.95,
-                         "top_k": 40,
-                     },
-                     safety_settings={
-                         HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
-                         HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
-                     }
-                 )
-             )
-
-             # Handle response content safely
-             try:
-                 content = response.text
-                 self.logger.debug(f"Vertex AI response received: {content[:100]}...")
-             except ValueError as ve:
-                 # Handle cases where the response has no content (safety filters, etc.)
-                 self.logger.warning(f"Vertex AI response error: {str(ve)}")
-                 self.logger.debug(f"Full response object: {response}")
-
-                 # Check if the response has candidates but no text
-                 if hasattr(response, 'candidates') and response.candidates:
-                     candidate = response.candidates[0]
-                     self.logger.debug(f"Candidate finish_reason: {getattr(candidate, 'finish_reason', 'unknown')}")
-
-                     # If finish_reason is MAX_TOKENS, it might be due to thinking tokens
-                     if hasattr(candidate, 'finish_reason') and candidate.finish_reason == 'MAX_TOKENS':
-                         content = "[Response truncated due to token limit - consider increasing max_tokens for Gemini 2.5 models]"
-                         self.logger.warning("Response truncated due to MAX_TOKENS - Gemini 2.5 uses thinking tokens")
-                     elif "no parts" in str(ve).lower() or "safety filters" in str(ve).lower():
-                         content = "[Response blocked by safety filters or has no content]"
-                         self.logger.warning(f"Vertex AI response blocked or empty: {str(ve)}")
-                     else:
-                         content = f"[Response error: {str(ve)}]"
-                 else:
-                     content = f"[Response error: {str(ve)}]"
-
-             # Vertex AI doesn't provide detailed token usage in the response
-             tokens_used = self._count_tokens_estimate(prompt + content)
-             cost = self._estimate_cost(
-                 model_name,
-                 self._count_tokens_estimate(prompt),
-                 self._count_tokens_estimate(content),
-                 self.token_costs
-             )
-
-             return LLMResponse(
-                 content=content,
-                 provider=self.provider_name,
-                 model=model_name,
-                 tokens_used=tokens_used,
-                 cost_estimate=cost
-             )
-
-         except Exception as e:
-             if "quota" in str(e).lower() or "limit" in str(e).lower():
-                 raise RateLimitError(f"Vertex AI quota exceeded: {str(e)}")
-             # Handle specific Vertex AI response errors
-             if "cannot get the response text" in str(e).lower() or "safety filters" in str(e).lower():
-                 self.logger.warning(f"Vertex AI response issue: {str(e)}")
-                 # Return a response indicating the issue
-                 return LLMResponse(
-                     content="[Response unavailable due to safety filters or content policy]",
-                     provider=self.provider_name,
-                     model=model_name,
-                     tokens_used=self._count_tokens_estimate(prompt),
-                     cost_estimate=0.0
-                 )
-             raise
-
-     async def stream_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> AsyncGenerator[str, None]:
-         """Stream text using Vertex AI (simulated streaming)"""
-         # Vertex AI streaming is more complex; for now fall back to non-streaming
-         response = await self.generate_text(messages, model, temperature, max_tokens, **kwargs)
-
-         # Simulate streaming by yielding words
-         words = response.content.split()
-         for word in words:
-             yield word + " "
-             await asyncio.sleep(0.05)  # Small delay to simulate streaming
-
-     async def close(self):
-         """Clean up resources"""
-         # Vertex AI doesn't require explicit cleanup
-         self._initialized = False
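Note that stream_text above never touches a streaming endpoint: it awaits one full generate_text call, then re-yields the text word by word with a 50 ms delay, so callers see chunked output but time-to-first-token equals the full response latency. Consumption nevertheless looks identical to real streaming — a sketch against the 1.0.1 layout:

import asyncio

from aiecs.llm.base_client import LLMMessage  # 1.0.1 paths, deleted in 1.7.17
from aiecs.llm.vertex_client import VertexAIClient


async def main():
    client = VertexAIClient()
    try:
        # Chunks arrive word by word from the simulated stream above
        async for chunk in client.stream_text([LLMMessage(role="user", content="Hi")]):
            print(chunk, end="", flush=True)
    finally:
        await client.close()

asyncio.run(main())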
aiecs/llm/xai_client.py DELETED
@@ -1,184 +0,0 @@
- import json
- import asyncio
- import logging
- from typing import Dict, Any, Optional, List, AsyncGenerator
- from openai import AsyncOpenAI
- from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
-
- from .base_client import BaseLLMClient, LLMMessage, LLMResponse, ProviderNotAvailableError, RateLimitError
- from aiecs.config.config import get_settings
-
- logger = logging.getLogger(__name__)
-
- class XAIClient(BaseLLMClient):
-     """xAI (Grok) provider client"""
-
-     def __init__(self):
-         super().__init__("xAI")
-         self.settings = get_settings()
-         self._openai_client: Optional[AsyncOpenAI] = None
-
-         # Enhanced model mapping for all Grok models
-         self.model_map = {
-             # Legacy Grok models
-             "grok-beta": "grok-beta",
-             "grok": "grok-beta",
-
-             # Current Grok models
-             "Grok 2": "grok-2",
-             "grok-2": "grok-2",
-             "Grok 2 Vision": "grok-2-vision",
-             "grok-2-vision": "grok-2-vision",
-
-             # Grok 3 models
-             "Grok 3 Normal": "grok-3",
-             "grok-3": "grok-3",
-             "Grok 3 Fast": "grok-3-fast",
-             "grok-3-fast": "grok-3-fast",
-
-             # Grok 3 Mini models
-             "Grok 3 Mini Normal": "grok-3-mini",
-             "grok-3-mini": "grok-3-mini",
-             "Grok 3 Mini Fast": "grok-3-mini-fast",
-             "grok-3-mini-fast": "grok-3-mini-fast",
-
-             # Grok 3 Reasoning models
-             "Grok 3 Reasoning Normal": "grok-3-reasoning",
-             "grok-3-reasoning": "grok-3-reasoning",
-             "Grok 3 Reasoning Fast": "grok-3-reasoning-fast",
-             "grok-3-reasoning-fast": "grok-3-reasoning-fast",
-
-             # Grok 3 Mini Reasoning models
-             "Grok 3 Mini Reasoning Normal": "grok-3-mini-reasoning",
-             "grok-3-mini-reasoning": "grok-3-mini-reasoning",
-             "Grok 3 Mini Reasoning Fast": "grok-3-mini-reasoning-fast",
-             "grok-3-mini-reasoning-fast": "grok-3-mini-reasoning-fast",
-
-             # Grok 4 models
-             "Grok 4 Normal": "grok-4",
-             "grok-4": "grok-4",
-             "Grok 4 Fast": "grok-4-fast",
-             "grok-4-fast": "grok-4-fast",
-             "Grok 4 0709": "grok-4-0709",
-             "grok-4-0709": "grok-4-0709",
-         }
-
-     def _get_openai_client(self) -> AsyncOpenAI:
-         """Lazy initialization of the OpenAI client for xAI"""
-         if not self._openai_client:
-             api_key = self._get_api_key()
-             self._openai_client = AsyncOpenAI(
-                 api_key=api_key,
-                 base_url="https://api.x.ai/v1",
-                 timeout=360.0  # Longer than the default timeout, for reasoning models
-             )
-         return self._openai_client
-
-     def _get_api_key(self) -> str:
-         """Get API key with backward compatibility"""
-         # Support both xai_api_key and grok_api_key for backward compatibility
-         api_key = getattr(self.settings, 'xai_api_key', None) or getattr(self.settings, 'grok_api_key', None)
-         if not api_key:
-             raise ProviderNotAvailableError("xAI API key not configured")
-         return api_key
-
-     @retry(
-         stop=stop_after_attempt(3),
-         wait=wait_exponential(multiplier=1, min=4, max=10),
-         retry=retry_if_exception_type((Exception, RateLimitError))
-     )
-     async def generate_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> LLMResponse:
-         """Generate text using the xAI API via the OpenAI library (supports all Grok models)"""
-         # Check API key availability
-         api_key = self._get_api_key()
-         if not api_key:
-             raise ProviderNotAvailableError("xAI API key is not configured.")
-
-         client = self._get_openai_client()
-
-         selected_model = model or "grok-4"  # Default to grok-4
-         api_model = self.model_map.get(selected_model, selected_model)
-
-         # Convert to OpenAI format
-         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
-
-         try:
-             completion = await client.chat.completions.create(
-                 model=api_model,
-                 messages=openai_messages,
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-                 **kwargs
-             )
-
-             content = completion.choices[0].message.content
-             tokens_used = completion.usage.total_tokens if completion.usage else None
-
-             return LLMResponse(
-                 content=content,
-                 provider=self.provider_name,
-                 model=selected_model,
-                 tokens_used=tokens_used,
-                 cost_estimate=0.0  # xAI pricing not available yet
-             )
-
-         except Exception as e:
-             if "rate limit" in str(e).lower() or "429" in str(e):
-                 raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
-             logger.error(f"xAI API error: {str(e)}")
-             raise
-
-     async def stream_text(
-         self,
-         messages: List[LLMMessage],
-         model: Optional[str] = None,
-         temperature: float = 0.7,
-         max_tokens: Optional[int] = None,
-         **kwargs
-     ) -> AsyncGenerator[str, None]:
-         """Stream text using the xAI API via the OpenAI library (supports all Grok models)"""
-         # Check API key availability
-         api_key = self._get_api_key()
-         if not api_key:
-             raise ProviderNotAvailableError("xAI API key is not configured.")
-
-         client = self._get_openai_client()
-
-         selected_model = model or "grok-4"  # Default to grok-4
-         api_model = self.model_map.get(selected_model, selected_model)
-
-         # Convert to OpenAI format
-         openai_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
-
-         try:
-             stream = await client.chat.completions.create(
-                 model=api_model,
-                 messages=openai_messages,
-                 temperature=temperature,
-                 max_tokens=max_tokens,
-                 stream=True,
-                 **kwargs
-             )
-
-             async for chunk in stream:
-                 if chunk.choices[0].delta.content is not None:
-                     yield chunk.choices[0].delta.content
-
-         except Exception as e:
-             if "rate limit" in str(e).lower() or "429" in str(e):
-                 raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
-             logger.error(f"xAI API streaming error: {str(e)}")
-             raise
-
-     async def close(self):
-         """Clean up resources"""
-         if self._openai_client:
-             await self._openai_client.close()
-             self._openai_client = None
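One behavioral detail of the deleted client worth recording: model_map accepts both display names and raw API ids, generate_text resolves through model_map.get(selected_model, selected_model) so unknown ids pass through unchanged, and the returned LLMResponse reports the name the caller asked for rather than the resolved id. The resolution step in isolation (grok-5-experimental is a made-up id, used only to show the pass-through):

# Alias resolution as implemented above; the two entries are copied from the deleted table
model_map = {"Grok 3 Mini Fast": "grok-3-mini-fast", "grok-4": "grok-4"}

for requested in ("Grok 3 Mini Fast", "grok-4", "grok-5-experimental"):
    api_model = model_map.get(requested, requested)  # unknown ids pass through unchanged
    print(f"{requested!r} -> {api_model!r}")
# 'Grok 3 Mini Fast' -> 'grok-3-mini-fast'
# 'grok-4' -> 'grok-4'
# 'grok-5-experimental' -> 'grok-5-experimental'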