aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiecs might be problematic; see the registry's advisory page for more details.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +435 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3949 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1731 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +894 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +377 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +230 -37
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +328 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +415 -0
  199. aiecs/llm/clients/googleai_client.py +314 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +1186 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1464 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1016 -0
  271. aiecs/tools/docs/document_writer_tool.py +2008 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +220 -141
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
  321. aiecs-1.7.17.dist-info/RECORD +337 -0
  322. aiecs-1.7.17.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,314 @@
1
+ import json
2
+ import logging
3
+ import os
4
+ from typing import Optional, List, AsyncGenerator
5
+
6
+ from google import genai
7
+ from google.genai import types
8
+
9
+ from aiecs.llm.clients.base_client import (
10
+ BaseLLMClient,
11
+ LLMMessage,
12
+ LLMResponse,
13
+ ProviderNotAvailableError,
14
+ RateLimitError,
15
+ )
16
+ from aiecs.config.config import get_settings
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+
21
class GoogleAIClient(BaseLLMClient):
    """Google AI (Gemini) provider client using the new google.genai SDK.

    Supports plain generation, streaming, and multi-turn conversations
    that include function/tool calls and their responses.
    """

    # Harm categories whose built-in filtering is disabled on every
    # request; content moderation is assumed to happen at a higher layer.
    _SAFETY_CATEGORIES = (
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
    )

    def __init__(self):
        super().__init__("GoogleAI")
        self.settings = get_settings()
        self._initialized = False
        # Created lazily by _init_google_ai() on first use.
        self._client: Optional[genai.Client] = None

    def _init_google_ai(self) -> genai.Client:
        """Lazily initialize the Google AI SDK and return the client.

        Returns:
            The initialized ``genai.Client``.

        Raises:
            ProviderNotAvailableError: If no API key is configured or the
                SDK fails to initialize.
        """
        if not self._initialized or self._client is None:
            api_key = self.settings.googleai_api_key or os.environ.get("GOOGLEAI_API_KEY")
            if not api_key:
                raise ProviderNotAvailableError("Google AI API key not configured. Set GOOGLEAI_API_KEY.")

            try:
                self._client = genai.Client(api_key=api_key)
                self._initialized = True
                self.logger.info("Google AI SDK (google.genai) initialized successfully.")
            except Exception as e:
                # Chain the original exception so the root cause survives.
                raise ProviderNotAvailableError(f"Failed to initialize Google AI SDK: {str(e)}") from e

        return self._client

    def _convert_messages_to_contents(
        self, messages: List[LLMMessage]
    ) -> List[types.Content]:
        """
        Convert LLMMessage list to Google GenAI Content objects.

        This properly handles multi-turn conversations including
        function/tool responses for Google AI Function Calling.

        Args:
            messages: List of LLMMessage objects (system messages should be filtered out)

        Returns:
            List of Content objects for Google AI API
        """
        contents: List[types.Content] = []

        for msg in messages:
            # Tool/function responses (role="tool"): Google AI expects them
            # as "user" messages carrying FunctionResponse parts.
            if msg.role == "tool":
                # NOTE(review): tool_call_id is used as the function *name*
                # here — confirm callers store the name in that field.
                func_name = msg.tool_call_id or "unknown_function"

                # JSON content becomes the structured response; anything
                # else is wrapped as {"result": ...}.
                try:
                    if msg.content and msg.content.strip().startswith('{'):
                        response_data = json.loads(msg.content)
                    else:
                        response_data = {"result": msg.content}
                except json.JSONDecodeError:
                    response_data = {"result": msg.content}

                func_response_part = types.Part.from_function_response(
                    name=func_name,
                    response=response_data,
                )
                contents.append(types.Content(
                    role="user",  # Function responses are sent as "user" role
                    parts=[func_response_part],
                ))

            # Assistant turn that issued one or more function calls.
            elif msg.role == "assistant" and msg.tool_calls:
                parts = []
                if msg.content:
                    parts.append(types.Part(text=msg.content))

                for tool_call in msg.tool_calls:
                    func = tool_call.get("function", {})
                    func_name = func.get("name", "")
                    func_args = func.get("arguments", "{}")

                    # Arguments may arrive as a JSON string or a dict.
                    try:
                        args_dict = json.loads(func_args) if isinstance(func_args, str) else func_args
                    except json.JSONDecodeError:
                        args_dict = {}

                    # types.Part.from_function_call() may not exist in
                    # google.genai, so build the FunctionCall directly.
                    function_call = types.FunctionCall(
                        name=func_name,
                        args=args_dict,
                    )
                    parts.append(types.Part(function_call=function_call))

                contents.append(types.Content(
                    role="model",
                    parts=parts,
                ))

            # Regular user/assistant text; Google uses "model" for the
            # assistant role. Empty-content messages are dropped.
            else:
                role = "model" if msg.role == "assistant" else msg.role
                if msg.content:
                    contents.append(types.Content(
                        role=role,
                        parts=[types.Part(text=msg.content)],
                    ))

        return contents

    @staticmethod
    def _split_system_message(messages):
        """Separate the system message from the other messages.

        Returns:
            ``(system_text or None, non_system_messages)``; the last
            system message wins when several are present.
        """
        system_msg = None
        rest: List[LLMMessage] = []
        for msg in messages:
            if msg.role == "system":
                system_msg = msg.content
            else:
                rest.append(msg)
        return system_msg, rest

    def _resolve_model_and_tokens(self, model, max_tokens):
        """Resolve the model name and fill ``max_tokens`` from model config.

        Returns:
            ``(model_name, max_tokens)`` with configured defaults applied.
        """
        model_name = model or self._get_default_model() or "gemini-2.5-pro"
        model_config = self._get_model_config(model_name)
        if model_config and max_tokens is None:
            max_tokens = model_config.default_params.max_tokens
        return model_name, max_tokens

    def _build_generation_config(self, system_instruction, temperature, max_tokens, **kwargs):
        """Build the GenerateContentConfig shared by generate and stream paths."""
        return types.GenerateContentConfig(
            system_instruction=system_instruction,
            temperature=temperature,
            max_output_tokens=max_tokens or 8192,
            top_p=kwargs.get("top_p", 0.95),
            top_k=kwargs.get("top_k", 40),
            # Disable built-in safety filtering for every category.
            safety_settings=[
                types.SafetySetting(category=category, threshold="OFF")
                for category in self._SAFETY_CATEGORIES
            ],
        )

    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        system_instruction: Optional[str] = None,
        **kwargs,
    ) -> LLMResponse:
        """Generate text using Google AI (google.genai SDK).

        Args:
            messages: Conversation so far; any system message is lifted
                into the request's system instruction.
            model: Model name; falls back to the configured default.
            temperature: Sampling temperature.
            max_tokens: Output token cap; model-config default, then 8192.
            system_instruction: Overrides any system message in ``messages``.
            **kwargs: Optional ``top_p`` / ``top_k`` overrides.

        Raises:
            RateLimitError: When the provider reports a quota problem.
        """
        client = self._init_google_ai()
        model_name, max_tokens = self._resolve_model_and_tokens(model, max_tokens)

        try:
            system_msg, user_messages = self._split_system_message(messages)
            # Use provided system_instruction or extracted system message.
            final_system_instruction = system_instruction or system_msg

            contents = self._convert_messages_to_contents(user_messages)
            config = self._build_generation_config(
                final_system_instruction, temperature, max_tokens, **kwargs
            )

            # Use the async client surface for async operations.
            response = await client.aio.models.generate_content(
                model=model_name,
                contents=contents,
                config=config,
            )

            content = response.text
            prompt_tokens = response.usage_metadata.prompt_token_count
            completion_tokens = response.usage_metadata.candidates_token_count
            total_tokens = response.usage_metadata.total_token_count

            # Cache metadata is only present on newer SDK versions.
            cache_read_tokens = None
            cache_hit = None
            if hasattr(response.usage_metadata, "cached_content_token_count"):
                cache_read_tokens = response.usage_metadata.cached_content_token_count
                cache_hit = cache_read_tokens is not None and cache_read_tokens > 0

            # Use config-based cost estimation.
            cost = self._estimate_cost_from_config(model_name, prompt_tokens, completion_tokens)

            return LLMResponse(
                content=content,
                provider=self.provider_name,
                model=model_name,
                tokens_used=total_tokens,
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                cost_estimate=cost,
                cache_read_tokens=cache_read_tokens,
                cache_hit=cache_hit,
            )

        except Exception as e:
            if "quota" in str(e).lower():
                raise RateLimitError(f"Google AI quota exceeded: {str(e)}") from e
            self.logger.error(f"Error generating text with Google AI: {e}")
            raise

    async def stream_text(  # type: ignore[override]
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        system_instruction: Optional[str] = None,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """Stream text generation using Google AI (google.genai SDK).

        Yields non-empty text chunks as they arrive; parameters mirror
        :meth:`generate_text`.
        """
        client = self._init_google_ai()
        model_name, max_tokens = self._resolve_model_and_tokens(model, max_tokens)

        try:
            system_msg, user_messages = self._split_system_message(messages)
            # Use provided system_instruction or extracted system message.
            final_system_instruction = system_instruction or system_msg

            contents = self._convert_messages_to_contents(user_messages)
            config = self._build_generation_config(
                final_system_instruction, temperature, max_tokens, **kwargs
            )

            # Async streaming with the new SDK.
            async for chunk in client.aio.models.generate_content_stream(
                model=model_name,
                contents=contents,
                config=config,
            ):
                if chunk.text:
                    yield chunk.text

        except Exception as e:
            self.logger.error(f"Error streaming text with Google AI: {e}")
            raise

    async def close(self):
        """Clean up resources (the SDK needs no explicit client close)."""
        self._initialized = False
        self._client = None
@@ -0,0 +1,158 @@
1
+ import logging
2
+ from typing import Optional, List, Dict, AsyncGenerator, cast, Any
3
+ from openai import AsyncOpenAI
4
+ from tenacity import (
5
+ retry,
6
+ stop_after_attempt,
7
+ wait_exponential,
8
+ retry_if_exception_type,
9
+ )
10
+ import httpx
11
+
12
+ from aiecs.llm.clients.base_client import (
13
+ BaseLLMClient,
14
+ LLMMessage,
15
+ LLMResponse,
16
+ ProviderNotAvailableError,
17
+ RateLimitError,
18
+ )
19
+ from aiecs.llm.clients.openai_compatible_mixin import (
20
+ OpenAICompatibleFunctionCallingMixin,
21
+ StreamChunk,
22
+ )
23
+ from aiecs.config.config import get_settings
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
class OpenAIClient(BaseLLMClient, OpenAICompatibleFunctionCallingMixin):
    """OpenAI provider client built on the official AsyncOpenAI SDK.

    Function-calling support (both generation and streaming) is delegated to
    OpenAICompatibleFunctionCallingMixin; this class handles client lifecycle,
    retry policy, and mapping provider errors to RateLimitError.
    """

    def __init__(self) -> None:
        """Initialize the client wrapper; the SDK client itself is created lazily."""
        super().__init__("OpenAI")
        self.settings = get_settings()
        # Cached AsyncOpenAI instance; created on first use in _get_client().
        self._client: Optional[AsyncOpenAI] = None

    def _get_client(self) -> AsyncOpenAI:
        """Lazily create and cache the AsyncOpenAI client.

        Returns:
            The cached AsyncOpenAI instance.

        Raises:
            ProviderNotAvailableError: If no OpenAI API key is configured.
        """
        if not self._client:
            if not self.settings.openai_api_key:
                raise ProviderNotAvailableError("OpenAI API key not configured")
            self._client = AsyncOpenAI(api_key=self.settings.openai_api_key)
        return self._client

    @staticmethod
    def _is_rate_limit(error: Exception) -> bool:
        """Heuristically detect a rate-limit failure from the error message.

        The OpenAI SDK surfaces rate limiting with "rate_limit" in the error
        text; centralizing the check keeps generate_text and stream_text in sync.
        """
        return "rate_limit" in str(error).lower()

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((httpx.RequestError, RateLimitError)),
    )
    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        **kwargs,
    ) -> LLMResponse:
        """
        Generate text using OpenAI API with optional function calling support.

        Args:
            messages: List of LLM messages
            model: Model name (optional)
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format, recommended)
            tool_choice: Tool choice strategy ("auto", "none", or specific tool)
            **kwargs: Additional arguments passed to OpenAI API

        Returns:
            LLMResponse with content and optional function_call information

        Raises:
            RateLimitError: If the provider reports rate limiting (retried by tenacity).
            ProviderNotAvailableError: If no API key is configured.
        """
        client = self._get_client()

        # Get model name from config if not provided
        model = model or self._get_default_model() or "gpt-4-turbo"

        try:
            # Use mixin method for Function Calling support
            return await self._generate_text_with_function_calling(
                client=client,
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                **kwargs,
            )

        except Exception as e:
            if self._is_rate_limit(e):
                # Chain the original exception so the root cause survives in tracebacks.
                raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}") from e
            raise

    async def stream_text(  # type: ignore[override]
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        return_chunks: bool = False,
        **kwargs,
    ) -> AsyncGenerator[Any, None]:
        """
        Stream text using OpenAI API with optional function calling support.

        Args:
            messages: List of LLM messages
            model: Model name (optional)
            temperature: Temperature for generation
            max_tokens: Maximum tokens to generate
            functions: List of function schemas (legacy format)
            tools: List of tool schemas (new format, recommended)
            tool_choice: Tool choice strategy ("auto", "none", or specific tool)
            return_chunks: If True, returns StreamChunk objects with tool_calls info; if False, returns str tokens only
            **kwargs: Additional arguments passed to OpenAI API

        Yields:
            str or StreamChunk: Text tokens as they are generated, or StreamChunk objects if return_chunks=True

        Raises:
            RateLimitError: If the provider reports rate limiting.
            ProviderNotAvailableError: If no API key is configured.
        """
        client = self._get_client()

        # Get model name from config if not provided
        model = model or self._get_default_model() or "gpt-4-turbo"

        try:
            # Use mixin method for Function Calling support
            async for chunk in self._stream_text_with_function_calling(
                client=client,
                messages=messages,
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                return_chunks=return_chunks,
                **kwargs,
            ):
                yield chunk
        except Exception as e:
            if self._is_rate_limit(e):
                # Chain the original exception so the root cause survives in tracebacks.
                raise RateLimitError(f"OpenAI rate limit exceeded: {str(e)}") from e
            raise

    async def close(self):
        """Clean up resources by closing the underlying AsyncOpenAI client, if any."""
        if self._client:
            await self._client.close()
            self._client = None
+ self._client = None