aiecs-1.5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (302)
  1. aiecs/__init__.py +72 -0
  2. aiecs/__main__.py +41 -0
  3. aiecs/aiecs_client.py +469 -0
  4. aiecs/application/__init__.py +10 -0
  5. aiecs/application/executors/__init__.py +10 -0
  6. aiecs/application/executors/operation_executor.py +363 -0
  7. aiecs/application/knowledge_graph/__init__.py +7 -0
  8. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +375 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +356 -0
  11. aiecs/application/knowledge_graph/builder/schema_mapping.py +531 -0
  12. aiecs/application/knowledge_graph/builder/structured_pipeline.py +443 -0
  13. aiecs/application/knowledge_graph/builder/text_chunker.py +319 -0
  14. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  15. aiecs/application/knowledge_graph/extractors/base.py +100 -0
  16. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +327 -0
  17. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +349 -0
  18. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +244 -0
  19. aiecs/application/knowledge_graph/fusion/__init__.py +23 -0
  20. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +387 -0
  21. aiecs/application/knowledge_graph/fusion/entity_linker.py +343 -0
  22. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +580 -0
  23. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +189 -0
  24. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  25. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +344 -0
  26. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +378 -0
  27. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  28. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +199 -0
  29. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  30. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  31. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +347 -0
  32. aiecs/application/knowledge_graph/reasoning/inference_engine.py +504 -0
  33. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +167 -0
  34. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  35. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  36. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +630 -0
  37. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +654 -0
  38. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +477 -0
  39. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +390 -0
  40. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +217 -0
  41. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +169 -0
  42. aiecs/application/knowledge_graph/reasoning/query_planner.py +872 -0
  43. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +554 -0
  44. aiecs/application/knowledge_graph/retrieval/__init__.py +19 -0
  45. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +596 -0
  46. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  47. aiecs/application/knowledge_graph/search/hybrid_search.py +423 -0
  48. aiecs/application/knowledge_graph/search/reranker.py +295 -0
  49. aiecs/application/knowledge_graph/search/reranker_strategies.py +553 -0
  50. aiecs/application/knowledge_graph/search/text_similarity.py +398 -0
  51. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  52. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +329 -0
  53. aiecs/application/knowledge_graph/traversal/path_scorer.py +269 -0
  54. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  55. aiecs/application/knowledge_graph/validators/relation_validator.py +189 -0
  56. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  57. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +321 -0
  58. aiecs/common/__init__.py +9 -0
  59. aiecs/common/knowledge_graph/__init__.py +17 -0
  60. aiecs/common/knowledge_graph/runnable.py +484 -0
  61. aiecs/config/__init__.py +16 -0
  62. aiecs/config/config.py +498 -0
  63. aiecs/config/graph_config.py +137 -0
  64. aiecs/config/registry.py +23 -0
  65. aiecs/core/__init__.py +46 -0
  66. aiecs/core/interface/__init__.py +34 -0
  67. aiecs/core/interface/execution_interface.py +152 -0
  68. aiecs/core/interface/storage_interface.py +171 -0
  69. aiecs/domain/__init__.py +289 -0
  70. aiecs/domain/agent/__init__.py +189 -0
  71. aiecs/domain/agent/base_agent.py +697 -0
  72. aiecs/domain/agent/exceptions.py +103 -0
  73. aiecs/domain/agent/graph_aware_mixin.py +559 -0
  74. aiecs/domain/agent/hybrid_agent.py +490 -0
  75. aiecs/domain/agent/integration/__init__.py +26 -0
  76. aiecs/domain/agent/integration/context_compressor.py +222 -0
  77. aiecs/domain/agent/integration/context_engine_adapter.py +252 -0
  78. aiecs/domain/agent/integration/retry_policy.py +219 -0
  79. aiecs/domain/agent/integration/role_config.py +213 -0
  80. aiecs/domain/agent/knowledge_aware_agent.py +646 -0
  81. aiecs/domain/agent/lifecycle.py +296 -0
  82. aiecs/domain/agent/llm_agent.py +300 -0
  83. aiecs/domain/agent/memory/__init__.py +12 -0
  84. aiecs/domain/agent/memory/conversation.py +197 -0
  85. aiecs/domain/agent/migration/__init__.py +14 -0
  86. aiecs/domain/agent/migration/conversion.py +160 -0
  87. aiecs/domain/agent/migration/legacy_wrapper.py +90 -0
  88. aiecs/domain/agent/models.py +317 -0
  89. aiecs/domain/agent/observability.py +407 -0
  90. aiecs/domain/agent/persistence.py +289 -0
  91. aiecs/domain/agent/prompts/__init__.py +29 -0
  92. aiecs/domain/agent/prompts/builder.py +161 -0
  93. aiecs/domain/agent/prompts/formatters.py +189 -0
  94. aiecs/domain/agent/prompts/template.py +255 -0
  95. aiecs/domain/agent/registry.py +260 -0
  96. aiecs/domain/agent/tool_agent.py +257 -0
  97. aiecs/domain/agent/tools/__init__.py +12 -0
  98. aiecs/domain/agent/tools/schema_generator.py +221 -0
  99. aiecs/domain/community/__init__.py +155 -0
  100. aiecs/domain/community/agent_adapter.py +477 -0
  101. aiecs/domain/community/analytics.py +481 -0
  102. aiecs/domain/community/collaborative_workflow.py +642 -0
  103. aiecs/domain/community/communication_hub.py +645 -0
  104. aiecs/domain/community/community_builder.py +320 -0
  105. aiecs/domain/community/community_integration.py +800 -0
  106. aiecs/domain/community/community_manager.py +813 -0
  107. aiecs/domain/community/decision_engine.py +879 -0
  108. aiecs/domain/community/exceptions.py +225 -0
  109. aiecs/domain/community/models/__init__.py +33 -0
  110. aiecs/domain/community/models/community_models.py +268 -0
  111. aiecs/domain/community/resource_manager.py +457 -0
  112. aiecs/domain/community/shared_context_manager.py +603 -0
  113. aiecs/domain/context/__init__.py +58 -0
  114. aiecs/domain/context/context_engine.py +989 -0
  115. aiecs/domain/context/conversation_models.py +354 -0
  116. aiecs/domain/context/graph_memory.py +467 -0
  117. aiecs/domain/execution/__init__.py +12 -0
  118. aiecs/domain/execution/model.py +57 -0
  119. aiecs/domain/knowledge_graph/__init__.py +19 -0
  120. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  121. aiecs/domain/knowledge_graph/models/entity.py +130 -0
  122. aiecs/domain/knowledge_graph/models/evidence.py +194 -0
  123. aiecs/domain/knowledge_graph/models/inference_rule.py +186 -0
  124. aiecs/domain/knowledge_graph/models/path.py +179 -0
  125. aiecs/domain/knowledge_graph/models/path_pattern.py +173 -0
  126. aiecs/domain/knowledge_graph/models/query.py +272 -0
  127. aiecs/domain/knowledge_graph/models/query_plan.py +187 -0
  128. aiecs/domain/knowledge_graph/models/relation.py +136 -0
  129. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  130. aiecs/domain/knowledge_graph/schema/entity_type.py +135 -0
  131. aiecs/domain/knowledge_graph/schema/graph_schema.py +271 -0
  132. aiecs/domain/knowledge_graph/schema/property_schema.py +155 -0
  133. aiecs/domain/knowledge_graph/schema/relation_type.py +171 -0
  134. aiecs/domain/knowledge_graph/schema/schema_manager.py +496 -0
  135. aiecs/domain/knowledge_graph/schema/type_enums.py +205 -0
  136. aiecs/domain/task/__init__.py +13 -0
  137. aiecs/domain/task/dsl_processor.py +613 -0
  138. aiecs/domain/task/model.py +62 -0
  139. aiecs/domain/task/task_context.py +268 -0
  140. aiecs/infrastructure/__init__.py +24 -0
  141. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  142. aiecs/infrastructure/graph_storage/base.py +601 -0
  143. aiecs/infrastructure/graph_storage/batch_operations.py +449 -0
  144. aiecs/infrastructure/graph_storage/cache.py +429 -0
  145. aiecs/infrastructure/graph_storage/distributed.py +226 -0
  146. aiecs/infrastructure/graph_storage/error_handling.py +390 -0
  147. aiecs/infrastructure/graph_storage/graceful_degradation.py +306 -0
  148. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  149. aiecs/infrastructure/graph_storage/in_memory.py +514 -0
  150. aiecs/infrastructure/graph_storage/index_optimization.py +483 -0
  151. aiecs/infrastructure/graph_storage/lazy_loading.py +410 -0
  152. aiecs/infrastructure/graph_storage/metrics.py +357 -0
  153. aiecs/infrastructure/graph_storage/migration.py +413 -0
  154. aiecs/infrastructure/graph_storage/pagination.py +471 -0
  155. aiecs/infrastructure/graph_storage/performance_monitoring.py +466 -0
  156. aiecs/infrastructure/graph_storage/postgres.py +871 -0
  157. aiecs/infrastructure/graph_storage/query_optimizer.py +635 -0
  158. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  159. aiecs/infrastructure/graph_storage/sqlite.py +623 -0
  160. aiecs/infrastructure/graph_storage/streaming.py +495 -0
  161. aiecs/infrastructure/messaging/__init__.py +13 -0
  162. aiecs/infrastructure/messaging/celery_task_manager.py +383 -0
  163. aiecs/infrastructure/messaging/websocket_manager.py +298 -0
  164. aiecs/infrastructure/monitoring/__init__.py +34 -0
  165. aiecs/infrastructure/monitoring/executor_metrics.py +174 -0
  166. aiecs/infrastructure/monitoring/global_metrics_manager.py +213 -0
  167. aiecs/infrastructure/monitoring/structured_logger.py +48 -0
  168. aiecs/infrastructure/monitoring/tracing_manager.py +410 -0
  169. aiecs/infrastructure/persistence/__init__.py +24 -0
  170. aiecs/infrastructure/persistence/context_engine_client.py +187 -0
  171. aiecs/infrastructure/persistence/database_manager.py +333 -0
  172. aiecs/infrastructure/persistence/file_storage.py +754 -0
  173. aiecs/infrastructure/persistence/redis_client.py +220 -0
  174. aiecs/llm/__init__.py +86 -0
  175. aiecs/llm/callbacks/__init__.py +11 -0
  176. aiecs/llm/callbacks/custom_callbacks.py +264 -0
  177. aiecs/llm/client_factory.py +420 -0
  178. aiecs/llm/clients/__init__.py +33 -0
  179. aiecs/llm/clients/base_client.py +193 -0
  180. aiecs/llm/clients/googleai_client.py +181 -0
  181. aiecs/llm/clients/openai_client.py +131 -0
  182. aiecs/llm/clients/vertex_client.py +437 -0
  183. aiecs/llm/clients/xai_client.py +184 -0
  184. aiecs/llm/config/__init__.py +51 -0
  185. aiecs/llm/config/config_loader.py +275 -0
  186. aiecs/llm/config/config_validator.py +236 -0
  187. aiecs/llm/config/model_config.py +151 -0
  188. aiecs/llm/utils/__init__.py +10 -0
  189. aiecs/llm/utils/validate_config.py +91 -0
  190. aiecs/main.py +363 -0
  191. aiecs/scripts/__init__.py +3 -0
  192. aiecs/scripts/aid/VERSION_MANAGEMENT.md +97 -0
  193. aiecs/scripts/aid/__init__.py +19 -0
  194. aiecs/scripts/aid/version_manager.py +215 -0
  195. aiecs/scripts/dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md +242 -0
  196. aiecs/scripts/dependance_check/README_DEPENDENCY_CHECKER.md +310 -0
  197. aiecs/scripts/dependance_check/__init__.py +17 -0
  198. aiecs/scripts/dependance_check/dependency_checker.py +938 -0
  199. aiecs/scripts/dependance_check/dependency_fixer.py +391 -0
  200. aiecs/scripts/dependance_check/download_nlp_data.py +396 -0
  201. aiecs/scripts/dependance_check/quick_dependency_check.py +270 -0
  202. aiecs/scripts/dependance_check/setup_nlp_data.sh +217 -0
  203. aiecs/scripts/dependance_patch/__init__.py +7 -0
  204. aiecs/scripts/dependance_patch/fix_weasel/README_WEASEL_PATCH.md +126 -0
  205. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  206. aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.py +128 -0
  207. aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.sh +82 -0
  208. aiecs/scripts/dependance_patch/fix_weasel/patch_weasel_library.sh +188 -0
  209. aiecs/scripts/dependance_patch/fix_weasel/run_weasel_patch.sh +41 -0
  210. aiecs/scripts/tools_develop/README.md +449 -0
  211. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  212. aiecs/scripts/tools_develop/__init__.py +21 -0
  213. aiecs/scripts/tools_develop/check_type_annotations.py +259 -0
  214. aiecs/scripts/tools_develop/validate_tool_schemas.py +422 -0
  215. aiecs/scripts/tools_develop/verify_tools.py +356 -0
  216. aiecs/tasks/__init__.py +1 -0
  217. aiecs/tasks/worker.py +172 -0
  218. aiecs/tools/__init__.py +299 -0
  219. aiecs/tools/apisource/__init__.py +99 -0
  220. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  221. aiecs/tools/apisource/intelligence/data_fusion.py +381 -0
  222. aiecs/tools/apisource/intelligence/query_analyzer.py +413 -0
  223. aiecs/tools/apisource/intelligence/search_enhancer.py +388 -0
  224. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  225. aiecs/tools/apisource/monitoring/metrics.py +303 -0
  226. aiecs/tools/apisource/providers/__init__.py +115 -0
  227. aiecs/tools/apisource/providers/base.py +664 -0
  228. aiecs/tools/apisource/providers/census.py +401 -0
  229. aiecs/tools/apisource/providers/fred.py +564 -0
  230. aiecs/tools/apisource/providers/newsapi.py +412 -0
  231. aiecs/tools/apisource/providers/worldbank.py +357 -0
  232. aiecs/tools/apisource/reliability/__init__.py +12 -0
  233. aiecs/tools/apisource/reliability/error_handler.py +375 -0
  234. aiecs/tools/apisource/reliability/fallback_strategy.py +391 -0
  235. aiecs/tools/apisource/tool.py +850 -0
  236. aiecs/tools/apisource/utils/__init__.py +9 -0
  237. aiecs/tools/apisource/utils/validators.py +338 -0
  238. aiecs/tools/base_tool.py +201 -0
  239. aiecs/tools/docs/__init__.py +121 -0
  240. aiecs/tools/docs/ai_document_orchestrator.py +599 -0
  241. aiecs/tools/docs/ai_document_writer_orchestrator.py +2403 -0
  242. aiecs/tools/docs/content_insertion_tool.py +1333 -0
  243. aiecs/tools/docs/document_creator_tool.py +1317 -0
  244. aiecs/tools/docs/document_layout_tool.py +1166 -0
  245. aiecs/tools/docs/document_parser_tool.py +994 -0
  246. aiecs/tools/docs/document_writer_tool.py +1818 -0
  247. aiecs/tools/knowledge_graph/__init__.py +17 -0
  248. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +734 -0
  249. aiecs/tools/knowledge_graph/graph_search_tool.py +923 -0
  250. aiecs/tools/knowledge_graph/kg_builder_tool.py +476 -0
  251. aiecs/tools/langchain_adapter.py +542 -0
  252. aiecs/tools/schema_generator.py +275 -0
  253. aiecs/tools/search_tool/__init__.py +100 -0
  254. aiecs/tools/search_tool/analyzers.py +589 -0
  255. aiecs/tools/search_tool/cache.py +260 -0
  256. aiecs/tools/search_tool/constants.py +128 -0
  257. aiecs/tools/search_tool/context.py +216 -0
  258. aiecs/tools/search_tool/core.py +749 -0
  259. aiecs/tools/search_tool/deduplicator.py +123 -0
  260. aiecs/tools/search_tool/error_handler.py +271 -0
  261. aiecs/tools/search_tool/metrics.py +371 -0
  262. aiecs/tools/search_tool/rate_limiter.py +178 -0
  263. aiecs/tools/search_tool/schemas.py +277 -0
  264. aiecs/tools/statistics/__init__.py +80 -0
  265. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +643 -0
  266. aiecs/tools/statistics/ai_insight_generator_tool.py +505 -0
  267. aiecs/tools/statistics/ai_report_orchestrator_tool.py +694 -0
  268. aiecs/tools/statistics/data_loader_tool.py +564 -0
  269. aiecs/tools/statistics/data_profiler_tool.py +658 -0
  270. aiecs/tools/statistics/data_transformer_tool.py +573 -0
  271. aiecs/tools/statistics/data_visualizer_tool.py +495 -0
  272. aiecs/tools/statistics/model_trainer_tool.py +487 -0
  273. aiecs/tools/statistics/statistical_analyzer_tool.py +459 -0
  274. aiecs/tools/task_tools/__init__.py +86 -0
  275. aiecs/tools/task_tools/chart_tool.py +732 -0
  276. aiecs/tools/task_tools/classfire_tool.py +922 -0
  277. aiecs/tools/task_tools/image_tool.py +447 -0
  278. aiecs/tools/task_tools/office_tool.py +684 -0
  279. aiecs/tools/task_tools/pandas_tool.py +635 -0
  280. aiecs/tools/task_tools/report_tool.py +635 -0
  281. aiecs/tools/task_tools/research_tool.py +392 -0
  282. aiecs/tools/task_tools/scraper_tool.py +715 -0
  283. aiecs/tools/task_tools/stats_tool.py +688 -0
  284. aiecs/tools/temp_file_manager.py +130 -0
  285. aiecs/tools/tool_executor/__init__.py +37 -0
  286. aiecs/tools/tool_executor/tool_executor.py +881 -0
  287. aiecs/utils/LLM_output_structor.py +445 -0
  288. aiecs/utils/__init__.py +34 -0
  289. aiecs/utils/base_callback.py +47 -0
  290. aiecs/utils/cache_provider.py +695 -0
  291. aiecs/utils/execution_utils.py +184 -0
  292. aiecs/utils/logging.py +1 -0
  293. aiecs/utils/prompt_loader.py +14 -0
  294. aiecs/utils/token_usage_repository.py +323 -0
  295. aiecs/ws/__init__.py +0 -0
  296. aiecs/ws/socket_server.py +52 -0
  297. aiecs-1.5.1.dist-info/METADATA +608 -0
  298. aiecs-1.5.1.dist-info/RECORD +302 -0
  299. aiecs-1.5.1.dist-info/WHEEL +5 -0
  300. aiecs-1.5.1.dist-info/entry_points.txt +10 -0
  301. aiecs-1.5.1.dist-info/licenses/LICENSE +225 -0
  302. aiecs-1.5.1.dist-info/top_level.txt +1 -0
aiecs/llm/client_factory.py
@@ -0,0 +1,420 @@
+ import logging
+ from typing import Dict, Any, Optional, Union, List
+ from enum import Enum
+
+ from .clients.base_client import BaseLLMClient, LLMMessage, LLMResponse
+ from .clients.openai_client import OpenAIClient
+ from .clients.vertex_client import VertexAIClient
+ from .clients.googleai_client import GoogleAIClient
+ from .clients.xai_client import XAIClient
+ from .callbacks.custom_callbacks import CustomAsyncCallbackHandler
+
+ logger = logging.getLogger(__name__)
+
+
+ class AIProvider(str, Enum):
+     OPENAI = "OpenAI"
+     VERTEX = "Vertex"
+     GOOGLEAI = "GoogleAI"
+     XAI = "xAI"
+
+
+ class LLMClientFactory:
+     """Factory for creating and managing LLM provider clients"""
+
+     _clients: Dict[AIProvider, BaseLLMClient] = {}
+
+     @classmethod
+     def get_client(cls, provider: Union[str, AIProvider]) -> BaseLLMClient:
+         """Get or create a client for the specified provider"""
+         if isinstance(provider, str):
+             try:
+                 provider = AIProvider(provider)
+             except ValueError:
+                 raise ValueError(f"Unsupported provider: {provider}")
+
+         if provider not in cls._clients:
+             cls._clients[provider] = cls._create_client(provider)
+
+         return cls._clients[provider]
+
+     @classmethod
+     def _create_client(cls, provider: AIProvider) -> BaseLLMClient:
+         """Create a new client instance for the provider"""
+         if provider == AIProvider.OPENAI:
+             return OpenAIClient()
+         elif provider == AIProvider.VERTEX:
+             return VertexAIClient()
+         elif provider == AIProvider.GOOGLEAI:
+             return GoogleAIClient()
+         elif provider == AIProvider.XAI:
+             return XAIClient()
+         else:
+             raise ValueError(f"Unsupported provider: {provider}")
+
+     @classmethod
+     async def close_all(cls):
+         """Close all active clients"""
+         for client in cls._clients.values():
+             try:
+                 await client.close()
+             except Exception as e:
+                 logger.error(f"Error closing client {client.provider_name}: {e}")
+         cls._clients.clear()
+
+     @classmethod
+     async def close_client(cls, provider: Union[str, AIProvider]):
+         """Close a specific client"""
+         if isinstance(provider, str):
+             provider = AIProvider(provider)
+
+         if provider in cls._clients:
+             try:
+                 await cls._clients[provider].close()
+                 del cls._clients[provider]
+             except Exception as e:
+                 logger.error(f"Error closing client {provider}: {e}")
+
+     @classmethod
+     def reload_config(cls):
+         """
+         Reload LLM models configuration.
+
+         This reloads the configuration from the YAML file, allowing for
+         hot-reloading of model settings without restarting the application.
+         """
+         try:
+             from aiecs.llm.config import reload_llm_config
+
+             config = reload_llm_config()
+             logger.info(f"Reloaded LLM configuration: {len(config.providers)} providers")
+             return config
+         except Exception as e:
+             logger.error(f"Failed to reload LLM configuration: {e}")
+             raise
+
+
+ class LLMClientManager:
+     """High-level manager for LLM operations with context-aware provider selection"""
+
+     def __init__(self):
+         self.factory = LLMClientFactory()
+
+     def _extract_ai_preference(
+         self, context: Optional[Dict[str, Any]]
+     ) -> tuple[Optional[str], Optional[str]]:
+         """Extract AI provider and model from context"""
+         if not context:
+             return None, None
+
+         metadata = context.get("metadata", {})
+
+         # First, check for aiPreference in metadata
+         ai_preference = metadata.get("aiPreference", {})
+         if isinstance(ai_preference, dict):
+             provider = ai_preference.get("provider")
+             model = ai_preference.get("model")
+             if provider is not None:
+                 return provider, model
+
+         # Fall back to direct provider/model in metadata
+         provider = metadata.get("provider")
+         model = metadata.get("model")
+         return provider, model
+
+     async def generate_text(
+         self,
+         messages: Union[str, list[LLMMessage]],
+         provider: Optional[Union[str, AIProvider]] = None,
+         model: Optional[str] = None,
+         context: Optional[Dict[str, Any]] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
+         **kwargs,
+     ) -> LLMResponse:
+         """
+         Generate text using context-aware provider selection
+
+         Args:
+             messages: Either a string prompt or list of LLMMessage objects
+             provider: AI provider to use (can be overridden by context)
+             model: Specific model to use (can be overridden by context)
+             context: TaskContext or dict containing aiPreference
+             temperature: Sampling temperature (0.0 to 2.0)
+             max_tokens: Maximum tokens to generate
+             callbacks: List of callback handlers to execute during LLM calls
+             **kwargs: Additional provider-specific parameters
+
+         Returns:
+             LLMResponse object with generated text and metadata
+         """
+         # Extract provider/model from context if available
+         context_provider, context_model = self._extract_ai_preference(context)
+
+         # Use context preferences if available, otherwise use provided values
+         final_provider = context_provider or provider or AIProvider.OPENAI
+         final_model = context_model or model
+
+         # Convert string prompt to messages format
+         if isinstance(messages, str):
+             messages = [LLMMessage(role="user", content=messages)]
+
+         # Execute on_llm_start callbacks
+         if callbacks:
+             # Convert LLMMessage objects to dictionaries for callbacks
+             messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
+             for callback in callbacks:
+                 try:
+                     await callback.on_llm_start(
+                         messages_dict,
+                         provider=final_provider,
+                         model=final_model,
+                         **kwargs,
+                     )
+                 except Exception as e:
+                     logger.error(f"Error in callback on_llm_start: {e}")
+
+         try:
+             # Get the appropriate client
+             client = self.factory.get_client(final_provider)
+
+             # Generate text
+             response = await client.generate_text(
+                 messages=messages,
+                 model=final_model,
+                 temperature=temperature,
+                 max_tokens=max_tokens,
+                 **kwargs,
+             )
+
+             # Execute on_llm_end callbacks
+             if callbacks:
+                 # Convert LLMResponse object to dictionary for callbacks
+                 response_dict = {
+                     "content": response.content,
+                     "provider": response.provider,
+                     "model": response.model,
+                     "tokens_used": response.tokens_used,
+                     "prompt_tokens": response.prompt_tokens,
+                     "completion_tokens": response.completion_tokens,
+                     "cost_estimate": response.cost_estimate,
+                     "response_time": response.response_time,
+                 }
+                 for callback in callbacks:
+                     try:
+                         await callback.on_llm_end(
+                             response_dict,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
+                     except Exception as e:
+                         logger.error(f"Error in callback on_llm_end: {e}")
+
+             logger.info(f"Generated text using {final_provider}/{response.model}")
+             return response
+
+         except Exception as e:
+             # Execute on_llm_error callbacks
+             if callbacks:
+                 for callback in callbacks:
+                     try:
+                         await callback.on_llm_error(
+                             e,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
+                     except Exception as callback_error:
+                         logger.error(f"Error in callback on_llm_error: {callback_error}")
+
+             # Re-raise the original exception
+             raise
+
+     async def stream_text(
+         self,
+         messages: Union[str, list[LLMMessage]],
+         provider: Optional[Union[str, AIProvider]] = None,
+         model: Optional[str] = None,
+         context: Optional[Dict[str, Any]] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
+         **kwargs,
+     ):
+         """
+         Stream text generation using context-aware provider selection
+
+         Args:
+             messages: Either a string prompt or list of LLMMessage objects
+             provider: AI provider to use (can be overridden by context)
+             model: Specific model to use (can be overridden by context)
+             context: TaskContext or dict containing aiPreference
+             temperature: Sampling temperature (0.0 to 2.0)
+             max_tokens: Maximum tokens to generate
+             callbacks: List of callback handlers to execute during LLM calls
+             **kwargs: Additional provider-specific parameters
+
+         Yields:
+             str: Incremental text chunks
+         """
+         # Extract provider/model from context if available
+         context_provider, context_model = self._extract_ai_preference(context)
+
+         # Use context preferences if available, otherwise use provided values
+         final_provider = context_provider or provider or AIProvider.OPENAI
+         final_model = context_model or model
+
+         # Convert string prompt to messages format
+         if isinstance(messages, str):
+             messages = [LLMMessage(role="user", content=messages)]
+
+         # Execute on_llm_start callbacks
+         if callbacks:
+             # Convert LLMMessage objects to dictionaries for callbacks
+             messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
+             for callback in callbacks:
+                 try:
+                     await callback.on_llm_start(
+                         messages_dict,
+                         provider=final_provider,
+                         model=final_model,
+                         **kwargs,
+                     )
+                 except Exception as e:
+                     logger.error(f"Error in callback on_llm_start: {e}")
+
+         try:
+             # Get the appropriate client
+             client = self.factory.get_client(final_provider)
+
+             # Collect streamed content for token counting
+             collected_content = ""
+
+             # Stream text
+             async for chunk in await client.stream_text(
+                 messages=messages,
+                 model=final_model,
+                 temperature=temperature,
+                 max_tokens=max_tokens,
+                 **kwargs,
+             ):
+                 collected_content += chunk
+                 yield chunk
+
+             # Create a response object for callbacks (streaming doesn't return LLMResponse directly)
+             # We need to estimate token usage for streaming responses
+             estimated_tokens = len(collected_content) // 4  # Rough estimation
+             stream_response = LLMResponse(
+                 content=collected_content,
+                 provider=str(final_provider),
+                 model=final_model or "unknown",
+                 tokens_used=estimated_tokens,
+             )
+
+             # Execute on_llm_end callbacks
+             if callbacks:
+                 # Convert LLMResponse object to dictionary for callbacks
+                 response_dict = {
+                     "content": stream_response.content,
+                     "provider": stream_response.provider,
+                     "model": stream_response.model,
+                     "tokens_used": stream_response.tokens_used,
+                     "prompt_tokens": stream_response.prompt_tokens,
+                     "completion_tokens": stream_response.completion_tokens,
+                     "cost_estimate": stream_response.cost_estimate,
+                     "response_time": stream_response.response_time,
+                 }
+                 for callback in callbacks:
+                     try:
+                         await callback.on_llm_end(
+                             response_dict,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
+                     except Exception as e:
+                         logger.error(f"Error in callback on_llm_end: {e}")
+
+         except Exception as e:
+             # Execute on_llm_error callbacks
+             if callbacks:
+                 for callback in callbacks:
+                     try:
+                         await callback.on_llm_error(
+                             e,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
+                     except Exception as callback_error:
+                         logger.error(f"Error in callback on_llm_error: {callback_error}")
+
+             # Re-raise the original exception
+             raise
+
+     async def close(self):
+         """Close all clients"""
+         await self.factory.close_all()
+
+
+ # Global instance for easy access
+ _llm_manager = LLMClientManager()
+
+
+ async def get_llm_manager() -> LLMClientManager:
+     """Get the global LLM manager instance"""
+     return _llm_manager
+
+
+ # Convenience functions for backward compatibility
+
+
+ async def generate_text(
+     messages: Union[str, list[LLMMessage]],
+     provider: Optional[Union[str, AIProvider]] = None,
+     model: Optional[str] = None,
+     context: Optional[Dict[str, Any]] = None,
+     temperature: float = 0.7,
+     max_tokens: Optional[int] = None,
+     callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
+     **kwargs,
+ ) -> LLMResponse:
+     """Generate text using the global LLM manager"""
+     manager = await get_llm_manager()
+     return await manager.generate_text(
+         messages,
+         provider,
+         model,
+         context,
+         temperature,
+         max_tokens,
+         callbacks,
+         **kwargs,
+     )
+
+
+ async def stream_text(
+     messages: Union[str, list[LLMMessage]],
+     provider: Optional[Union[str, AIProvider]] = None,
+     model: Optional[str] = None,
+     context: Optional[Dict[str, Any]] = None,
+     temperature: float = 0.7,
+     max_tokens: Optional[int] = None,
+     callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
+     **kwargs,
+ ):
+     """Stream text using the global LLM manager"""
+     manager = await get_llm_manager()
+     async for chunk in manager.stream_text(
+         messages,
+         provider,
+         model,
+         context,
+         temperature,
+         max_tokens,
+         callbacks,
+         **kwargs,
+     ):
+         yield chunk
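
The factory above caches one client per provider, and the manager resolves the provider in the order: context metadata, explicit argument, OpenAI default. A minimal usage sketch of the module-level convenience functions follows; the aiecs import path comes from this diff, while the Vertex model name is an illustrative assumption, not a value shipped with the package:

import asyncio

from aiecs.llm.client_factory import generate_text, stream_text


async def main():
    # metadata.aiPreference overrides the provider/model arguments,
    # which in turn override the OpenAI default.
    context = {"metadata": {"aiPreference": {"provider": "Vertex", "model": "gemini-1.5-pro"}}}
    response = await generate_text("Summarize this diff in one line.", context=context)
    print(response.provider, response.model, response.tokens_used)

    # Streaming yields incremental chunks; afterwards the manager estimates
    # token usage at roughly four characters per token.
    async for chunk in stream_text("Count to five.", provider="OpenAI"):
        print(chunk, end="")


asyncio.run(main())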
aiecs/llm/clients/__init__.py
@@ -0,0 +1,33 @@
+ """
+ LLM Client implementations.
+
+ This package contains all LLM provider client implementations.
+ """
+
+ from .base_client import (
+     BaseLLMClient,
+     LLMMessage,
+     LLMResponse,
+     LLMClientError,
+     ProviderNotAvailableError,
+     RateLimitError,
+ )
+ from .openai_client import OpenAIClient
+ from .vertex_client import VertexAIClient
+ from .googleai_client import GoogleAIClient
+ from .xai_client import XAIClient
+
+ __all__ = [
+     # Base classes
+     "BaseLLMClient",
+     "LLMMessage",
+     "LLMResponse",
+     "LLMClientError",
+     "ProviderNotAvailableError",
+     "RateLimitError",
+     # Client implementations
+     "OpenAIClient",
+     "VertexAIClient",
+     "GoogleAIClient",
+     "XAIClient",
+ ]
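
Because the subpackage re-exports the client classes alongside the message and error types, callers need only one import. A small sketch of assumed usage, not taken from the package documentation:

from aiecs.llm.clients import LLMMessage, OpenAIClient, RateLimitError


async def ask(prompt: str) -> str:
    messages = [LLMMessage(role="user", content=prompt)]
    # BaseLLMClient provides the async context manager, which calls close().
    async with OpenAIClient() as client:
        try:
            response = await client.generate_text(messages)
            return response.content
        except RateLimitError:
            return ""  # caller decides how to back off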
aiecs/llm/clients/base_client.py
@@ -0,0 +1,193 @@
+ from abc import ABC, abstractmethod
+ from typing import Dict, Any, Optional, List, AsyncGenerator
+ from dataclasses import dataclass
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ # Lazy import to avoid circular dependency
+
+
+ def _get_config_loader():
+     """Lazy import of config loader to avoid circular dependency"""
+     from aiecs.llm.config import get_llm_config_loader
+
+     return get_llm_config_loader()
+
+
+ @dataclass
+ class LLMMessage:
+     role: str  # "system", "user", "assistant"
+     content: str
+
+
+ @dataclass
+ class LLMResponse:
+     content: str
+     provider: str
+     model: str
+     tokens_used: Optional[int] = None
+     prompt_tokens: Optional[int] = None
+     completion_tokens: Optional[int] = None
+     cost_estimate: Optional[float] = None
+     response_time: Optional[float] = None
+     # Added for backward compatibility
+     metadata: Optional[Dict[str, Any]] = None
+
+     def __post_init__(self):
+         """Ensure consistency of token data"""
+         # If detailed token counts are present but no total,
+         # compute the total
+         if (
+             self.prompt_tokens is not None
+             and self.completion_tokens is not None
+             and self.tokens_used is None
+         ):
+             self.tokens_used = self.prompt_tokens + self.completion_tokens
+
+         # If only the total is available, the prompt/completion
+         # split is unknown and cannot be reconstructed
+         elif (
+             self.tokens_used is not None
+             and self.prompt_tokens is None
+             and self.completion_tokens is None
+         ):
+             # Cannot allocate accurately; keep as is
+             pass
+
+
+ class LLMClientError(Exception):
+     """Base exception for LLM client errors"""
+
+
+ class ProviderNotAvailableError(LLMClientError):
+     """Raised when a provider is not available or misconfigured"""
+
+
+ class RateLimitError(LLMClientError):
+     """Raised when a rate limit is exceeded"""
+
+
+ class BaseLLMClient(ABC):
+     """Abstract base class for all LLM provider clients"""
+
+     def __init__(self, provider_name: str):
+         self.provider_name = provider_name
+         self.logger = logging.getLogger(f"{__name__}.{provider_name}")
+
+     @abstractmethod
+     async def generate_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs,
+     ) -> LLMResponse:
+         """Generate text using the provider's API"""
+
+     @abstractmethod
+     async def stream_text(
+         self,
+         messages: List[LLMMessage],
+         model: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs,
+     ) -> AsyncGenerator[str, None]:
+         """Stream text generation using the provider's API"""
+
+     @abstractmethod
+     async def close(self):
+         """Clean up resources"""
+
+     async def __aenter__(self):
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         await self.close()
+
+     def _count_tokens_estimate(self, text: str) -> int:
+         """Rough token count estimation (4 chars ≈ 1 token for English)"""
+         return len(text) // 4
+
+     def _estimate_cost(
+         self,
+         model: str,
+         input_tokens: int,
+         output_tokens: int,
+         token_costs: Dict,
+     ) -> float:
+         """
+         Estimate the cost of the API call.
+
+         DEPRECATED: Use _estimate_cost_from_config instead for config-based cost estimation.
+         This method is kept for backward compatibility.
+         """
+         if model in token_costs:
+             costs = token_costs[model]
+             return (input_tokens * costs["input"] + output_tokens * costs["output"]) / 1000
+         return 0.0
+
+     def _estimate_cost_from_config(
+         self, model_name: str, input_tokens: int, output_tokens: int
+     ) -> float:
+         """
+         Estimate the cost using configuration-based pricing.
+
+         Args:
+             model_name: Name of the model
+             input_tokens: Number of input tokens
+             output_tokens: Number of output tokens
+
+         Returns:
+             Estimated cost in USD
+         """
+         try:
+             loader = _get_config_loader()
+             model_config = loader.get_model_config(self.provider_name, model_name)
+
+             if model_config and model_config.costs:
+                 input_cost = (input_tokens * model_config.costs.input) / 1000
+                 output_cost = (output_tokens * model_config.costs.output) / 1000
+                 return input_cost + output_cost
+             else:
+                 self.logger.warning(
+                     f"No cost configuration found for model {model_name} "
+                     f"in provider {self.provider_name}"
+                 )
+                 return 0.0
+         except Exception as e:
+             self.logger.warning(f"Failed to estimate cost from config: {e}")
+             return 0.0
+
+     def _get_model_config(self, model_name: str):
+         """
+         Get model configuration from the config loader.
+
+         Args:
+             model_name: Name of the model
+
+         Returns:
+             ModelConfig if found, None otherwise
+         """
+         try:
+             loader = _get_config_loader()
+             return loader.get_model_config(self.provider_name, model_name)
+         except Exception as e:
+             self.logger.warning(f"Failed to get model config: {e}")
+             return None
+
+     def _get_default_model(self) -> Optional[str]:
+         """
+         Get the default model for this provider from configuration.
+
+         Returns:
+             Default model name if configured, None otherwise
+         """
+         try:
+             loader = _get_config_loader()
+             return loader.get_default_model(self.provider_name)
+         except Exception as e:
+             self.logger.warning(f"Failed to get default model: {e}")
+             return None
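
A new provider plugs in by subclassing BaseLLMClient and implementing the three abstract methods. A minimal sketch under stated assumptions: EchoClient is hypothetical and not part of aiecs, and its stream_text is written as a coroutine that returns an async generator so that it matches the `await client.stream_text(...)` call site in client_factory.py:

from typing import AsyncGenerator, List, Optional

from aiecs.llm.clients.base_client import BaseLLMClient, LLMMessage, LLMResponse


class EchoClient(BaseLLMClient):
    """Toy client that echoes the last message back."""

    def __init__(self):
        super().__init__(provider_name="Echo")

    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> LLMResponse:
        content = messages[-1].content
        # LLMResponse.__post_init__ derives tokens_used from the two counts below.
        return LLMResponse(
            content=content,
            provider=self.provider_name,
            model=model or self._get_default_model() or "echo-1",
            prompt_tokens=self._count_tokens_estimate(content),
            completion_tokens=self._count_tokens_estimate(content),
        )

    async def stream_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        async def _chunks():
            for word in messages[-1].content.split():
                yield word + " "

        return _chunks()

    async def close(self):
        pass  # nothing to release for this toy client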