aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of aiecs might be problematic.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +399 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3870 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1435 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +884 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +364 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +224 -36
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +324 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +457 -0
  199. aiecs/llm/clients/googleai_client.py +241 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +897 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1323 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1011 -0
  271. aiecs/tools/docs/document_writer_tool.py +1829 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +175 -131
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
  321. aiecs-1.7.6.dist-info/RECORD +337 -0
  322. aiecs-1.7.6.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
aiecs/llm/client_factory.py

@@ -1,33 +1,120 @@
  import logging
- from typing import Dict, Any, Optional, Union, List
+ from typing import Dict, Any, Optional, Union, List, TYPE_CHECKING
  from enum import Enum

- from .base_client import BaseLLMClient, LLMMessage, LLMResponse
- from .openai_client import OpenAIClient
- from .vertex_client import VertexAIClient
- from .xai_client import XAIClient
- from ..utils.base_callback import CustomAsyncCallbackHandler
+ from .clients.base_client import BaseLLMClient, LLMMessage, LLMResponse
+ from .clients.openai_client import OpenAIClient
+ from .clients.vertex_client import VertexAIClient
+ from .clients.googleai_client import GoogleAIClient
+ from .clients.xai_client import XAIClient
+ from .callbacks.custom_callbacks import CustomAsyncCallbackHandler
+
+ if TYPE_CHECKING:
+     from .protocols import LLMClientProtocol

  logger = logging.getLogger(__name__)

+
  class AIProvider(str, Enum):
      OPENAI = "OpenAI"
      VERTEX = "Vertex"
+     GOOGLEAI = "GoogleAI"
      XAI = "xAI"

+
  class LLMClientFactory:
      """Factory for creating and managing LLM provider clients"""

      _clients: Dict[AIProvider, BaseLLMClient] = {}
+     _custom_clients: Dict[str, "LLMClientProtocol"] = {}
+
+     @classmethod
+     def register_custom_provider(cls, name: str, client: "LLMClientProtocol") -> None:
+         """
+         Register a custom LLM client provider.
+
+         This allows registration of custom LLM clients that implement the LLMClientProtocol
+         without inheriting from BaseLLMClient. Custom providers can be retrieved by name
+         using get_client().
+
+         Args:
+             name: Custom provider name (e.g., "my-llm", "llama-local", "custom-gpt")
+             client: Client implementing LLMClientProtocol
+
+         Raises:
+             ValueError: If client doesn't implement LLMClientProtocol
+             ValueError: If name conflicts with standard AIProvider enum values
+
+         Example:
+             ```python
+             # Register custom LLM client
+             custom_client = MyCustomLLMClient()
+             LLMClientFactory.register_custom_provider("my-llm", custom_client)
+
+             # Use custom client
+             client = LLMClientFactory.get_client("my-llm")
+             response = await client.generate_text(messages)
+             ```
+         """
+         # Import here to avoid circular dependency
+         from .protocols import LLMClientProtocol
+
+         # Validate protocol compliance
+         if not isinstance(client, LLMClientProtocol):
+             raise ValueError(
+                 f"Client must implement LLMClientProtocol. "
+                 f"Required methods: generate_text, stream_text, close, get_embeddings. "
+                 f"Required attribute: provider_name"
+             )
+
+         # Prevent conflicts with standard provider names
+         try:
+             AIProvider(name)
+             raise ValueError(
+                 f"Custom provider name '{name}' conflicts with standard AIProvider enum. "
+                 f"Please use a different name."
+             )
+         except ValueError as e:
+             # If ValueError is raised because name is not in enum, that's good
+             if "conflicts with standard AIProvider" in str(e):
+                 raise
+             # Otherwise, name is not in enum, proceed with registration
+
+         cls._custom_clients[name] = client
+         logger.info(f"Registered custom LLM provider: {name}")

      @classmethod
-     def get_client(cls, provider: Union[str, AIProvider]) -> BaseLLMClient:
-         """Get or create a client for the specified provider"""
+     def get_client(cls, provider: Union[str, AIProvider]) -> Union[BaseLLMClient, "LLMClientProtocol"]:
+         """
+         Get or create a client for the specified provider.
+
+         Supports both standard AIProvider enum values and custom provider names
+         registered via register_custom_provider().
+
+         Args:
+             provider: AIProvider enum or custom provider name string
+
+         Returns:
+             LLM client (BaseLLMClient for standard providers, LLMClientProtocol for custom)
+
+         Raises:
+             ValueError: If provider is unknown (not standard and not registered)
+         """
+         # Check custom providers first
+         if isinstance(provider, str) and provider in cls._custom_clients:
+             return cls._custom_clients[provider]
+
+         # Handle standard providers
          if isinstance(provider, str):
              try:
                  provider = AIProvider(provider)
              except ValueError:
-                 raise ValueError(f"Unsupported provider: {provider}")
+                 raise ValueError(
+                     f"Unknown provider: {provider}. "
+                     f"Standard providers: {[p.value for p in AIProvider]}. "
+                     f"Custom providers: {list(cls._custom_clients.keys())}. "
+                     f"Register custom providers with LLMClientFactory.register_custom_provider()"
+                 )

          if provider not in cls._clients:
              cls._clients[provider] = cls._create_client(provider)
@@ -41,6 +128,8 @@ class LLMClientFactory:
              return OpenAIClient()
          elif provider == AIProvider.VERTEX:
              return VertexAIClient()
+         elif provider == AIProvider.GOOGLEAI:
+             return GoogleAIClient()
          elif provider == AIProvider.XAI:
              return XAIClient()
          else:
@@ -48,7 +137,8 @@

      @classmethod
      async def close_all(cls):
-         """Close all active clients"""
+         """Close all active clients (both standard and custom)"""
+         # Close standard clients
          for client in cls._clients.values():
              try:
                  await client.close()
@@ -56,11 +146,34 @@
                  logger.error(f"Error closing client {client.provider_name}: {e}")
          cls._clients.clear()

+         # Close custom clients
+         for name, client in cls._custom_clients.items():
+             try:
+                 await client.close()
+             except Exception as e:
+                 logger.error(f"Error closing custom client {name}: {e}")
+         cls._custom_clients.clear()
+
      @classmethod
      async def close_client(cls, provider: Union[str, AIProvider]):
-         """Close a specific client"""
+         """Close a specific client (standard or custom)"""
+         # Check if it's a custom provider
+         if isinstance(provider, str) and provider in cls._custom_clients:
+             try:
+                 await cls._custom_clients[provider].close()
+                 del cls._custom_clients[provider]
+                 logger.info(f"Closed custom client: {provider}")
+             except Exception as e:
+                 logger.error(f"Error closing custom client {provider}: {e}")
+             return
+
+         # Handle standard providers
          if isinstance(provider, str):
-             provider = AIProvider(provider)
+             try:
+                 provider = AIProvider(provider)
+             except ValueError:
+                 logger.warning(f"Unknown provider to close: {provider}")
+                 return

          if provider in cls._clients:
              try:
@@ -69,6 +182,25 @@
              except Exception as e:
                  logger.error(f"Error closing client {provider}: {e}")

+     @classmethod
+     def reload_config(cls):
+         """
+         Reload LLM models configuration.
+
+         This reloads the configuration from the YAML file, allowing for
+         hot-reloading of model settings without restarting the application.
+         """
+         try:
+             from aiecs.llm.config import reload_llm_config
+
+             config = reload_llm_config()
+             logger.info(f"Reloaded LLM configuration: {len(config.providers)} providers")
+             return config
+         except Exception as e:
+             logger.error(f"Failed to reload LLM configuration: {e}")
+             raise
+
+
  class LLMClientManager:
      """High-level manager for LLM operations with context-aware provider selection"""

@@ -80,19 +212,19 @@
          if not context:
              return None, None

-         metadata = context.get('metadata', {})
+         metadata = context.get("metadata", {})

          # First, check for aiPreference in metadata
-         ai_preference = metadata.get('aiPreference', {})
+         ai_preference = metadata.get("aiPreference", {})
          if isinstance(ai_preference, dict):
-             provider = ai_preference.get('provider')
-             model = ai_preference.get('model')
+             provider = ai_preference.get("provider")
+             model = ai_preference.get("model")
              if provider is not None:
                  return provider, model

          # Fallback to direct provider/model in metadata
-         provider = metadata.get('provider')
-         model = metadata.get('model')
+         provider = metadata.get("provider")
+         model = metadata.get("model")
          return provider, model

      async def generate_text(
@@ -104,7 +236,7 @@
          temperature: float = 0.7,
          max_tokens: Optional[int] = None,
          callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-         **kwargs
+         **kwargs,
      ) -> LLMResponse:
          """
          Generate text using context-aware provider selection
@@ -139,7 +271,12 @@
              messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
              for callback in callbacks:
                  try:
-                     await callback.on_llm_start(messages_dict, provider=final_provider, model=final_model, **kwargs)
+                     await callback.on_llm_start(
+                         messages_dict,
+                         provider=final_provider,
+                         model=final_model,
+                         **kwargs,
+                     )
                  except Exception as e:
                      logger.error(f"Error in callback on_llm_start: {e}")

@@ -153,7 +290,7 @@
                  model=final_model,
                  temperature=temperature,
                  max_tokens=max_tokens,
-                 **kwargs
+                 **kwargs,
              )

              # Execute on_llm_end callbacks
@@ -167,11 +304,16 @@
                      "prompt_tokens": response.prompt_tokens,
                      "completion_tokens": response.completion_tokens,
                      "cost_estimate": response.cost_estimate,
-                     "response_time": response.response_time
+                     "response_time": response.response_time,
                  }
                  for callback in callbacks:
                      try:
-                         await callback.on_llm_end(response_dict, provider=final_provider, model=final_model, **kwargs)
+                         await callback.on_llm_end(
+                             response_dict,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
                      except Exception as e:
                          logger.error(f"Error in callback on_llm_end: {e}")

@@ -183,7 +325,12 @@
              if callbacks:
                  for callback in callbacks:
                      try:
-                         await callback.on_llm_error(e, provider=final_provider, model=final_model, **kwargs)
+                         await callback.on_llm_error(
+                             e,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
                      except Exception as callback_error:
                          logger.error(f"Error in callback on_llm_error: {callback_error}")

@@ -199,7 +346,7 @@
          temperature: float = 0.7,
          max_tokens: Optional[int] = None,
          callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-         **kwargs
+         **kwargs,
      ):
          """
          Stream text generation using context-aware provider selection
@@ -234,7 +381,12 @@
              messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
              for callback in callbacks:
                  try:
-                     await callback.on_llm_start(messages_dict, provider=final_provider, model=final_model, **kwargs)
+                     await callback.on_llm_start(
+                         messages_dict,
+                         provider=final_provider,
+                         model=final_model,
+                         **kwargs,
+                     )
                  except Exception as e:
                      logger.error(f"Error in callback on_llm_start: {e}")

@@ -246,12 +398,14 @@
              collected_content = ""

              # Stream text
-             async for chunk in await client.stream_text(
+             # Note: stream_text is an async generator, not a coroutine,
+             # so we iterate directly without await
+             async for chunk in client.stream_text(
                  messages=messages,
                  model=final_model,
                  temperature=temperature,
                  max_tokens=max_tokens,
-                 **kwargs
+                 **kwargs,
              ):
                  collected_content += chunk
                  yield chunk
@@ -263,7 +417,7 @@
                  content=collected_content,
                  provider=str(final_provider),
                  model=final_model or "unknown",
-                 tokens_used=estimated_tokens
+                 tokens_used=estimated_tokens,
              )

              # Execute on_llm_end callbacks
@@ -277,11 +431,16 @@
                      "prompt_tokens": stream_response.prompt_tokens,
                      "completion_tokens": stream_response.completion_tokens,
                      "cost_estimate": stream_response.cost_estimate,
-                     "response_time": stream_response.response_time
+                     "response_time": stream_response.response_time,
                  }
                  for callback in callbacks:
                      try:
-                         await callback.on_llm_end(response_dict, provider=final_provider, model=final_model, **kwargs)
+                         await callback.on_llm_end(
+                             response_dict,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
                      except Exception as e:
                          logger.error(f"Error in callback on_llm_end: {e}")

@@ -290,7 +449,12 @@
              if callbacks:
                  for callback in callbacks:
                      try:
-                         await callback.on_llm_error(e, provider=final_provider, model=final_model, **kwargs)
+                         await callback.on_llm_error(
+                             e,
+                             provider=final_provider,
+                             model=final_model,
+                             **kwargs,
+                         )
                      except Exception as callback_error:
                          logger.error(f"Error in callback on_llm_error: {callback_error}")

@@ -301,14 +465,19 @@
          """Close all clients"""
          await self.factory.close_all()

+
  # Global instance for easy access
  _llm_manager = LLMClientManager()

+
  async def get_llm_manager() -> LLMClientManager:
      """Get the global LLM manager instance"""
      return _llm_manager

+
  # Convenience functions for backward compatibility
+
+
  async def generate_text(
      messages: Union[str, list[LLMMessage]],
      provider: Optional[Union[str, AIProvider]] = None,
@@ -317,11 +486,21 @@ async def generate_text(
      temperature: float = 0.7,
      max_tokens: Optional[int] = None,
      callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-     **kwargs
+     **kwargs,
  ) -> LLMResponse:
      """Generate text using the global LLM manager"""
      manager = await get_llm_manager()
-     return await manager.generate_text(messages, provider, model, context, temperature, max_tokens, callbacks, **kwargs)
+     return await manager.generate_text(
+         messages,
+         provider,
+         model,
+         context,
+         temperature,
+         max_tokens,
+         callbacks,
+         **kwargs,
+     )
+

  async def stream_text(
      messages: Union[str, list[LLMMessage]],
@@ -331,9 +510,18 @@ async def stream_text(
      temperature: float = 0.7,
      max_tokens: Optional[int] = None,
      callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-     **kwargs
+     **kwargs,
  ):
      """Stream text using the global LLM manager"""
      manager = await get_llm_manager()
-     async for chunk in manager.stream_text(messages, provider, model, context, temperature, max_tokens, callbacks, **kwargs):
+     async for chunk in manager.stream_text(
+         messages,
+         provider,
+         model,
+         context,
+         temperature,
+         max_tokens,
+         callbacks,
+         **kwargs,
+     ):
          yield chunk
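
The registration path above validates clients with an isinstance check against LLMClientProtocol, so a provider only needs the members named in the error message: generate_text, stream_text, close, get_embeddings, and provider_name. Below is a minimal sketch of such a provider, assuming the protocol is runtime-checkable and that LLMResponse accepts the fields used in the streaming hunk above; EchoClient and its trivial bodies are illustrative only, not part of the package:

```python
from typing import AsyncIterator, List, Union

from aiecs.llm.client_factory import LLMClientFactory
from aiecs.llm.clients.base_client import LLMMessage, LLMResponse


class EchoClient:
    """Hypothetical provider that echoes the last message back."""

    provider_name = "echo"  # required attribute per the protocol check

    async def generate_text(self, messages: List[LLMMessage], **kwargs) -> LLMResponse:
        # A real client would call a provider API here.
        return LLMResponse(
            content=messages[-1].content,
            provider=self.provider_name,
            model=kwargs.get("model") or "echo-1",
            tokens_used=0,
        )

    async def stream_text(self, messages: List[LLMMessage], **kwargs) -> AsyncIterator[str]:
        # Iterated directly as an async generator, matching the
        # "async for chunk in client.stream_text(...)" fix above.
        yield messages[-1].content

    async def get_embeddings(self, texts: Union[str, List[str]], **kwargs):
        raise NotImplementedError("this sketch does not provide embeddings")

    async def close(self) -> None:
        pass  # no connections to release


LLMClientFactory.register_custom_provider("echo", EchoClient())
client = LLMClientFactory.get_client("echo")  # returns the EchoClient instance
```

Note that the name "echo" deliberately avoids the reserved AIProvider values ("OpenAI", "Vertex", "GoogleAI", "xAI"), which register_custom_provider rejects.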
aiecs/llm/client_resolver.py

@@ -0,0 +1,155 @@
+ """
+ LLM Client Resolution Helper
+
+ Provides convenient helper functions for resolving LLM clients from provider names
+ with caching support for improved performance.
+ """
+
+ import logging
+ from typing import Optional, Union, Dict, TYPE_CHECKING
+ from aiecs.llm.client_factory import LLMClientFactory, AIProvider
+
+ if TYPE_CHECKING:
+     from aiecs.llm.protocols import LLMClientProtocol
+
+ logger = logging.getLogger(__name__)
+
+
+ # Cache for resolved clients to avoid repeated factory calls
+ _client_cache: Dict[str, "LLMClientProtocol"] = {}
+
+
+ def resolve_llm_client(
+     provider: Union[str, AIProvider],
+     model: Optional[str] = None,
+     use_cache: bool = True,
+ ) -> "LLMClientProtocol":
+     """
+     Resolve an LLM client from a provider name with optional caching.
+
+     This helper function provides a convenient interface for resolving LLM clients
+     from provider names, supporting both standard AIProvider enum values and custom
+     provider names registered via LLMClientFactory.register_custom_provider().
+
+     The function includes built-in caching to avoid repeated factory calls for the
+     same provider, improving performance when the same client is requested multiple times.
+
+     Args:
+         provider: AIProvider enum or custom provider name string
+         model: Optional model name (for logging/debugging purposes)
+         use_cache: Whether to use cached clients (default: True)
+
+     Returns:
+         LLM client implementing LLMClientProtocol
+
+     Raises:
+         ValueError: If provider is unknown (not standard and not registered)
+
+     Example:
+         ```python
+         from aiecs.llm.client_resolver import resolve_llm_client
+         from aiecs.llm.client_factory import LLMClientFactory
+
+         # Resolve standard provider
+         client = resolve_llm_client("OpenAI", model="gpt-4")
+
+         # Register and resolve custom provider
+         LLMClientFactory.register_custom_provider("my-llm", custom_client)
+         client = resolve_llm_client("my-llm", model="custom-model")
+
+         # Use the client
+         response = await client.generate_text(messages, model="gpt-4")
+         ```
+
+     Note:
+         - Caching is based on provider name only, not model name
+         - Custom providers registered after caching will require cache clearing
+         - Use `clear_client_cache()` to clear the cache if needed
+     """
+     # Convert provider to string for cache key
+     cache_key = str(provider) if isinstance(provider, AIProvider) else provider
+
+     # Check cache first if caching is enabled
+     if use_cache and cache_key in _client_cache:
+         logger.debug(f"Using cached client for provider: {cache_key}")
+         return _client_cache[cache_key]
+
+     # Resolve client from factory
+     try:
+         client = LLMClientFactory.get_client(provider)
+
+         # Log resolution
+         if model:
+             logger.info(f"Resolved LLM client for provider: {cache_key}, model: {model}")
+         else:
+             logger.info(f"Resolved LLM client for provider: {cache_key}")
+
+         # Cache the client if caching is enabled
+         # Cast to LLMClientProtocol since BaseLLMClient implements the protocol
+         if use_cache:
+             from typing import cast
+             _client_cache[cache_key] = cast("LLMClientProtocol", client)
+             logger.debug(f"Cached client for provider: {cache_key}")
+
+         # Cast return value to match return type annotation
+         from typing import cast
+         return cast("LLMClientProtocol", client)
+
+     except ValueError as e:
+         logger.error(f"Failed to resolve LLM client for provider: {cache_key}")
+         raise
+
+
+ def clear_client_cache(provider: Optional[Union[str, AIProvider]] = None) -> None:
+     """
+     Clear the client resolution cache.
+
+     Args:
+         provider: Optional provider to clear from cache.
+                   If None, clears entire cache.
+
+     Example:
+         ```python
+         from aiecs.llm.client_resolver import clear_client_cache
+
+         # Clear specific provider
+         clear_client_cache("OpenAI")
+
+         # Clear entire cache
+         clear_client_cache()
+         ```
+     """
+     global _client_cache
+
+     if provider is None:
+         # Clear entire cache
+         count = len(_client_cache)
+         _client_cache.clear()
+         logger.info(f"Cleared entire client cache ({count} entries)")
+     else:
+         # Clear specific provider
+         cache_key = str(provider) if isinstance(provider, AIProvider) else provider
+         if cache_key in _client_cache:
+             del _client_cache[cache_key]
+             logger.info(f"Cleared cache for provider: {cache_key}")
+         else:
+             logger.debug(f"Provider not in cache: {cache_key}")
+
+
+ def get_cached_providers() -> list[str]:
+     """
+     Get list of providers currently in the cache.
+
+     Returns:
+         List of provider names that have cached clients
+
+     Example:
+         ```python
+         from aiecs.llm.client_resolver import get_cached_providers
+
+         providers = get_cached_providers()
+         print(f"Cached providers: {providers}")
+         ```
+     """
+     return list(_client_cache.keys())
+
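
Together with the factory, this module gives a cached lookup path. A short usage sketch, assuming the OpenAI provider is configured with valid credentials; all names come from the module above:

```python
from aiecs.llm.client_resolver import (
    clear_client_cache,
    get_cached_providers,
    resolve_llm_client,
)

# First call resolves through LLMClientFactory and caches by provider name.
client = resolve_llm_client("OpenAI", model="gpt-4")

# Repeat lookups are served from the module-level cache (same object).
assert resolve_llm_client("OpenAI") is client

print(get_cached_providers())  # ['OpenAI']

clear_client_cache("OpenAI")  # evict a single provider...
clear_client_cache()          # ...or the whole cache
```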
aiecs/llm/clients/__init__.py

@@ -0,0 +1,38 @@
+ """
+ LLM Client implementations.
+
+ This package contains all LLM provider client implementations.
+ """
+
+ from .base_client import (
+     BaseLLMClient,
+     CacheControl,
+     LLMMessage,
+     LLMResponse,
+     LLMClientError,
+     ProviderNotAvailableError,
+     RateLimitError,
+ )
+ from .openai_compatible_mixin import StreamChunk
+ from .openai_client import OpenAIClient
+ from .vertex_client import VertexAIClient
+ from .googleai_client import GoogleAIClient
+ from .xai_client import XAIClient
+
+ __all__ = [
+     # Base classes
+     "BaseLLMClient",
+     "CacheControl",
+     "LLMMessage",
+     "LLMResponse",
+     "LLMClientError",
+     "ProviderNotAvailableError",
+     "RateLimitError",
+     # Streaming support
+     "StreamChunk",
+     # Client implementations
+     "OpenAIClient",
+     "VertexAIClient",
+     "GoogleAIClient",
+     "XAIClient",
+ ]
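
Because the old flat modules (aiecs/llm/base_client.py, openai_client.py, vertex_client.py, xai_client.py) are deleted in this release (entries 325-328 in the file list), imports move to this package. A minimal migration sketch, assuming LLMMessage takes role and content keywords, as the messages_dict construction in client_factory.py suggests:

```python
import asyncio

from aiecs.llm.clients import LLMMessage, OpenAIClient


async def main() -> None:
    messages = [LLMMessage(role="user", content="Hello")]
    client = OpenAIClient()
    try:
        # generate_text(messages, model=...) mirrors the call made by
        # LLMClientManager in client_factory.py above.
        response = await client.generate_text(messages, model="gpt-4")
        print(response.content)
    finally:
        await client.close()


asyncio.run(main())
```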