aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of aiecs might be problematic.

Files changed (340)
  1. aiecs/__init__.py +13 -16
  2. aiecs/__main__.py +7 -7
  3. aiecs/aiecs_client.py +269 -75
  4. aiecs/application/executors/operation_executor.py +79 -54
  5. aiecs/application/knowledge_graph/__init__.py +7 -0
  6. aiecs/application/knowledge_graph/builder/__init__.py +37 -0
  7. aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
  8. aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
  9. aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
  10. aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
  11. aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
  12. aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
  13. aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
  14. aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
  15. aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
  16. aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
  17. aiecs/application/knowledge_graph/extractors/base.py +98 -0
  18. aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
  19. aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
  20. aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
  21. aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
  22. aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
  23. aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
  24. aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
  25. aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
  26. aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
  27. aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
  28. aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
  29. aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
  30. aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
  31. aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
  32. aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
  33. aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
  34. aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
  35. aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
  36. aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
  37. aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
  38. aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
  39. aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
  40. aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
  41. aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
  42. aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
  43. aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
  44. aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
  45. aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
  46. aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
  47. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
  48. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
  49. aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
  50. aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
  51. aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
  52. aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
  53. aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
  54. aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
  55. aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
  56. aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
  57. aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
  58. aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
  59. aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
  60. aiecs/application/knowledge_graph/search/__init__.py +59 -0
  61. aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
  62. aiecs/application/knowledge_graph/search/reranker.py +293 -0
  63. aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
  64. aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
  65. aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
  66. aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
  67. aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
  68. aiecs/application/knowledge_graph/validators/__init__.py +13 -0
  69. aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
  70. aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
  71. aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
  72. aiecs/common/__init__.py +9 -0
  73. aiecs/common/knowledge_graph/__init__.py +17 -0
  74. aiecs/common/knowledge_graph/runnable.py +471 -0
  75. aiecs/config/__init__.py +20 -5
  76. aiecs/config/config.py +762 -31
  77. aiecs/config/graph_config.py +131 -0
  78. aiecs/config/tool_config.py +435 -0
  79. aiecs/core/__init__.py +29 -13
  80. aiecs/core/interface/__init__.py +2 -2
  81. aiecs/core/interface/execution_interface.py +22 -22
  82. aiecs/core/interface/storage_interface.py +37 -88
  83. aiecs/core/registry/__init__.py +31 -0
  84. aiecs/core/registry/service_registry.py +92 -0
  85. aiecs/domain/__init__.py +270 -1
  86. aiecs/domain/agent/__init__.py +191 -0
  87. aiecs/domain/agent/base_agent.py +3949 -0
  88. aiecs/domain/agent/exceptions.py +99 -0
  89. aiecs/domain/agent/graph_aware_mixin.py +569 -0
  90. aiecs/domain/agent/hybrid_agent.py +1731 -0
  91. aiecs/domain/agent/integration/__init__.py +29 -0
  92. aiecs/domain/agent/integration/context_compressor.py +216 -0
  93. aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
  94. aiecs/domain/agent/integration/protocols.py +281 -0
  95. aiecs/domain/agent/integration/retry_policy.py +218 -0
  96. aiecs/domain/agent/integration/role_config.py +213 -0
  97. aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
  98. aiecs/domain/agent/lifecycle.py +291 -0
  99. aiecs/domain/agent/llm_agent.py +692 -0
  100. aiecs/domain/agent/memory/__init__.py +12 -0
  101. aiecs/domain/agent/memory/conversation.py +1124 -0
  102. aiecs/domain/agent/migration/__init__.py +14 -0
  103. aiecs/domain/agent/migration/conversion.py +163 -0
  104. aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
  105. aiecs/domain/agent/models.py +894 -0
  106. aiecs/domain/agent/observability.py +479 -0
  107. aiecs/domain/agent/persistence.py +449 -0
  108. aiecs/domain/agent/prompts/__init__.py +29 -0
  109. aiecs/domain/agent/prompts/builder.py +159 -0
  110. aiecs/domain/agent/prompts/formatters.py +187 -0
  111. aiecs/domain/agent/prompts/template.py +255 -0
  112. aiecs/domain/agent/registry.py +253 -0
  113. aiecs/domain/agent/tool_agent.py +444 -0
  114. aiecs/domain/agent/tools/__init__.py +15 -0
  115. aiecs/domain/agent/tools/schema_generator.py +377 -0
  116. aiecs/domain/community/__init__.py +155 -0
  117. aiecs/domain/community/agent_adapter.py +469 -0
  118. aiecs/domain/community/analytics.py +432 -0
  119. aiecs/domain/community/collaborative_workflow.py +648 -0
  120. aiecs/domain/community/communication_hub.py +634 -0
  121. aiecs/domain/community/community_builder.py +320 -0
  122. aiecs/domain/community/community_integration.py +796 -0
  123. aiecs/domain/community/community_manager.py +803 -0
  124. aiecs/domain/community/decision_engine.py +849 -0
  125. aiecs/domain/community/exceptions.py +231 -0
  126. aiecs/domain/community/models/__init__.py +33 -0
  127. aiecs/domain/community/models/community_models.py +234 -0
  128. aiecs/domain/community/resource_manager.py +461 -0
  129. aiecs/domain/community/shared_context_manager.py +589 -0
  130. aiecs/domain/context/__init__.py +40 -10
  131. aiecs/domain/context/context_engine.py +1910 -0
  132. aiecs/domain/context/conversation_models.py +87 -53
  133. aiecs/domain/context/graph_memory.py +582 -0
  134. aiecs/domain/execution/model.py +12 -4
  135. aiecs/domain/knowledge_graph/__init__.py +19 -0
  136. aiecs/domain/knowledge_graph/models/__init__.py +52 -0
  137. aiecs/domain/knowledge_graph/models/entity.py +148 -0
  138. aiecs/domain/knowledge_graph/models/evidence.py +178 -0
  139. aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
  140. aiecs/domain/knowledge_graph/models/path.py +171 -0
  141. aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
  142. aiecs/domain/knowledge_graph/models/query.py +261 -0
  143. aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
  144. aiecs/domain/knowledge_graph/models/relation.py +202 -0
  145. aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
  146. aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
  147. aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
  148. aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
  149. aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
  150. aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
  151. aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
  152. aiecs/domain/task/dsl_processor.py +172 -56
  153. aiecs/domain/task/model.py +20 -8
  154. aiecs/domain/task/task_context.py +27 -24
  155. aiecs/infrastructure/__init__.py +0 -2
  156. aiecs/infrastructure/graph_storage/__init__.py +11 -0
  157. aiecs/infrastructure/graph_storage/base.py +837 -0
  158. aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
  159. aiecs/infrastructure/graph_storage/cache.py +424 -0
  160. aiecs/infrastructure/graph_storage/distributed.py +223 -0
  161. aiecs/infrastructure/graph_storage/error_handling.py +380 -0
  162. aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
  163. aiecs/infrastructure/graph_storage/health_checks.py +378 -0
  164. aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
  165. aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
  166. aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
  167. aiecs/infrastructure/graph_storage/metrics.py +344 -0
  168. aiecs/infrastructure/graph_storage/migration.py +400 -0
  169. aiecs/infrastructure/graph_storage/pagination.py +483 -0
  170. aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
  171. aiecs/infrastructure/graph_storage/postgres.py +1563 -0
  172. aiecs/infrastructure/graph_storage/property_storage.py +353 -0
  173. aiecs/infrastructure/graph_storage/protocols.py +76 -0
  174. aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
  175. aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
  176. aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
  177. aiecs/infrastructure/graph_storage/streaming.py +487 -0
  178. aiecs/infrastructure/graph_storage/tenant.py +412 -0
  179. aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
  180. aiecs/infrastructure/messaging/websocket_manager.py +51 -35
  181. aiecs/infrastructure/monitoring/__init__.py +22 -0
  182. aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
  183. aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
  184. aiecs/infrastructure/monitoring/structured_logger.py +3 -7
  185. aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
  186. aiecs/infrastructure/persistence/__init__.py +14 -1
  187. aiecs/infrastructure/persistence/context_engine_client.py +184 -0
  188. aiecs/infrastructure/persistence/database_manager.py +67 -43
  189. aiecs/infrastructure/persistence/file_storage.py +180 -103
  190. aiecs/infrastructure/persistence/redis_client.py +74 -21
  191. aiecs/llm/__init__.py +73 -25
  192. aiecs/llm/callbacks/__init__.py +11 -0
  193. aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
  194. aiecs/llm/client_factory.py +230 -37
  195. aiecs/llm/client_resolver.py +155 -0
  196. aiecs/llm/clients/__init__.py +38 -0
  197. aiecs/llm/clients/base_client.py +328 -0
  198. aiecs/llm/clients/google_function_calling_mixin.py +415 -0
  199. aiecs/llm/clients/googleai_client.py +314 -0
  200. aiecs/llm/clients/openai_client.py +158 -0
  201. aiecs/llm/clients/openai_compatible_mixin.py +367 -0
  202. aiecs/llm/clients/vertex_client.py +1186 -0
  203. aiecs/llm/clients/xai_client.py +201 -0
  204. aiecs/llm/config/__init__.py +51 -0
  205. aiecs/llm/config/config_loader.py +272 -0
  206. aiecs/llm/config/config_validator.py +206 -0
  207. aiecs/llm/config/model_config.py +143 -0
  208. aiecs/llm/protocols.py +149 -0
  209. aiecs/llm/utils/__init__.py +10 -0
  210. aiecs/llm/utils/validate_config.py +89 -0
  211. aiecs/main.py +140 -121
  212. aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
  213. aiecs/scripts/aid/__init__.py +19 -0
  214. aiecs/scripts/aid/module_checker.py +499 -0
  215. aiecs/scripts/aid/version_manager.py +235 -0
  216. aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
  217. aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
  218. aiecs/scripts/dependance_check/__init__.py +15 -0
  219. aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
  220. aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
  221. aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
  222. aiecs/scripts/dependance_patch/__init__.py +7 -0
  223. aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
  224. aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
  225. aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
  226. aiecs/scripts/knowledge_graph/__init__.py +3 -0
  227. aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
  228. aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
  229. aiecs/scripts/tools_develop/README.md +671 -0
  230. aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
  231. aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
  232. aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
  233. aiecs/scripts/tools_develop/__init__.py +21 -0
  234. aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
  235. aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
  236. aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
  237. aiecs/scripts/tools_develop/schema_coverage.py +511 -0
  238. aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
  239. aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
  240. aiecs/scripts/tools_develop/verify_tools.py +352 -0
  241. aiecs/tasks/__init__.py +0 -1
  242. aiecs/tasks/worker.py +115 -47
  243. aiecs/tools/__init__.py +194 -72
  244. aiecs/tools/apisource/__init__.py +99 -0
  245. aiecs/tools/apisource/intelligence/__init__.py +19 -0
  246. aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
  247. aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
  248. aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
  249. aiecs/tools/apisource/monitoring/__init__.py +9 -0
  250. aiecs/tools/apisource/monitoring/metrics.py +330 -0
  251. aiecs/tools/apisource/providers/__init__.py +112 -0
  252. aiecs/tools/apisource/providers/base.py +671 -0
  253. aiecs/tools/apisource/providers/census.py +397 -0
  254. aiecs/tools/apisource/providers/fred.py +535 -0
  255. aiecs/tools/apisource/providers/newsapi.py +409 -0
  256. aiecs/tools/apisource/providers/worldbank.py +352 -0
  257. aiecs/tools/apisource/reliability/__init__.py +12 -0
  258. aiecs/tools/apisource/reliability/error_handler.py +363 -0
  259. aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
  260. aiecs/tools/apisource/tool.py +832 -0
  261. aiecs/tools/apisource/utils/__init__.py +9 -0
  262. aiecs/tools/apisource/utils/validators.py +334 -0
  263. aiecs/tools/base_tool.py +415 -21
  264. aiecs/tools/docs/__init__.py +121 -0
  265. aiecs/tools/docs/ai_document_orchestrator.py +607 -0
  266. aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
  267. aiecs/tools/docs/content_insertion_tool.py +1320 -0
  268. aiecs/tools/docs/document_creator_tool.py +1464 -0
  269. aiecs/tools/docs/document_layout_tool.py +1160 -0
  270. aiecs/tools/docs/document_parser_tool.py +1016 -0
  271. aiecs/tools/docs/document_writer_tool.py +2008 -0
  272. aiecs/tools/knowledge_graph/__init__.py +17 -0
  273. aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
  274. aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
  275. aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
  276. aiecs/tools/langchain_adapter.py +300 -138
  277. aiecs/tools/schema_generator.py +455 -0
  278. aiecs/tools/search_tool/__init__.py +100 -0
  279. aiecs/tools/search_tool/analyzers.py +581 -0
  280. aiecs/tools/search_tool/cache.py +264 -0
  281. aiecs/tools/search_tool/constants.py +128 -0
  282. aiecs/tools/search_tool/context.py +224 -0
  283. aiecs/tools/search_tool/core.py +778 -0
  284. aiecs/tools/search_tool/deduplicator.py +119 -0
  285. aiecs/tools/search_tool/error_handler.py +242 -0
  286. aiecs/tools/search_tool/metrics.py +343 -0
  287. aiecs/tools/search_tool/rate_limiter.py +172 -0
  288. aiecs/tools/search_tool/schemas.py +275 -0
  289. aiecs/tools/statistics/__init__.py +80 -0
  290. aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
  291. aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
  292. aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
  293. aiecs/tools/statistics/data_loader_tool.py +555 -0
  294. aiecs/tools/statistics/data_profiler_tool.py +638 -0
  295. aiecs/tools/statistics/data_transformer_tool.py +580 -0
  296. aiecs/tools/statistics/data_visualizer_tool.py +498 -0
  297. aiecs/tools/statistics/model_trainer_tool.py +507 -0
  298. aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
  299. aiecs/tools/task_tools/__init__.py +49 -36
  300. aiecs/tools/task_tools/chart_tool.py +200 -184
  301. aiecs/tools/task_tools/classfire_tool.py +268 -267
  302. aiecs/tools/task_tools/image_tool.py +220 -141
  303. aiecs/tools/task_tools/office_tool.py +226 -146
  304. aiecs/tools/task_tools/pandas_tool.py +477 -121
  305. aiecs/tools/task_tools/report_tool.py +390 -142
  306. aiecs/tools/task_tools/research_tool.py +149 -79
  307. aiecs/tools/task_tools/scraper_tool.py +339 -145
  308. aiecs/tools/task_tools/stats_tool.py +448 -209
  309. aiecs/tools/temp_file_manager.py +26 -24
  310. aiecs/tools/tool_executor/__init__.py +18 -16
  311. aiecs/tools/tool_executor/tool_executor.py +364 -52
  312. aiecs/utils/LLM_output_structor.py +74 -48
  313. aiecs/utils/__init__.py +14 -3
  314. aiecs/utils/base_callback.py +0 -3
  315. aiecs/utils/cache_provider.py +696 -0
  316. aiecs/utils/execution_utils.py +50 -31
  317. aiecs/utils/prompt_loader.py +1 -0
  318. aiecs/utils/token_usage_repository.py +37 -11
  319. aiecs/ws/socket_server.py +14 -4
  320. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
  321. aiecs-1.7.17.dist-info/RECORD +337 -0
  322. aiecs-1.7.17.dist-info/entry_points.txt +13 -0
  323. aiecs/config/registry.py +0 -19
  324. aiecs/domain/context/content_engine.py +0 -982
  325. aiecs/llm/base_client.py +0 -99
  326. aiecs/llm/openai_client.py +0 -125
  327. aiecs/llm/vertex_client.py +0 -186
  328. aiecs/llm/xai_client.py +0 -184
  329. aiecs/scripts/dependency_checker.py +0 -857
  330. aiecs/scripts/quick_dependency_check.py +0 -269
  331. aiecs/tools/task_tools/search_api.py +0 -7
  332. aiecs-1.0.1.dist-info/RECORD +0 -90
  333. aiecs-1.0.1.dist-info/entry_points.txt +0 -7
  334. /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
  335. /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
  336. /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
  337. /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
  338. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
  339. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
  340. {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
aiecs/llm/client_factory.py

@@ -1,33 +1,121 @@
 import logging
-from typing import Dict, Any, Optional, Union, List
+from typing import Dict, Any, Optional, Union, List, TYPE_CHECKING
 from enum import Enum
 
-from .base_client import BaseLLMClient, LLMMessage, LLMResponse
-from .openai_client import OpenAIClient
-from .vertex_client import VertexAIClient
-from .xai_client import XAIClient
-from ..utils.base_callback import CustomAsyncCallbackHandler
+from .clients.base_client import BaseLLMClient, LLMMessage, LLMResponse
+from .clients.openai_client import OpenAIClient
+from .clients.vertex_client import VertexAIClient
+from .clients.googleai_client import GoogleAIClient
+from .clients.xai_client import XAIClient
+from .clients.openai_compatible_mixin import StreamChunk
+from .callbacks.custom_callbacks import CustomAsyncCallbackHandler
+
+if TYPE_CHECKING:
+    from .protocols import LLMClientProtocol
 
 logger = logging.getLogger(__name__)
 
+
 class AIProvider(str, Enum):
     OPENAI = "OpenAI"
     VERTEX = "Vertex"
+    GOOGLEAI = "GoogleAI"
     XAI = "xAI"
 
+
 class LLMClientFactory:
     """Factory for creating and managing LLM provider clients"""
 
     _clients: Dict[AIProvider, BaseLLMClient] = {}
+    _custom_clients: Dict[str, "LLMClientProtocol"] = {}
+
+    @classmethod
+    def register_custom_provider(cls, name: str, client: "LLMClientProtocol") -> None:
+        """
+        Register a custom LLM client provider.
+
+        This allows registration of custom LLM clients that implement the LLMClientProtocol
+        without inheriting from BaseLLMClient. Custom providers can be retrieved by name
+        using get_client().
+
+        Args:
+            name: Custom provider name (e.g., "my-llm", "llama-local", "custom-gpt")
+            client: Client implementing LLMClientProtocol
+
+        Raises:
+            ValueError: If client doesn't implement LLMClientProtocol
+            ValueError: If name conflicts with standard AIProvider enum values
+
+        Example:
+            ```python
+            # Register custom LLM client
+            custom_client = MyCustomLLMClient()
+            LLMClientFactory.register_custom_provider("my-llm", custom_client)
+
+            # Use custom client
+            client = LLMClientFactory.get_client("my-llm")
+            response = await client.generate_text(messages)
+            ```
+        """
+        # Import here to avoid circular dependency
+        from .protocols import LLMClientProtocol
+
+        # Validate protocol compliance
+        if not isinstance(client, LLMClientProtocol):
+            raise ValueError(
+                f"Client must implement LLMClientProtocol. "
+                f"Required methods: generate_text, stream_text, close, get_embeddings. "
+                f"Required attribute: provider_name"
+            )
+
+        # Prevent conflicts with standard provider names
+        try:
+            AIProvider(name)
+            raise ValueError(
+                f"Custom provider name '{name}' conflicts with standard AIProvider enum. "
+                f"Please use a different name."
+            )
+        except ValueError as e:
+            # If ValueError is raised because name is not in enum, that's good
+            if "conflicts with standard AIProvider" in str(e):
+                raise
+            # Otherwise, name is not in enum, proceed with registration
+
+        cls._custom_clients[name] = client
+        logger.info(f"Registered custom LLM provider: {name}")
 
     @classmethod
-    def get_client(cls, provider: Union[str, AIProvider]) -> BaseLLMClient:
-        """Get or create a client for the specified provider"""
+    def get_client(cls, provider: Union[str, AIProvider]) -> Union[BaseLLMClient, "LLMClientProtocol"]:
+        """
+        Get or create a client for the specified provider.
+
+        Supports both standard AIProvider enum values and custom provider names
+        registered via register_custom_provider().
+
+        Args:
+            provider: AIProvider enum or custom provider name string
+
+        Returns:
+            LLM client (BaseLLMClient for standard providers, LLMClientProtocol for custom)
+
+        Raises:
+            ValueError: If provider is unknown (not standard and not registered)
+        """
+        # Check custom providers first
+        if isinstance(provider, str) and provider in cls._custom_clients:
+            return cls._custom_clients[provider]
+
+        # Handle standard providers
         if isinstance(provider, str):
             try:
                 provider = AIProvider(provider)
             except ValueError:
-                raise ValueError(f"Unsupported provider: {provider}")
+                raise ValueError(
+                    f"Unknown provider: {provider}. "
+                    f"Standard providers: {[p.value for p in AIProvider]}. "
+                    f"Custom providers: {list(cls._custom_clients.keys())}. "
+                    f"Register custom providers with LLMClientFactory.register_custom_provider()"
+                )
 
         if provider not in cls._clients:
             cls._clients[provider] = cls._create_client(provider)
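
get_client now returns registered protocol objects as-is, so a custom provider needs no BaseLLMClient subclass. Below is a minimal sketch of a protocol-compliant client; only the method names and the provider_name attribute come from the error message above, and the exact signatures in aiecs/llm/protocols.py may differ. EchoLLMClient is hypothetical, not part of aiecs.

```python
# Hypothetical sketch of a custom client; EchoLLMClient is not part of aiecs.
from typing import Any, AsyncIterator, List


class EchoLLMClient:
    """Toy client that echoes the last message instead of calling a backend."""

    provider_name = "echo-llm"

    async def generate_text(self, messages: List[Any], **kwargs: Any) -> Any:
        # A real client would call its provider API here.
        return messages[-1]

    async def stream_text(self, messages: List[Any], **kwargs: Any) -> AsyncIterator[str]:
        # Async generator, matching how the manager iterates stream_text below.
        yield str(messages[-1])

    async def get_embeddings(self, texts: List[str], **kwargs: Any) -> List[List[float]]:
        return [[0.0] for _ in texts]

    async def close(self) -> None:
        pass


# Registration and lookup, mirroring the register_custom_provider docstring:
# LLMClientFactory.register_custom_provider("echo-llm", EchoLLMClient())
# client = LLMClientFactory.get_client("echo-llm")
```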
@@ -41,6 +129,8 @@ class LLMClientFactory:
             return OpenAIClient()
         elif provider == AIProvider.VERTEX:
             return VertexAIClient()
+        elif provider == AIProvider.GOOGLEAI:
+            return GoogleAIClient()
         elif provider == AIProvider.XAI:
             return XAIClient()
         else:
@@ -48,7 +138,8 @@ class LLMClientFactory:
 
     @classmethod
     async def close_all(cls):
-        """Close all active clients"""
+        """Close all active clients (both standard and custom)"""
+        # Close standard clients
        for client in cls._clients.values():
             try:
                 await client.close()
@@ -56,11 +147,34 @@ class LLMClientFactory:
                 logger.error(f"Error closing client {client.provider_name}: {e}")
         cls._clients.clear()
 
+        # Close custom clients
+        for name, client in cls._custom_clients.items():
+            try:
+                await client.close()
+            except Exception as e:
+                logger.error(f"Error closing custom client {name}: {e}")
+        cls._custom_clients.clear()
+
     @classmethod
     async def close_client(cls, provider: Union[str, AIProvider]):
-        """Close a specific client"""
+        """Close a specific client (standard or custom)"""
+        # Check if it's a custom provider
+        if isinstance(provider, str) and provider in cls._custom_clients:
+            try:
+                await cls._custom_clients[provider].close()
+                del cls._custom_clients[provider]
+                logger.info(f"Closed custom client: {provider}")
+            except Exception as e:
+                logger.error(f"Error closing custom client {provider}: {e}")
+            return
+
+        # Handle standard providers
         if isinstance(provider, str):
-            provider = AIProvider(provider)
+            try:
+                provider = AIProvider(provider)
+            except ValueError:
+                logger.warning(f"Unknown provider to close: {provider}")
+                return
 
         if provider in cls._clients:
             try:
@@ -69,6 +183,25 @@ class LLMClientFactory:
             except Exception as e:
                 logger.error(f"Error closing client {provider}: {e}")
 
+    @classmethod
+    def reload_config(cls):
+        """
+        Reload LLM models configuration.
+
+        This reloads the configuration from the YAML file, allowing for
+        hot-reloading of model settings without restarting the application.
+        """
+        try:
+            from aiecs.llm.config import reload_llm_config
+
+            config = reload_llm_config()
+            logger.info(f"Reloaded LLM configuration: {len(config.providers)} providers")
+            return config
+        except Exception as e:
+            logger.error(f"Failed to reload LLM configuration: {e}")
+            raise
+
+
 class LLMClientManager:
     """High-level manager for LLM operations with context-aware provider selection"""
 
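
reload_config delegates to reload_llm_config from aiecs.llm.config (entry 205 in the file list). A brief usage sketch, assuming a valid models YAML is configured; the .providers attribute on the returned config is inferred from the log call above:

```python
# Sketch: hot-reload model settings without restarting the process.
from aiecs.llm.client_factory import LLMClientFactory

config = LLMClientFactory.reload_config()
print(f"Providers after reload: {len(config.providers)}")
```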
@@ -80,19 +213,19 @@ class LLMClientManager:
         if not context:
             return None, None
 
-        metadata = context.get('metadata', {})
+        metadata = context.get("metadata", {})
 
         # First, check for aiPreference in metadata
-        ai_preference = metadata.get('aiPreference', {})
+        ai_preference = metadata.get("aiPreference", {})
         if isinstance(ai_preference, dict):
-            provider = ai_preference.get('provider')
-            model = ai_preference.get('model')
+            provider = ai_preference.get("provider")
+            model = ai_preference.get("model")
             if provider is not None:
                 return provider, model
 
         # Fallback to direct provider/model in metadata
-        provider = metadata.get('provider')
-        model = metadata.get('model')
+        provider = metadata.get("provider")
+        model = metadata.get("model")
         return provider, model
 
     async def generate_text(
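
The selection logic checks metadata.aiPreference first, then falls back to top-level metadata keys. For reference, the two context shapes it accepts (provider and model values here are illustrative):

```python
# Context shapes accepted by the selection logic above (values are illustrative).
preferred = {"metadata": {"aiPreference": {"provider": "Vertex", "model": "gemini-pro"}}}
fallback = {"metadata": {"provider": "OpenAI", "model": "gpt-4"}}
```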
@@ -104,7 +237,7 @@ class LLMClientManager:
         temperature: float = 0.7,
         max_tokens: Optional[int] = None,
         callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-        **kwargs
+        **kwargs,
     ) -> LLMResponse:
         """
         Generate text using context-aware provider selection
@@ -139,7 +272,12 @@ class LLMClientManager:
             messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
             for callback in callbacks:
                 try:
-                    await callback.on_llm_start(messages_dict, provider=final_provider, model=final_model, **kwargs)
+                    await callback.on_llm_start(
+                        messages_dict,
+                        provider=final_provider,
+                        model=final_model,
+                        **kwargs,
+                    )
                 except Exception as e:
                     logger.error(f"Error in callback on_llm_start: {e}")
 
@@ -153,7 +291,7 @@ class LLMClientManager:
                 model=final_model,
                 temperature=temperature,
                 max_tokens=max_tokens,
-                **kwargs
+                **kwargs,
             )
 
             # Execute on_llm_end callbacks
@@ -167,11 +305,16 @@ class LLMClientManager:
                     "prompt_tokens": response.prompt_tokens,
                     "completion_tokens": response.completion_tokens,
                     "cost_estimate": response.cost_estimate,
-                    "response_time": response.response_time
+                    "response_time": response.response_time,
                 }
                 for callback in callbacks:
                     try:
-                        await callback.on_llm_end(response_dict, provider=final_provider, model=final_model, **kwargs)
+                        await callback.on_llm_end(
+                            response_dict,
+                            provider=final_provider,
+                            model=final_model,
+                            **kwargs,
+                        )
                     except Exception as e:
                         logger.error(f"Error in callback on_llm_end: {e}")
 
@@ -183,7 +326,12 @@ class LLMClientManager:
             if callbacks:
                 for callback in callbacks:
                     try:
-                        await callback.on_llm_error(e, provider=final_provider, model=final_model, **kwargs)
+                        await callback.on_llm_error(
+                            e,
+                            provider=final_provider,
+                            model=final_model,
+                            **kwargs,
+                        )
                     except Exception as callback_error:
                         logger.error(f"Error in callback on_llm_error: {callback_error}")
 
@@ -199,7 +347,7 @@ class LLMClientManager:
         temperature: float = 0.7,
         max_tokens: Optional[int] = None,
         callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-        **kwargs
+        **kwargs,
     ):
         """
         Stream text generation using context-aware provider selection
@@ -234,7 +382,12 @@ class LLMClientManager:
             messages_dict = [{"role": msg.role, "content": msg.content} for msg in messages]
             for callback in callbacks:
                 try:
-                    await callback.on_llm_start(messages_dict, provider=final_provider, model=final_model, **kwargs)
+                    await callback.on_llm_start(
+                        messages_dict,
+                        provider=final_provider,
+                        model=final_model,
+                        **kwargs,
+                    )
                 except Exception as e:
                     logger.error(f"Error in callback on_llm_start: {e}")
 
@@ -246,14 +399,20 @@ class LLMClientManager:
             collected_content = ""
 
             # Stream text
-            async for chunk in await client.stream_text(
+            # Note: stream_text is an async generator, not a coroutine,
+            # so we iterate directly without await
+            async for chunk in client.stream_text(
                 messages=messages,
                 model=final_model,
                 temperature=temperature,
                 max_tokens=max_tokens,
-                **kwargs
+                **kwargs,
             ):
-                collected_content += chunk
+                # Handle StreamChunk objects (when return_chunks=True or function calling)
+                if hasattr(chunk, 'content') and chunk.content:
+                    collected_content += chunk.content
+                elif isinstance(chunk, str):
+                    collected_content += chunk
                 yield chunk
 
             # Create a response object for callbacks (streaming doesn't return LLMResponse directly)
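
Because stream_text can now yield either plain strings or StreamChunk objects, downstream consumers need the same duck-typed handling the manager uses. A sketch of such a consumer, assuming these are the only two chunk types:

```python
# Sketch: collect a full completion from a mixed str/StreamChunk stream.
async def collect_stream(client, messages, **kwargs) -> str:
    collected = ""
    async for chunk in client.stream_text(messages=messages, **kwargs):
        if hasattr(chunk, "content") and chunk.content:  # StreamChunk
            collected += chunk.content
        elif isinstance(chunk, str):  # plain text delta
            collected += chunk
    return collected
```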
@@ -263,7 +422,7 @@ class LLMClientManager:
                 content=collected_content,
                 provider=str(final_provider),
                 model=final_model or "unknown",
-                tokens_used=estimated_tokens
+                tokens_used=estimated_tokens,
             )
 
             # Execute on_llm_end callbacks
@@ -277,11 +436,16 @@ class LLMClientManager:
                     "prompt_tokens": stream_response.prompt_tokens,
                     "completion_tokens": stream_response.completion_tokens,
                     "cost_estimate": stream_response.cost_estimate,
-                    "response_time": stream_response.response_time
+                    "response_time": stream_response.response_time,
                 }
                 for callback in callbacks:
                     try:
-                        await callback.on_llm_end(response_dict, provider=final_provider, model=final_model, **kwargs)
+                        await callback.on_llm_end(
+                            response_dict,
+                            provider=final_provider,
+                            model=final_model,
+                            **kwargs,
+                        )
                     except Exception as e:
                         logger.error(f"Error in callback on_llm_end: {e}")
 
@@ -290,7 +454,12 @@ class LLMClientManager:
             if callbacks:
                 for callback in callbacks:
                     try:
-                        await callback.on_llm_error(e, provider=final_provider, model=final_model, **kwargs)
+                        await callback.on_llm_error(
+                            e,
+                            provider=final_provider,
+                            model=final_model,
+                            **kwargs,
+                        )
                     except Exception as callback_error:
                         logger.error(f"Error in callback on_llm_error: {callback_error}")
 
@@ -301,14 +470,19 @@ class LLMClientManager:
         """Close all clients"""
         await self.factory.close_all()
 
+
 # Global instance for easy access
 _llm_manager = LLMClientManager()
 
+
 async def get_llm_manager() -> LLMClientManager:
     """Get the global LLM manager instance"""
     return _llm_manager
 
+
 # Convenience functions for backward compatibility
+
+
 async def generate_text(
     messages: Union[str, list[LLMMessage]],
     provider: Optional[Union[str, AIProvider]] = None,
@@ -317,11 +491,21 @@ async def generate_text(
     temperature: float = 0.7,
     max_tokens: Optional[int] = None,
     callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-    **kwargs
+    **kwargs,
 ) -> LLMResponse:
     """Generate text using the global LLM manager"""
     manager = await get_llm_manager()
-    return await manager.generate_text(messages, provider, model, context, temperature, max_tokens, callbacks, **kwargs)
+    return await manager.generate_text(
+        messages,
+        provider,
+        model,
+        context,
+        temperature,
+        max_tokens,
+        callbacks,
+        **kwargs,
+    )
+
 
 async def stream_text(
     messages: Union[str, list[LLMMessage]],
@@ -331,9 +515,18 @@ async def stream_text(
     temperature: float = 0.7,
     max_tokens: Optional[int] = None,
     callbacks: Optional[List[CustomAsyncCallbackHandler]] = None,
-    **kwargs
+    **kwargs,
 ):
     """Stream text using the global LLM manager"""
     manager = await get_llm_manager()
-    async for chunk in manager.stream_text(messages, provider, model, context, temperature, max_tokens, callbacks, **kwargs):
+    async for chunk in manager.stream_text(
+        messages,
+        provider,
+        model,
+        context,
+        temperature,
+        max_tokens,
+        callbacks,
+        **kwargs,
+    ):
         yield chunk
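
For reference, a usage sketch of the backward-compatible module-level helpers defined above; the provider string and prompts are illustrative, and an OpenAI credential is assumed to be configured:

```python
# Sketch: module-level helpers backed by the global LLMClientManager.
import asyncio

from aiecs.llm.client_factory import generate_text, stream_text


async def main() -> None:
    response = await generate_text("Say hello", provider="OpenAI", temperature=0.2)
    print(response.content)

    async for chunk in stream_text("Count to three", provider="OpenAI"):
        print(chunk, end="")


asyncio.run(main())
```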
aiecs/llm/client_resolver.py (new file)

@@ -0,0 +1,155 @@
+"""
+LLM Client Resolution Helper
+
+Provides convenient helper functions for resolving LLM clients from provider names
+with caching support for improved performance.
+"""
+
+import logging
+from typing import Optional, Union, Dict, TYPE_CHECKING
+from aiecs.llm.client_factory import LLMClientFactory, AIProvider
+
+if TYPE_CHECKING:
+    from aiecs.llm.protocols import LLMClientProtocol
+
+logger = logging.getLogger(__name__)
+
+
+# Cache for resolved clients to avoid repeated factory calls
+_client_cache: Dict[str, "LLMClientProtocol"] = {}
+
+
+def resolve_llm_client(
+    provider: Union[str, AIProvider],
+    model: Optional[str] = None,
+    use_cache: bool = True,
+) -> "LLMClientProtocol":
+    """
+    Resolve an LLM client from a provider name with optional caching.
+
+    This helper function provides a convenient interface for resolving LLM clients
+    from provider names, supporting both standard AIProvider enum values and custom
+    provider names registered via LLMClientFactory.register_custom_provider().
+
+    The function includes built-in caching to avoid repeated factory calls for the
+    same provider, improving performance when the same client is requested multiple times.
+
+    Args:
+        provider: AIProvider enum or custom provider name string
+        model: Optional model name (for logging/debugging purposes)
+        use_cache: Whether to use cached clients (default: True)
+
+    Returns:
+        LLM client implementing LLMClientProtocol
+
+    Raises:
+        ValueError: If provider is unknown (not standard and not registered)
+
+    Example:
+        ```python
+        from aiecs.llm.client_resolver import resolve_llm_client
+        from aiecs.llm.client_factory import LLMClientFactory
+
+        # Resolve standard provider
+        client = resolve_llm_client("OpenAI", model="gpt-4")
+
+        # Register and resolve custom provider
+        LLMClientFactory.register_custom_provider("my-llm", custom_client)
+        client = resolve_llm_client("my-llm", model="custom-model")
+
+        # Use the client
+        response = await client.generate_text(messages, model="gpt-4")
+        ```
+
+    Note:
+        - Caching is based on provider name only, not model name
+        - Custom providers registered after caching will require cache clearing
+        - Use `clear_client_cache()` to clear the cache if needed
+    """
+    # Convert provider to string for cache key
+    cache_key = str(provider) if isinstance(provider, AIProvider) else provider
+
+    # Check cache first if caching is enabled
+    if use_cache and cache_key in _client_cache:
+        logger.debug(f"Using cached client for provider: {cache_key}")
+        return _client_cache[cache_key]
+
+    # Resolve client from factory
+    try:
+        client = LLMClientFactory.get_client(provider)
+
+        # Log resolution
+        if model:
+            logger.info(f"Resolved LLM client for provider: {cache_key}, model: {model}")
+        else:
+            logger.info(f"Resolved LLM client for provider: {cache_key}")
+
+        # Cache the client if caching is enabled
+        # Cast to LLMClientProtocol since BaseLLMClient implements the protocol
+        if use_cache:
+            from typing import cast
+            _client_cache[cache_key] = cast("LLMClientProtocol", client)
+            logger.debug(f"Cached client for provider: {cache_key}")
+
+        # Cast return value to match return type annotation
+        from typing import cast
+        return cast("LLMClientProtocol", client)
+
+    except ValueError as e:
+        logger.error(f"Failed to resolve LLM client for provider: {cache_key}")
+        raise
+
+
+def clear_client_cache(provider: Optional[Union[str, AIProvider]] = None) -> None:
+    """
+    Clear the client resolution cache.
+
+    Args:
+        provider: Optional provider to clear from cache.
+            If None, clears entire cache.
+
+    Example:
+        ```python
+        from aiecs.llm.client_resolver import clear_client_cache
+
+        # Clear specific provider
+        clear_client_cache("OpenAI")
+
+        # Clear entire cache
+        clear_client_cache()
+        ```
+    """
+    global _client_cache
+
+    if provider is None:
+        # Clear entire cache
+        count = len(_client_cache)
+        _client_cache.clear()
+        logger.info(f"Cleared entire client cache ({count} entries)")
+    else:
+        # Clear specific provider
+        cache_key = str(provider) if isinstance(provider, AIProvider) else provider
+        if cache_key in _client_cache:
+            del _client_cache[cache_key]
+            logger.info(f"Cleared cache for provider: {cache_key}")
+        else:
+            logger.debug(f"Provider not in cache: {cache_key}")
+
+
+def get_cached_providers() -> list[str]:
+    """
+    Get list of providers currently in the cache.
+
+    Returns:
+        List of provider names that have cached clients
+
+    Example:
+        ```python
+        from aiecs.llm.client_resolver import get_cached_providers
+
+        providers = get_cached_providers()
+        print(f"Cached providers: {providers}")
+        ```
+    """
+    return list(_client_cache.keys())
+
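
Putting the three helpers together, a round-trip implied by the docstring examples above:

```python
# Sketch: resolver cache round-trip.
from aiecs.llm.client_resolver import (
    clear_client_cache,
    get_cached_providers,
    resolve_llm_client,
)

client = resolve_llm_client("OpenAI")        # first call goes through the factory
client_again = resolve_llm_client("OpenAI")  # served from _client_cache
assert client is client_again
print(get_cached_providers())                # ['OpenAI']
clear_client_cache("OpenAI")                 # evict a single provider
```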
aiecs/llm/clients/__init__.py (new file)

@@ -0,0 +1,38 @@
+"""
+LLM Client implementations.
+
+This package contains all LLM provider client implementations.
+"""
+
+from .base_client import (
+    BaseLLMClient,
+    CacheControl,
+    LLMMessage,
+    LLMResponse,
+    LLMClientError,
+    ProviderNotAvailableError,
+    RateLimitError,
+)
+from .openai_compatible_mixin import StreamChunk
+from .openai_client import OpenAIClient
+from .vertex_client import VertexAIClient
+from .googleai_client import GoogleAIClient
+from .xai_client import XAIClient
+
+__all__ = [
+    # Base classes
+    "BaseLLMClient",
+    "CacheControl",
+    "LLMMessage",
+    "LLMResponse",
+    "LLMClientError",
+    "ProviderNotAvailableError",
+    "RateLimitError",
+    # Streaming support
+    "StreamChunk",
+    # Client implementations
+    "OpenAIClient",
+    "VertexAIClient",
+    "GoogleAIClient",
+    "XAIClient",
+]
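
These re-exports replace the flat 1.0.1 modules (aiecs/llm/base_client.py, openai_client.py, vertex_client.py and xai_client.py, removed as entries 325-328 in the file list). A migration sketch for import sites:

```python
# 1.0.1 (removed):
#   from aiecs.llm.base_client import BaseLLMClient, LLMMessage, LLMResponse
#   from aiecs.llm.openai_client import OpenAIClient
# 1.7.17, via the clients package shown above:
from aiecs.llm.clients import BaseLLMClient, LLMMessage, LLMResponse, OpenAIClient
from aiecs.llm.clients import GoogleAIClient  # provider new in 1.7.17
```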