aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiecs might be problematic. Click here for more details.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +435 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3949 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1731 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +894 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +377 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +230 -37
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +328 -0
- aiecs/llm/clients/google_function_calling_mixin.py +415 -0
- aiecs/llm/clients/googleai_client.py +314 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +1186 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1464 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1016 -0
- aiecs/tools/docs/document_writer_tool.py +2008 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +220 -141
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
- aiecs-1.7.17.dist-info/RECORD +337 -0
- aiecs-1.7.17.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
from openai import AsyncOpenAI
|
|
2
|
+
from aiecs.config.config import get_settings
|
|
3
|
+
from aiecs.llm.clients.base_client import (
|
|
4
|
+
BaseLLMClient,
|
|
5
|
+
LLMMessage,
|
|
6
|
+
LLMResponse,
|
|
7
|
+
ProviderNotAvailableError,
|
|
8
|
+
RateLimitError,
|
|
9
|
+
)
|
|
10
|
+
from aiecs.llm.clients.openai_compatible_mixin import (
|
|
11
|
+
OpenAICompatibleFunctionCallingMixin,
|
|
12
|
+
StreamChunk,
|
|
13
|
+
)
|
|
14
|
+
from tenacity import (
|
|
15
|
+
retry,
|
|
16
|
+
stop_after_attempt,
|
|
17
|
+
wait_exponential,
|
|
18
|
+
retry_if_exception_type,
|
|
19
|
+
)
|
|
20
|
+
import logging
|
|
21
|
+
from typing import Dict, Optional, List, AsyncGenerator, cast, Any
|
|
22
|
+
|
|
23
|
+
# Lazy import to avoid circular dependency
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _get_config_loader():
    """Return the shared LLM config loader.

    The import happens inside the function body on purpose: importing
    ``aiecs.llm.config`` at module load time would create a circular
    dependency with this module.
    """
    from aiecs.llm.config import get_llm_config_loader as _loader_factory

    return _loader_factory()
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
logger = logging.getLogger(__name__)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class XAIClient(BaseLLMClient, OpenAICompatibleFunctionCallingMixin):
    """xAI (Grok) provider client.

    Talks to xAI's OpenAI-compatible endpoint (https://api.x.ai/v1) through
    the OpenAI SDK, so Function Calling is available via
    OpenAICompatibleFunctionCallingMixin.
    """

    def __init__(self) -> None:
        super().__init__("xAI")
        self.settings = get_settings()
        # Created lazily: the HTTP client on the first API call, the
        # model-name mapping on the first lookup.
        self._openai_client: Optional[AsyncOpenAI] = None
        self._model_map: Optional[Dict[str, str]] = None

    def _get_openai_client(self) -> AsyncOpenAI:
        """Lazy initialization of the OpenAI client for the xAI endpoint."""
        if not self._openai_client:
            api_key = self._get_api_key()
            self._openai_client = AsyncOpenAI(
                api_key=api_key,
                base_url="https://api.x.ai/v1",
                timeout=360.0,  # Override default timeout with longer timeout for reasoning models
            )
        return self._openai_client

    def _get_api_key(self) -> str:
        """Return the configured xAI API key.

        Supports both ``xai_api_key`` and the legacy ``grok_api_key``
        setting for backward compatibility.

        Raises:
            ProviderNotAvailableError: If neither setting is configured.
        """
        api_key = getattr(self.settings, "xai_api_key", None) or getattr(self.settings, "grok_api_key", None)
        if not api_key:
            raise ProviderNotAvailableError("xAI API key not configured")
        return api_key

    def _get_model_map(self) -> Dict[str, str]:
        """Return model-name mappings from configuration, cached after first load.

        Falls back to an empty mapping when the configuration cannot be
        loaded; in that case model names are passed through to the API
        unchanged.
        """
        if self._model_map is None:
            try:
                loader = _get_config_loader()
                provider_config = loader.get_provider_config("xAI")
                if provider_config and provider_config.model_mappings:
                    self._model_map = provider_config.model_mappings
                else:
                    self._model_map = {}
            except Exception as e:
                self.logger.warning(f"Failed to load model mappings from config: {e}")
                self._model_map = {}
        return self._model_map

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((Exception, RateLimitError)),
    )
    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        **kwargs,
    ) -> LLMResponse:
        """
        Generate text using xAI API via OpenAI library (supports all Grok models).

        xAI API is OpenAI-compatible, so it supports Function Calling.

        Raises:
            ProviderNotAvailableError: If no API key is configured.
            RateLimitError: If the provider reports rate limiting.
        """
        # _get_api_key() raises ProviderNotAvailableError when unset, so the
        # previous "if not api_key" re-check was unreachable and was removed.
        self._get_api_key()

        client = self._get_openai_client()

        # Resolve the model: explicit argument, configured default, then a
        # hard-coded fallback.
        selected_model = model or self._get_default_model() or "grok-4"

        # Translate the public model name to the provider's API model name.
        model_map = self._get_model_map()
        api_model = model_map.get(selected_model, selected_model)

        try:
            # Use mixin method for Function Calling support
            response = await self._generate_text_with_function_calling(
                client=client,
                messages=messages,
                model=api_model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                **kwargs,
            )

            # Override provider and model name for xAI
            response.provider = self.provider_name
            response.model = selected_model
            response.cost_estimate = 0.0  # xAI pricing not available yet

            return response

        except Exception as e:
            if "rate limit" in str(e).lower() or "429" in str(e):
                raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
            logger.error(f"xAI API error: {str(e)}")
            raise

    async def stream_text(  # type: ignore[override]
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Any] = None,
        return_chunks: bool = False,
        **kwargs,
    ) -> AsyncGenerator[Any, None]:
        """
        Stream text using xAI API via OpenAI library (supports all Grok models).

        xAI API is OpenAI-compatible, so it supports Function Calling.

        Args:
            return_chunks: If True, returns StreamChunk objects with tool_calls info; if False, returns str tokens only

        Raises:
            ProviderNotAvailableError: If no API key is configured.
            RateLimitError: If the provider reports rate limiting.
        """
        # Raises ProviderNotAvailableError when no key is configured (the
        # unreachable "if not api_key" re-check was removed).
        self._get_api_key()

        client = self._get_openai_client()

        # Resolve the model name, then map it to the provider's API name.
        selected_model = model or self._get_default_model() or "grok-4"
        model_map = self._get_model_map()
        api_model = model_map.get(selected_model, selected_model)

        try:
            # Use mixin method for Function Calling support
            async for chunk in self._stream_text_with_function_calling(
                client=client,
                messages=messages,
                model=api_model,
                temperature=temperature,
                max_tokens=max_tokens,
                functions=functions,
                tools=tools,
                tool_choice=tool_choice,
                return_chunks=return_chunks,
                **kwargs,
            ):
                yield chunk

        except Exception as e:
            if "rate limit" in str(e).lower() or "429" in str(e):
                raise RateLimitError(f"xAI rate limit exceeded: {str(e)}")
            logger.error(f"xAI API streaming error: {str(e)}")
            raise

    async def close(self):
        """Release the underlying HTTP client, if one was created."""
        if self._openai_client:
            await self._openai_client.close()
            self._openai_client = None
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM Configuration management.
|
|
3
|
+
|
|
4
|
+
This package handles configuration loading, validation, and management
|
|
5
|
+
for all LLM providers and models.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from .model_config import (
|
|
9
|
+
ModelCostConfig,
|
|
10
|
+
ModelCapabilities,
|
|
11
|
+
ModelDefaultParams,
|
|
12
|
+
ModelConfig,
|
|
13
|
+
ProviderConfig,
|
|
14
|
+
LLMModelsConfig,
|
|
15
|
+
)
|
|
16
|
+
from .config_loader import (
|
|
17
|
+
LLMConfigLoader,
|
|
18
|
+
get_llm_config_loader,
|
|
19
|
+
get_llm_config,
|
|
20
|
+
reload_llm_config,
|
|
21
|
+
)
|
|
22
|
+
from .config_validator import (
|
|
23
|
+
ConfigValidationError,
|
|
24
|
+
validate_cost_config,
|
|
25
|
+
validate_model_config,
|
|
26
|
+
validate_provider_config,
|
|
27
|
+
validate_llm_config,
|
|
28
|
+
validate_config_file,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
__all__ = [
|
|
32
|
+
# Configuration models
|
|
33
|
+
"ModelCostConfig",
|
|
34
|
+
"ModelCapabilities",
|
|
35
|
+
"ModelDefaultParams",
|
|
36
|
+
"ModelConfig",
|
|
37
|
+
"ProviderConfig",
|
|
38
|
+
"LLMModelsConfig",
|
|
39
|
+
# Config loader
|
|
40
|
+
"LLMConfigLoader",
|
|
41
|
+
"get_llm_config_loader",
|
|
42
|
+
"get_llm_config",
|
|
43
|
+
"reload_llm_config",
|
|
44
|
+
# Validation
|
|
45
|
+
"ConfigValidationError",
|
|
46
|
+
"validate_cost_config",
|
|
47
|
+
"validate_model_config",
|
|
48
|
+
"validate_provider_config",
|
|
49
|
+
"validate_llm_config",
|
|
50
|
+
"validate_config_file",
|
|
51
|
+
]
|
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Configuration loader for LLM models.
|
|
3
|
+
|
|
4
|
+
This module provides a singleton configuration loader that loads and manages
|
|
5
|
+
LLM model configurations from YAML files with support for hot-reloading.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import os
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Optional
|
|
12
|
+
import yaml
|
|
13
|
+
from threading import Lock
|
|
14
|
+
|
|
15
|
+
from aiecs.llm.config.model_config import (
|
|
16
|
+
LLMModelsConfig,
|
|
17
|
+
ProviderConfig,
|
|
18
|
+
ModelConfig,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class LLMConfigLoader:
    """
    Singleton configuration loader for LLM models.

    Supports:
    - Loading configuration from YAML files
    - Hot-reloading (manual refresh)
    - Thread-safe access
    - Caching for performance
    """

    # Singleton instance plus the lock that guards its creation.
    _instance: Optional["LLMConfigLoader"] = None
    _lock = Lock()
    # Separate lock serializing configuration (re)loads so concurrent
    # load_config() calls cannot interleave partial state.
    _config_lock = Lock()
    _initialized: bool = False

    def __new__(cls) -> "LLMConfigLoader":
        """Ensure singleton instance (double-checked locking)."""
        if cls._instance is None:
            with cls._lock:
                # Re-check under the lock: another thread may have won the race.
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self) -> None:
        """Initialize the configuration loader (no-op on repeated calls)."""
        if self._initialized:
            return

        # Cached parsed configuration and the path it was loaded from.
        self._config: Optional[LLMModelsConfig] = None
        self._config_path: Optional[Path] = None
        self._initialized = True
        logger.info("LLMConfigLoader initialized")

    def _find_config_file(self) -> Path:
        """
        Find the configuration file.

        Search order:
        1. Settings llm_models_config_path
        2. Environment variable LLM_MODELS_CONFIG
        3. aiecs/config/llm_models.yaml
        4. config/llm_models.yaml

        Returns:
            Path: First existing candidate, or the default package path
            (which may not exist) as a last resort.
        """
        # Check settings first. Imported lazily to avoid a circular import
        # between config modules at package load time.
        try:
            from aiecs.config.config import get_settings

            settings = get_settings()
            if settings.llm_models_config_path:
                path = Path(settings.llm_models_config_path)
                if path.exists():
                    logger.info(f"Using LLM config from settings: {path}")
                    return path
                else:
                    logger.warning(f"Settings llm_models_config_path does not exist: {path}")
        except Exception as e:
            # Settings are optional here; fall through to the other sources.
            logger.debug(f"Could not load settings: {e}")

        # Check environment variable
        env_path = os.environ.get("LLM_MODELS_CONFIG")
        if env_path:
            path = Path(env_path)
            if path.exists():
                logger.info(f"Using LLM config from environment: {path}")
                return path
            else:
                logger.warning(f"LLM_MODELS_CONFIG path does not exist: {path}")

        # Check standard locations relative to the installed package.
        current_dir = Path(__file__).parent.parent  # aiecs/

        # Try aiecs/config/llm_models.yaml
        config_path1 = current_dir / "config" / "llm_models.yaml"
        if config_path1.exists():
            logger.info(f"Using LLM config from: {config_path1}")
            return config_path1

        # Try config/llm_models.yaml (relative to project root)
        config_path2 = current_dir.parent / "config" / "llm_models.yaml"
        if config_path2.exists():
            logger.info(f"Using LLM config from: {config_path2}")
            return config_path2

        # Default to the first path even if it doesn't exist; load_config()
        # will raise FileNotFoundError with a clear message.
        logger.warning(f"LLM config file not found, using default path: {config_path1}")
        return config_path1

    def load_config(self, config_path: Optional[Path] = None) -> LLMModelsConfig:
        """
        Load configuration from YAML file.

        Args:
            config_path: Optional path to configuration file. If not provided,
                        will search in standard locations.

        Returns:
            LLMModelsConfig: Loaded and validated configuration

        Raises:
            FileNotFoundError: If config file doesn't exist
            ValueError: If config file is invalid
        """
        with self._config_lock:
            if config_path is None:
                config_path = self._find_config_file()
            else:
                # Accept str-like inputs from callers for convenience.
                config_path = Path(config_path)

            if not config_path.exists():
                raise FileNotFoundError(f"LLM models configuration file not found: {config_path}\n" f"Please create the configuration file or set LLM_MODELS_CONFIG environment variable.")

            try:
                with open(config_path, "r", encoding="utf-8") as f:
                    config_data = yaml.safe_load(f)

                if not config_data:
                    raise ValueError("Configuration file is empty")

                # Validate and parse using Pydantic
                self._config = LLMModelsConfig(**config_data)
                self._config_path = config_path

                logger.info(f"Loaded LLM configuration from {config_path}: " f"{len(self._config.providers)} providers, " f"{sum(len(p.models) for p in self._config.providers.values())} models")

                return self._config

            except yaml.YAMLError as e:
                # Chain the original error so the offending line/column in the
                # YAML file remains visible in the traceback (PEP 3134).
                raise ValueError(f"Invalid YAML in configuration file: {e}") from e
            except ValueError:
                # Re-raise our own ValueError (e.g. empty file) unchanged
                # instead of double-wrapping it in the generic handler below.
                raise
            except Exception as e:
                raise ValueError(f"Failed to load configuration: {e}") from e

    def reload_config(self) -> LLMModelsConfig:
        """
        Reload configuration from the current config file.

        This supports the hybrid loading mode - configuration is loaded at startup
        but can be manually refreshed without restarting the application.

        Returns:
            LLMModelsConfig: Reloaded configuration
        """
        logger.info("Reloading LLM configuration...")
        # If nothing was loaded yet, _config_path is None and load_config()
        # falls back to the standard search order.
        return self.load_config(self._config_path)

    def get_config(self) -> LLMModelsConfig:
        """
        Get the current configuration.

        Loads configuration on first access if not already loaded.

        Returns:
            LLMModelsConfig: Current configuration
        """
        if self._config is None:
            self.load_config()
            # After load_config(), _config should never be None
            if self._config is None:
                raise RuntimeError("Failed to load LLM configuration")
        return self._config

    def get_provider_config(self, provider_name: str) -> Optional[ProviderConfig]:
        """
        Get configuration for a specific provider.

        Args:
            provider_name: Name of the provider (e.g., "Vertex", "OpenAI")

        Returns:
            ProviderConfig if found, None otherwise
        """
        config = self.get_config()
        return config.get_provider_config(provider_name)

    def get_model_config(self, provider_name: str, model_name: str) -> Optional[ModelConfig]:
        """
        Get configuration for a specific model.

        Args:
            provider_name: Name of the provider
            model_name: Name of the model (or alias)

        Returns:
            ModelConfig if found, None otherwise
        """
        config = self.get_config()
        return config.get_model_config(provider_name, model_name)

    def get_default_model(self, provider_name: str) -> Optional[str]:
        """
        Get the default model name for a provider.

        Args:
            provider_name: Name of the provider

        Returns:
            Default model name if found, None otherwise
        """
        provider_config = self.get_provider_config(provider_name)
        if provider_config:
            return provider_config.default_model
        return None

    def is_loaded(self) -> bool:
        """Check if configuration has been loaded"""
        return self._config is not None

    def get_config_path(self) -> Optional[Path]:
        """Get the path to the current configuration file"""
        return self._config_path
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
# Global singleton instance, created at import time so the module-level
# convenience functions (get_llm_config_loader / get_llm_config /
# reload_llm_config) all share the same loader and its cached config.
_loader = LLMConfigLoader()
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def get_llm_config_loader() -> LLMConfigLoader:
    """
    Return the process-wide LLM configuration loader.

    Returns:
        LLMConfigLoader: Global singleton instance
    """
    loader = _loader
    return loader
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def get_llm_config() -> LLMModelsConfig:
    """
    Return the current LLM configuration.

    Convenience wrapper around the global loader; loads the configuration
    lazily on first access.

    Returns:
        LLMModelsConfig: Current configuration
    """
    # Same singleton as _loader, just accessed through the public accessor.
    return get_llm_config_loader().get_config()
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def reload_llm_config() -> LLMModelsConfig:
    """
    Reload the LLM configuration from disk.

    Convenience wrapper that refreshes the global loader's cached
    configuration without restarting the application.

    Returns:
        LLMModelsConfig: Reloaded configuration
    """
    # Same singleton as _loader, just accessed through the public accessor.
    return get_llm_config_loader().reload_config()
|