aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aiecs has been flagged as possibly problematic. See the advisory details on the registry page for more information.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +399 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3870 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1435 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +884 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +364 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +224 -36
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +324 -0
- aiecs/llm/clients/google_function_calling_mixin.py +457 -0
- aiecs/llm/clients/googleai_client.py +241 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +897 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1323 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1011 -0
- aiecs/tools/docs/document_writer_tool.py +1829 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +175 -131
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
- aiecs-1.7.6.dist-info/RECORD +337 -0
- aiecs-1.7.6.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Dict, Any, Optional, List, AsyncGenerator
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
import logging
|
|
5
|
+
|
|
6
|
+
# Module-level logger for this module's own messages; BaseLLMClient instances
# additionally create per-provider child loggers ("<module>.<provider>").
logger = logging.getLogger(__name__)
|
|
7
|
+
|
|
8
|
+
# Lazy import to avoid circular dependency
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _get_config_loader():
    """Return the shared LLM configuration loader.

    The import happens inside the function body on purpose: importing
    ``aiecs.llm.config`` at module load time would create a circular
    dependency with this module.
    """
    from aiecs.llm.config import get_llm_config_loader as loader_factory

    return loader_factory()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class CacheControl:
    """Marker attached to a message (or message block) asking the provider
    to cache it, for providers with prompt-caching support such as
    Anthropic and Google.
    """

    # "ephemeral" requests a session-scoped cache entry.
    type: str = "ephemeral"
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class LLMMessage:
    """A single turn in an LLM conversation.

    Attributes:
        role: One of "system", "user", "assistant" or "tool".
        content: Message text; None for assistant turns that carry only tool calls.
        tool_calls: Structured tool-call payloads on assistant messages.
        tool_call_id: Identifier linking a "tool" message to the call it answers.
        cache_control: Optional marker enabling provider prompt caching.
    """

    role: str
    content: Optional[str] = None
    tool_calls: Optional[List[Dict[str, Any]]] = None
    tool_call_id: Optional[str] = None
    cache_control: Optional[CacheControl] = None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass
class LLMResponse:
    """
    Response from an LLM provider.

    Attributes:
        content: Generated text content
        provider: Name of the LLM provider (e.g., "openai", "google", "vertex")
        model: Model name used for generation
        tokens_used: Total tokens used (prompt + completion)
        prompt_tokens: Number of tokens in the prompt
        completion_tokens: Number of tokens in the completion
        cost_estimate: Estimated cost in USD
        response_time: Response time in seconds
        metadata: Additional provider-specific metadata
        cache_creation_tokens: Tokens used to create a new cache entry
        cache_read_tokens: Tokens read from cache (indicates cache hit)
        cache_hit: Whether the request hit a cached prompt prefix
    """

    content: str
    provider: str
    model: str
    tokens_used: Optional[int] = None
    prompt_tokens: Optional[int] = None
    completion_tokens: Optional[int] = None
    cost_estimate: Optional[float] = None
    response_time: Optional[float] = None
    metadata: Optional[Dict[str, Any]] = None
    # Cache metadata for prompt caching observability
    cache_creation_tokens: Optional[int] = None
    cache_read_tokens: Optional[int] = None
    cache_hit: Optional[bool] = None

    def __post_init__(self):
        """Derive the total token count when only the parts are provided.

        If only ``tokens_used`` is given, the prompt/completion split cannot
        be reconstructed, so the fields are left as-is.
        """
        if (
            self.tokens_used is None
            and self.prompt_tokens is not None
            and self.completion_tokens is not None
        ):
            self.tokens_used = self.prompt_tokens + self.completion_tokens
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class LLMClientError(Exception):
    """Common base class for every error raised by LLM clients."""


class ProviderNotAvailableError(LLMClientError):
    """The selected provider is unavailable or configured incorrectly."""


class RateLimitError(LLMClientError):
    """The provider rejected the request because a rate limit was hit."""


class SafetyBlockError(LLMClientError):
    """The provider's safety filters blocked the prompt or the response."""

    def __init__(
        self,
        message: str,
        block_reason: Optional[str] = None,
        block_type: Optional[str] = None,  # "prompt" or "response"
        safety_ratings: Optional[List[Dict[str, Any]]] = None,
    ):
        """Create a SafetyBlockError carrying the provider's block details.

        Args:
            message: Human-readable error message.
            block_reason: Provider block reason (e.g., SAFETY, RECITATION, JAILBREAK).
            block_type: "prompt" when the input was blocked, "response" when
                the output was blocked.
            safety_ratings: Per-category safety ratings from the provider.
        """
        super().__init__(message)
        self.block_reason = block_reason
        self.block_type = block_type
        self.safety_ratings = safety_ratings or []

    def __str__(self) -> str:
        """Render the base message plus any available block details."""
        parts = [super().__str__()]
        if self.block_reason:
            parts.append(f" (Block reason: {self.block_reason})")
        if self.block_type:
            parts.append(f" (Block type: {self.block_type})")
        if self.safety_ratings:
            # Only categories the provider actually flagged as blocked.
            blocked = [r.get("category", "UNKNOWN") for r in self.safety_ratings if r.get("blocked")]
            if blocked:
                parts.append(f" (Categories: {', '.join(blocked)})")
        return "".join(parts)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
class BaseLLMClient(ABC):
    """Common contract and shared helpers for LLM provider clients.

    Concrete providers implement ``generate_text``, ``stream_text`` and
    ``close``; the non-abstract helpers cover token estimation, prompt-cache
    bookkeeping and config-driven cost lookup.
    """

    def __init__(self, provider_name: str):
        self.provider_name = provider_name
        # Per-provider child logger, e.g. "<this module>.openai".
        self.logger = logging.getLogger(f"{__name__}.{provider_name}")

    @abstractmethod
    async def generate_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> LLMResponse:
        """Produce a single completion for ``messages`` via the provider's API."""

    @abstractmethod
    async def stream_text(
        self,
        messages: List[LLMMessage],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """Yield completion text chunks for ``messages`` as they arrive."""

    @abstractmethod
    async def close(self):
        """Release any resources held by the client."""

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    def _count_tokens_estimate(self, text: str) -> int:
        """Crude token estimate: roughly four characters per token (English)."""
        return len(text) // 4

    def _apply_cache_control(
        self,
        messages: List[LLMMessage],
        enable_caching: bool = True,
    ) -> List[LLMMessage]:
        """Tag cacheable messages with an ephemeral cache-control marker.

        System messages without an explicit marker are copied and tagged so
        providers with prompt caching can reuse their prefix; every other
        message passes through untouched.

        Args:
            messages: Conversation messages to scan.
            enable_caching: When False, ``messages`` is returned unchanged.

        Returns:
            The (possibly re-built) message list.
        """
        if not enable_caching:
            return messages

        def _tag(msg: LLMMessage) -> LLMMessage:
            # Only untagged system messages receive a fresh marker.
            if msg.role != "system" or msg.cache_control is not None:
                return msg
            return LLMMessage(
                role=msg.role,
                content=msg.content,
                tool_calls=msg.tool_calls,
                tool_call_id=msg.tool_call_id,
                cache_control=CacheControl(type="ephemeral"),
            )

        return [_tag(msg) for msg in messages]

    def _extract_cache_metadata(
        self,
        usage: Any,
    ) -> Dict[str, Any]:
        """Pull cache statistics out of a provider usage payload.

        The base implementation knows nothing about provider formats, so it
        reports every field as unknown; subclasses override this for
        provider-specific extraction.

        Args:
            usage: Usage data from the provider response.

        Returns:
            Dict with ``cache_creation_tokens``, ``cache_read_tokens`` and
            ``cache_hit`` keys, all None here.
        """
        return dict.fromkeys(("cache_creation_tokens", "cache_read_tokens", "cache_hit"))

    def _estimate_cost(
        self,
        model: str,
        input_tokens: int,
        output_tokens: int,
        token_costs: Dict,
    ) -> float:
        """Estimate the USD cost of a call from a static per-model cost table.

        DEPRECATED: Use _estimate_cost_from_config instead for config-based cost estimation.
        This method is kept for backward compatibility.
        """
        if model not in token_costs:
            return 0.0
        costs = token_costs[model]
        # Costs are expressed per 1000 tokens.
        return (input_tokens * costs["input"] + output_tokens * costs["output"]) / 1000

    def _estimate_cost_from_config(self, model_name: str, input_tokens: int, output_tokens: int) -> float:
        """Estimate the USD cost of a call using configured per-model pricing.

        Args:
            model_name: Name of the model
            input_tokens: Number of input tokens
            output_tokens: Number of output tokens

        Returns:
            Estimated cost in USD; 0.0 when pricing is unknown or lookup fails.
        """
        try:
            model_config = _get_config_loader().get_model_config(self.provider_name, model_name)
            if not (model_config and model_config.costs):
                self.logger.warning(f"No cost configuration found for model {model_name} " f"in provider {self.provider_name}")
                return 0.0
            # Configured rates are per 1000 tokens.
            input_cost = (input_tokens * model_config.costs.input) / 1000
            output_cost = (output_tokens * model_config.costs.output) / 1000
            return input_cost + output_cost
        except Exception as e:
            # Best-effort: cost estimation must never break the request path.
            self.logger.warning(f"Failed to estimate cost from config: {e}")
            return 0.0

    def _get_model_config(self, model_name: str):
        """Look up this provider's configuration for ``model_name``.

        Args:
            model_name: Name of the model

        Returns:
            ModelConfig if found, None otherwise (including on loader errors)
        """
        try:
            return _get_config_loader().get_model_config(self.provider_name, model_name)
        except Exception as e:
            self.logger.warning(f"Failed to get model config: {e}")
            return None

    def _get_default_model(self) -> Optional[str]:
        """Look up this provider's default model from configuration.

        Returns:
            Default model name if configured, None otherwise
        """
        try:
            return _get_config_loader().get_default_model(self.provider_name)
        except Exception as e:
            self.logger.warning(f"Failed to get default model: {e}")
            return None
|