aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiecs might be problematic. Click here for more details.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +435 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3949 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1731 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +894 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +377 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +230 -37
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +328 -0
- aiecs/llm/clients/google_function_calling_mixin.py +415 -0
- aiecs/llm/clients/googleai_client.py +314 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +1186 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1464 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1016 -0
- aiecs/tools/docs/document_writer_tool.py +2008 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +220 -141
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
- aiecs-1.7.17.dist-info/RECORD +337 -0
- aiecs-1.7.17.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,807 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Knowledge Graph Reasoning Tool
|
|
3
|
+
|
|
4
|
+
AIECS tool for advanced reasoning over knowledge graphs.
|
|
5
|
+
Provides query planning, multi-hop reasoning, inference, and evidence synthesis.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
from typing import Dict, Any, List, Optional
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
12
|
+
from enum import Enum
|
|
13
|
+
|
|
14
|
+
from aiecs.tools.base_tool import BaseTool
|
|
15
|
+
from aiecs.tools import register_tool
|
|
16
|
+
from aiecs.infrastructure.graph_storage.base import GraphStore
|
|
17
|
+
from aiecs.application.knowledge_graph.reasoning.query_planner import (
|
|
18
|
+
QueryPlanner,
|
|
19
|
+
)
|
|
20
|
+
from aiecs.application.knowledge_graph.reasoning.reasoning_engine import (
|
|
21
|
+
ReasoningEngine,
|
|
22
|
+
)
|
|
23
|
+
from aiecs.application.knowledge_graph.reasoning.inference_engine import (
|
|
24
|
+
InferenceEngine,
|
|
25
|
+
)
|
|
26
|
+
from aiecs.application.knowledge_graph.reasoning.evidence_synthesis import (
|
|
27
|
+
EvidenceSynthesizer,
|
|
28
|
+
)
|
|
29
|
+
from aiecs.application.knowledge_graph.reasoning.logic_form_parser import (
|
|
30
|
+
LogicFormParser,
|
|
31
|
+
)
|
|
32
|
+
from aiecs.domain.knowledge_graph.models.inference_rule import (
|
|
33
|
+
InferenceRule,
|
|
34
|
+
RuleType,
|
|
35
|
+
)
|
|
36
|
+
from aiecs.domain.knowledge_graph.models.query_plan import OptimizationStrategy
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class ReasoningModeEnum(str, Enum):
    """Enumeration of the reasoning modes the tool supports."""

    # Plan how a query would be executed
    QUERY_PLAN = "query_plan"
    # Reason over multi-hop paths between entities
    MULTI_HOP = "multi_hop"
    # Apply logical inference rules
    INFERENCE = "inference"
    # Combine evidence gathered from several sources
    EVIDENCE_SYNTHESIS = "evidence_synthesis"
    # Parse natural language into a logical query form
    LOGICAL_QUERY = "logical_query"
    # Run the complete reasoning pipeline end to end
    FULL_REASONING = "full_reasoning"
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class GraphReasoningInput(BaseModel):
    """Input schema for Graph Reasoning Tool (legacy, for execute() method)"""

    # Which stage of the reasoning pipeline to run
    mode: ReasoningModeEnum = Field(
        ...,
        description=(
            "Reasoning mode: 'query_plan' (plan execution), "
            "'multi_hop' (path reasoning), 'inference' (logical rules), "
            "'evidence_synthesis' (combine evidence), "
            "'logical_query' (parse to logical form), "
            "'full_reasoning' (complete pipeline)"
        ),
    )
    query: str = Field(..., description="Natural language query to reason about")

    # Path-reasoning inputs
    start_entity_id: Optional[str] = Field(None, description="Starting entity ID for multi-hop reasoning")
    target_entity_id: Optional[str] = Field(None, description="Target entity ID for path finding")
    max_hops: int = Field(default=3, ge=1, le=5, description="Maximum hops for multi-hop reasoning (1-5)")
    relation_types: Optional[List[str]] = Field(None, description="Filter by relation types for reasoning")

    # Planner behaviour
    optimization_strategy: Optional[str] = Field(default="balanced", description="Query optimization strategy: 'cost', 'latency', or 'balanced'")

    # Inference controls
    apply_inference: bool = Field(default=False, description="Apply logical inference rules (transitive, symmetric)")
    inference_relation_type: Optional[str] = Field(None, description="Relation type to apply inference on (required if apply_inference=True)")
    inference_max_steps: int = Field(default=3, ge=1, le=10, description="Maximum inference steps (1-10)")

    # Evidence-synthesis controls
    synthesize_evidence: bool = Field(default=True, description="Synthesize evidence from multiple sources")
    synthesis_method: str = Field(default="weighted_average", description="Evidence synthesis method: 'weighted_average', 'max', or 'voting'")
    confidence_threshold: float = Field(default=0.5, ge=0.0, le=1.0, description="Minimum confidence threshold for evidence (0.0-1.0)")
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
# Schemas for individual operations - moved to GraphReasoningTool class as inner classes
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
@register_tool("graph_reasoning")
|
|
115
|
+
class GraphReasoningTool(BaseTool):
|
|
116
|
+
"""
|
|
117
|
+
Knowledge Graph Reasoning Tool
|
|
118
|
+
|
|
119
|
+
Performs advanced reasoning over knowledge graphs using:
|
|
120
|
+
- Query Planning: Optimize query execution
|
|
121
|
+
- Multi-Hop Reasoning: Find and reason over paths
|
|
122
|
+
- Logical Inference: Apply inference rules
|
|
123
|
+
- Evidence Synthesis: Combine evidence from multiple sources
|
|
124
|
+
|
|
125
|
+
Example:
|
|
126
|
+
```python
|
|
127
|
+
tool = GraphReasoningTool(graph_store)
|
|
128
|
+
|
|
129
|
+
result = await tool.execute({
|
|
130
|
+
"mode": "full_reasoning",
|
|
131
|
+
"query": "How is Alice connected to Company X?",
|
|
132
|
+
"start_entity_id": "alice",
|
|
133
|
+
"max_hops": 3,
|
|
134
|
+
"apply_inference": True,
|
|
135
|
+
"synthesize_evidence": True
|
|
136
|
+
})
|
|
137
|
+
```
|
|
138
|
+
"""
|
|
139
|
+
|
|
140
|
+
# Configuration schema
|
|
141
|
+
class Config(BaseSettings):
    """Configuration for the Graph Reasoning Tool

    Automatically reads from environment variables with GRAPH_REASONING_ prefix.
    Example: GRAPH_REASONING_DEFAULT_MAX_HOPS -> default_max_hops
    """

    model_config = SettingsConfigDict(env_prefix="GRAPH_REASONING_")

    # Hop limit applied when a request does not supply max_hops
    default_max_hops: int = Field(default=3, description="Default maximum hops for multi-hop reasoning")
    # Evidence below this confidence is filtered out by default
    default_confidence_threshold: float = Field(default=0.5, description="Default confidence threshold for evidence")
    # Upper bound on inference chaining when none is requested
    default_inference_max_steps: int = Field(default=3, description="Default maximum inference steps")
    # Whether the built-in inference rules start out enabled
    enable_default_rules: bool = Field(default=False, description="Enable default inference rules by default")
|
|
166
|
+
|
|
167
|
+
# Schema definitions
|
|
168
|
+
class Query_planSchema(BaseModel):
    """Schema for query_plan operation"""

    # The query to be planned (not executed)
    query: str = Field(description="Natural language query to plan")
    optimization_strategy: Optional[str] = Field(default="balanced", description="Query optimization strategy: 'cost', 'latency', or 'balanced'")
|
|
176
|
+
|
|
177
|
+
class Multi_hopSchema(BaseModel):
    """Schema for multi_hop operation"""

    query: str = Field(description="Natural language query to reason about")
    start_entity_id: str = Field(description="Starting entity ID")
    target_entity_id: Optional[str] = Field(
        default=None,
        description="Optional target entity ID",
    )
    max_hops: int = Field(
        default=3,
        ge=1,
        le=5,
        description="Maximum hops (1-5)",
    )
    relation_types: Optional[List[str]] = Field(
        default=None,
        description="Optional filter by relation types",
    )
    synthesize_evidence: bool = Field(
        default=True,
        description="Whether to synthesize evidence from multiple paths",
    )
    synthesis_method: str = Field(
        default="weighted_average",
        description="Evidence synthesis method: 'weighted_average', 'max', or 'voting'",
    )
    confidence_threshold: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum confidence threshold for evidence (0.0-1.0)",
    )
|
|
188
|
+
|
|
189
|
+
class InferenceSchema(BaseModel):
    """Schema for inference operation"""

    relation_type: str = Field(description="Relation type to apply inference on")
    max_steps: int = Field(
        default=3,
        ge=1,
        le=10,
        description="Maximum inference steps (1-10)",
    )
|
|
194
|
+
|
|
195
|
+
class Evidence_synthesisSchema(BaseModel):
    """Schema for evidence_synthesis operation"""

    synthesis_method: str = Field(
        default="weighted_average",
        description="Evidence synthesis method: 'weighted_average', 'max', or 'voting'",
    )
    confidence_threshold: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum confidence threshold for evidence (0.0-1.0)",
    )
|
|
200
|
+
|
|
201
|
+
class Full_reasoningSchema(BaseModel):
    """Schema for full_reasoning operation"""

    query: str = Field(description="Natural language query")
    start_entity_id: str = Field(description="Starting entity ID")
    target_entity_id: Optional[str] = Field(
        default=None,
        description="Optional target entity ID",
    )
    max_hops: int = Field(
        default=3,
        ge=1,
        le=5,
        description="Maximum hops (1-5)",
    )
    relation_types: Optional[List[str]] = Field(
        default=None,
        description="Optional filter by relation types",
    )
    optimization_strategy: Optional[str] = Field(
        default="balanced",
        description="Query optimization strategy: 'cost', 'latency', or 'balanced'",
    )
    apply_inference: bool = Field(
        default=False,
        description="Whether to apply logical inference rules",
    )
    inference_relation_type: Optional[str] = Field(
        default=None,
        description="Optional relation type for inference",
    )
    inference_max_steps: int = Field(
        default=3,
        ge=1,
        le=10,
        description="Maximum inference steps (1-10)",
    )
    synthesize_evidence: bool = Field(
        default=True,
        description="Whether to synthesize evidence from multiple sources",
    )
    synthesis_method: str = Field(
        default="weighted_average",
        description="Evidence synthesis method: 'weighted_average', 'max', or 'voting'",
    )
    confidence_threshold: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum confidence threshold for evidence (0.0-1.0)",
    )
|
|
216
|
+
|
|
217
|
+
def __init__(self, graph_store: Optional[GraphStore] = None, config: Optional[Dict[str, Any]] = None, **kwargs):
    """
    Initialize Graph Reasoning Tool

    Args:
        graph_store: Graph storage backend (optional, can be set via _initialize())
        config (Dict, optional): Configuration overrides for Graph Reasoning Tool.
        **kwargs: Additional arguments passed to BaseTool (e.g., tool_name)

    Configuration is automatically loaded by BaseTool from:
    1. Explicit config dict (highest priority)
    2. YAML config files (config/tools/graph_reasoning.yaml)
    3. Environment variables (via dotenv from .env files)
    4. Tool defaults (lowest priority)

    Note:
        If graph_store is not provided, you must call _initialize() before using
        the tool. This allows the tool to be registered and instantiated via
        get_tool() without requiring a graph_store at import time.
    """
    super().__init__(config, **kwargs)

    # BaseTool loads configuration into self._config_obj; fall back to the
    # tool defaults when no configuration object was produced.  Use an
    # explicit None check (not truthiness) so a present-but-falsy config
    # object is never silently discarded.
    self.config = self._config_obj if self._config_obj is not None else self.Config()

    # Reasoning components stay unset until a graph store is attached
    # (either here or later via _initialize()).
    self.graph_store = graph_store
    self.query_planner = None
    self.reasoning_engine = None
    self.inference_engine = None
    self.evidence_synthesizer = None
    self._initialized = False

    # Backward compatibility: wire up components immediately when a store
    # is supplied at construction time.
    if graph_store is not None:
        self._setup_components(graph_store)
|
|
255
|
+
def _setup_components(self, graph_store: GraphStore) -> None:
    """Wire all reasoning components to ``graph_store`` and mark the tool ready."""
    self.graph_store = graph_store
    self.query_planner = QueryPlanner(graph_store)
    self.reasoning_engine = ReasoningEngine(graph_store)
    self.inference_engine = InferenceEngine(graph_store)
    self.evidence_synthesizer = EvidenceSynthesizer()

    # Register the built-in (disabled-by-default) inference rules, then
    # flag the tool as usable.
    self._setup_default_rules()
    self._initialized = True
|
|
266
|
+
|
|
267
|
+
async def _initialize(self, graph_store: Optional[GraphStore] = None) -> None:
    """
    Lazily set up reasoning components.

    Args:
        graph_store: Graph storage backend. Falls back to a freshly
            initialized InMemoryGraphStore when omitted.
    """
    # Idempotent: repeated calls after setup are no-ops.
    if self._initialized:
        return

    if graph_store is None:
        # Imported locally so the in-memory backend is only loaded on demand.
        from aiecs.infrastructure.graph_storage.in_memory import InMemoryGraphStore

        graph_store = InMemoryGraphStore()
        await graph_store.initialize()

    self._setup_components(graph_store)
|
|
285
|
+
|
|
286
|
+
def _setup_default_rules(self):
    """
    Register the built-in inference rules.

    Adds transitive-closure rules (KNOWS, FOLLOWS, CONNECTED_TO, RELATED_TO)
    followed by symmetric rules (FRIEND_OF, COLLEAGUE_OF, PARTNER_WITH,
    SIBLING_OF) to the inference engine. All rules are created disabled and
    are only switched on when a caller explicitly requests inference for
    their relation type.
    """
    # Data-driven registration: (id prefix, rule type, description template,
    # confidence decay, relation names). Transitive rules are registered
    # before symmetric ones, matching the original ordering.
    rule_groups = [
        (
            "transitive",
            RuleType.TRANSITIVE,
            "Transitive closure for {}",
            0.1,
            ["KNOWS", "FOLLOWS", "CONNECTED_TO", "RELATED_TO"],
        ),
        (
            "symmetric",
            RuleType.SYMMETRIC,
            "Symmetric relationship for {}",
            0.05,
            ["FRIEND_OF", "COLLEAGUE_OF", "PARTNER_WITH", "SIBLING_OF"],
        ),
    ]
    for prefix, rule_type, desc_template, decay, relations in rule_groups:
        for rel_type in relations:
            self.inference_engine.add_rule(
                InferenceRule(
                    rule_id=f"{prefix}_{rel_type.lower()}",
                    rule_type=rule_type,
                    relation_type=rel_type,
                    description=desc_template.format(rel_type),
                    confidence_decay=decay,
                    enabled=False,  # Only enable when requested
                )
            )
|
|
325
|
+
|
|
326
|
+
@property
def name(self) -> str:
    """Registry identifier under which this tool is looked up."""
    return "graph_reasoning"
@property
def description(self) -> str:
    """Human-readable summary of the tool's capabilities."""
    return "Advanced reasoning over knowledge graphs with query planning, multi-hop reasoning, inference, and evidence synthesis"
|
|
333
|
+
|
|
334
|
+
@property
def input_schema(self) -> type[GraphReasoningInput]:
    """Pydantic model used to validate ``execute(**kwargs)`` input."""
    return GraphReasoningInput
|
|
337
|
+
|
|
338
|
+
async def _execute(self, validated_input: GraphReasoningInput) -> Dict[str, Any]:
    """
    Dispatch reasoning to the handler for ``validated_input.mode``.

    Args:
        validated_input: Validated input parameters

    Returns:
        Reasoning results

    Raises:
        RuntimeError: If tool is not initialized (call _initialize() first)
        ValueError: If the reasoning mode is not recognized
    """
    if not self._initialized:
        raise RuntimeError(
            "GraphReasoningTool is not initialized. "
            "Call await tool._initialize(graph_store) first, or provide "
            "graph_store in the constructor."
        )

    # Mode -> coroutine handler dispatch table.
    handlers = {
        ReasoningModeEnum.QUERY_PLAN: self._execute_query_plan,
        ReasoningModeEnum.MULTI_HOP: self._execute_multi_hop,
        ReasoningModeEnum.INFERENCE: self._execute_inference,
        ReasoningModeEnum.EVIDENCE_SYNTHESIS: self._execute_evidence_synthesis,
        ReasoningModeEnum.LOGICAL_QUERY: self._execute_logical_query,
        ReasoningModeEnum.FULL_REASONING: self._execute_full_reasoning,
    }

    mode = validated_input.mode
    handler = handlers.get(mode)
    if handler is None:
        raise ValueError(f"Unknown reasoning mode: {mode}")
    return await handler(validated_input)
|
|
380
|
+
|
|
381
|
+
async def _execute_query_plan(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Plan (and optimize) a query without executing it."""
    # Planning itself is synchronous.
    plan = self.query_planner.plan_query(input_data.query)

    # Map the requested strategy name onto the planner's enum; unknown or
    # missing names fall back to the balanced strategy.
    strategy = {
        "cost": OptimizationStrategy.MINIMIZE_COST,
        "latency": OptimizationStrategy.MINIMIZE_LATENCY,
        "balanced": OptimizationStrategy.BALANCED,
    }.get(input_data.optimization_strategy or "balanced", OptimizationStrategy.BALANCED)
    optimized_plan = self.query_planner.optimize_plan(plan, strategy)

    step_summaries = []
    for step in optimized_plan.steps:
        step_summaries.append(
            {
                "step_id": step.step_id,
                "operation": step.operation.value,
                "depends_on": step.depends_on,
                "estimated_cost": step.estimated_cost,
                "description": step.description,
            }
        )

    total_cost = optimized_plan.calculate_total_cost()
    return {
        "mode": "query_plan",
        "query": input_data.query,
        "plan": {
            "steps": step_summaries,
            "total_cost": total_cost,
            # Rough latency estimate derived from the plan's cost model.
            "estimated_latency_ms": total_cost * 100,
            "optimization_strategy": strategy.value,
        },
    }
|
|
415
|
+
|
|
416
|
+
async def _execute_multi_hop(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """
    Execute multi-hop reasoning

    Walks the graph from ``start_entity_id`` (optionally toward
    ``target_entity_id``) for up to ``max_hops`` hops, then optionally
    synthesizes and confidence-filters the collected evidence before
    building the response payload.

    Raises:
        ValueError: If ``start_entity_id`` is missing.
    """
    if not input_data.start_entity_id:
        raise ValueError("start_entity_id is required for multi-hop reasoning")

    # Build context for reasoning — only keys that were actually provided.
    context: Dict[str, Any] = {}
    if input_data.start_entity_id:
        context["start_entity_id"] = input_data.start_entity_id
    if input_data.target_entity_id:
        context["target_entity_id"] = input_data.target_entity_id
    if input_data.relation_types:
        context["relation_types"] = input_data.relation_types

    # Execute reasoning
    result = await self.reasoning_engine.reason(
        query=input_data.query,
        context=context,
        max_hops=input_data.max_hops,
    )

    # Optionally synthesize evidence; filtering only runs after synthesis.
    evidence_list = result.evidence
    if input_data.synthesize_evidence and evidence_list:
        evidence_list = self.evidence_synthesizer.synthesize_evidence(evidence_list, method=input_data.synthesis_method)
        # Filter by confidence
        evidence_list = self.evidence_synthesizer.filter_by_confidence(evidence_list, threshold=input_data.confidence_threshold)

    return {
        "mode": "multi_hop",
        "query": input_data.query,
        "answer": result.answer,
        "confidence": result.confidence,
        "evidence_count": len(evidence_list),
        "evidence": [
            {
                "evidence_id": ev.evidence_id,
                "type": ev.evidence_type.value,
                "confidence": ev.confidence,
                "relevance_score": ev.relevance_score,
                "explanation": ev.explanation,
                "entity_ids": ev.get_entity_ids(),
            }
            for ev in evidence_list[:10]  # Limit to top 10
        ],
        "execution_time_ms": result.execution_time_ms,
        "reasoning_trace": result.reasoning_trace,
    }
|
|
464
|
+
|
|
465
|
+
async def _execute_inference(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """
    Execute logical inference

    Temporarily enables the inference rules registered for
    ``inference_relation_type``, runs the inference engine, and restores
    each rule's previous enabled state afterwards so a single call cannot
    leak enabled rules into later requests (rules are registered disabled
    with the intent of being enabled only on request).

    Raises:
        ValueError: If ``apply_inference`` is False or
            ``inference_relation_type`` is missing.
    """
    if not input_data.apply_inference:
        raise ValueError("apply_inference must be True for inference mode")

    if not input_data.inference_relation_type:
        raise ValueError("inference_relation_type is required for inference mode")

    # Enable the relevant rules, remembering each rule's prior state so it
    # can be restored after the run.
    rules = list(self.inference_engine.get_rules(input_data.inference_relation_type))
    previous_states = [rule.enabled for rule in rules]
    for rule in rules:
        rule.enabled = True

    try:
        # Apply inference
        result = await self.inference_engine.infer_relations(
            relation_type=input_data.inference_relation_type,
            max_steps=input_data.inference_max_steps,
            use_cache=True,
        )

        # Get trace
        trace = self.inference_engine.get_inference_trace(result)
    finally:
        # Restore rule states even if inference raised.
        for rule, was_enabled in zip(rules, previous_states):
            rule.enabled = was_enabled

    return {
        "mode": "inference",
        "relation_type": input_data.inference_relation_type,
        "inferred_count": len(result.inferred_relations),
        "inferred_relations": [
            {
                "source_id": rel.source_id,
                "target_id": rel.target_id,
                "relation_type": rel.relation_type,
                "properties": rel.properties,
            }
            for rel in result.inferred_relations[:10]  # Limit to top 10
        ],
        "confidence": result.confidence,
        "total_steps": result.total_steps,
        "inference_trace": trace[:20],  # Limit trace lines
    }
|
|
504
|
+
|
|
505
|
+
async def _execute_evidence_synthesis(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """
    Report synthesis settings for pre-collected evidence.

    This mode performs no reasoning on its own — evidence is expected to
    come from a previous reasoning step. The returned payload echoes the
    requested synthesis settings and points callers at 'full_reasoning'
    for the end-to-end flow.
    """
    return {
        "mode": "evidence_synthesis",
        "message": "Evidence synthesis requires pre-collected evidence. Use 'full_reasoning' mode for end-to-end reasoning with synthesis.",
        "synthesis_method": input_data.synthesis_method,
        "confidence_threshold": input_data.confidence_threshold,
    }
|
|
516
|
+
|
|
517
|
+
async def _execute_logical_query(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """
    Parse natural language query to logical form

    Converts the natural-language query into a structured logical
    representation (variables, predicates, constraints) that can be
    executed against the knowledge graph.

    Args:
        input_data: Reasoning input with query

    Returns:
        Dictionary with parsed logical query
    """
    # Parse the query with a fresh logic-form parser.
    logical_query = LogicFormParser().parse(input_data.query)

    # Predicate arguments may be variables (with .name) or plain values.
    predicate_summaries = [
        {
            "name": p.name,
            "arguments": [arg.name if hasattr(arg, "name") else str(arg) for arg in p.arguments],
        }
        for p in logical_query.predicates
    ]
    constraint_summaries = [
        {
            "type": c.constraint_type.value if hasattr(c.constraint_type, "value") else str(c.constraint_type),
            "variable": c.variable.name,
            "value": c.value,
        }
        for c in logical_query.constraints
    ]

    result = {
        "mode": "logical_query",
        "query": input_data.query,
        "logical_form": logical_query.to_dict(),
        "query_type": logical_query.query_type.value,
        "variables": [v.name for v in logical_query.variables],
        "predicates": predicate_summaries,
        "constraints": constraint_summaries,
    }

    # Attach a plan summary when the parser produced one.
    if hasattr(logical_query, "execution_plan"):
        result["execution_plan"] = {
            "steps": len(logical_query.execution_plan.steps),
            "estimated_cost": logical_query.execution_plan.calculate_total_cost(),
        }

    return result
|
|
568
|
+
|
|
569
|
+
async def _execute_full_reasoning(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Execute full reasoning pipeline"""
    # Pipeline: query planning -> multi-hop reasoning -> optional logical
    # inference -> optional evidence synthesis. Each stage appends a summary
    # entry to results["steps"].
    if not input_data.start_entity_id:
        raise ValueError("start_entity_id is required for full reasoning")

    results: Dict[str, Any] = {
        "mode": "full_reasoning",
        "query": input_data.query,
        "steps": [],
    }

    # Step 1: Query Planning
    plan = self.query_planner.plan_query(input_data.query)
    strategy_map = {
        "cost": OptimizationStrategy.MINIMIZE_COST,
        "latency": OptimizationStrategy.MINIMIZE_LATENCY,
        "balanced": OptimizationStrategy.BALANCED,
    }
    # Unknown strategy names silently fall back to BALANCED.
    strategy_key = input_data.optimization_strategy or "balanced"
    strategy = strategy_map.get(strategy_key, OptimizationStrategy.BALANCED)
    optimized_plan = self.query_planner.optimize_plan(plan, strategy)

    results["steps"].append(
        {
            "name": "query_planning",
            "plan_steps": len(optimized_plan.steps),
            "estimated_cost": optimized_plan.calculate_total_cost(),
            "estimated_latency_ms": optimized_plan.calculate_total_cost() * 100,  # Rough estimate
        }
    )

    # Step 2: Multi-Hop Reasoning
    # Build context for reasoning — only keys that were actually provided.
    context: Dict[str, Any] = {}
    if input_data.start_entity_id:
        context["start_entity_id"] = input_data.start_entity_id
    if input_data.target_entity_id:
        context["target_entity_id"] = input_data.target_entity_id
    if input_data.relation_types:
        context["relation_types"] = input_data.relation_types

    reasoning_result = await self.reasoning_engine.reason(
        query=input_data.query,
        context=context,
        max_hops=input_data.max_hops,
    )

    results["steps"].append(
        {
            "name": "multi_hop_reasoning",
            "evidence_collected": len(reasoning_result.evidence),
            "confidence": reasoning_result.confidence,
            "execution_time_ms": reasoning_result.execution_time_ms,
        }
    )

    # Step 3: Logical Inference (if requested)
    if input_data.apply_inference and input_data.inference_relation_type:
        # Enable rules
        # NOTE(review): rules stay enabled after this call, so later requests
        # observe them enabled even though they are registered disabled —
        # confirm whether the prior state should be restored here.
        for rule in self.inference_engine.get_rules(input_data.inference_relation_type):
            rule.enabled = True

        inference_result = await self.inference_engine.infer_relations(
            relation_type=input_data.inference_relation_type,
            max_steps=input_data.inference_max_steps,
            use_cache=True,
        )

        results["steps"].append(
            {
                "name": "logical_inference",
                "inferred_relations": len(inference_result.inferred_relations),
                "inference_confidence": inference_result.confidence,
                "inference_steps": inference_result.total_steps,
            }
        )

    # Step 4: Evidence Synthesis
    evidence_list = reasoning_result.evidence
    if input_data.synthesize_evidence and evidence_list:
        # Merge related evidence, drop low-confidence items, then rank.
        synthesized = self.evidence_synthesizer.synthesize_evidence(evidence_list, method=input_data.synthesis_method)

        filtered = self.evidence_synthesizer.filter_by_confidence(synthesized, threshold=input_data.confidence_threshold)

        ranked = self.evidence_synthesizer.rank_by_reliability(filtered)

        overall_confidence = self.evidence_synthesizer.estimate_overall_confidence(ranked)

        results["steps"].append(
            {
                "name": "evidence_synthesis",
                "original_evidence": len(evidence_list),
                "synthesized_evidence": len(synthesized),
                "filtered_evidence": len(filtered),
                "overall_confidence": overall_confidence,
            }
        )

        # From here on, downstream fields use the ranked evidence.
        evidence_list = ranked

    # Final Results
    results["answer"] = reasoning_result.answer
    results["final_confidence"] = self.evidence_synthesizer.estimate_overall_confidence(evidence_list) if evidence_list else reasoning_result.confidence
    results["evidence_count"] = len(evidence_list)
    # Create top evidence list
    top_evidence_list: List[Dict[str, Any]] = [
        {
            "evidence_id": ev.evidence_id,
            "type": ev.evidence_type.value,
            "confidence": ev.confidence,
            "relevance_score": ev.relevance_score,
            "explanation": ev.explanation,
        }
        for ev in evidence_list[:5]  # Top 5
    ]
    results["top_evidence"] = top_evidence_list
    # Limit trace
    results["reasoning_trace"] = reasoning_result.reasoning_trace[:10]

    return results
|
|
689
|
+
|
|
690
|
+
# Public methods for ToolExecutor integration
|
|
691
|
+
async def query_plan(self, query: str, optimization_strategy: Optional[str] = "balanced") -> Dict[str, Any]:
    """Query planning (public method for ToolExecutor)"""
    params = GraphReasoningInput(
        mode=ReasoningModeEnum.QUERY_PLAN,
        query=query,
        optimization_strategy=optimization_strategy,
        # Fields below are unused in query-plan mode but required by the schema.
        start_entity_id="dummy",
        target_entity_id=None,
        relation_types=None,
        inference_relation_type=None,
    )
    return await self._execute_query_plan(params)
|
|
703
|
+
|
|
704
|
+
async def multi_hop(
    self,
    query: str,
    start_entity_id: str,
    target_entity_id: Optional[str] = None,
    max_hops: int = 3,
    relation_types: Optional[List[str]] = None,
    synthesize_evidence: bool = True,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Multi-hop reasoning (public method for ToolExecutor)"""
    params = GraphReasoningInput(
        mode=ReasoningModeEnum.MULTI_HOP,
        query=query,
        start_entity_id=start_entity_id,
        target_entity_id=target_entity_id,
        max_hops=max_hops,
        relation_types=relation_types,
        synthesize_evidence=synthesize_evidence,
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
        # Inference is not part of multi-hop mode.
        inference_relation_type=None,
    )
    return await self._execute_multi_hop(params)
|
|
729
|
+
|
|
730
|
+
async def inference(self, relation_type: str, max_steps: int = 3) -> Dict[str, Any]:
    """Logical inference (public method for ToolExecutor)"""
    params = GraphReasoningInput(
        mode=ReasoningModeEnum.INFERENCE,
        apply_inference=True,
        inference_relation_type=relation_type,
        inference_max_steps=max_steps,
        # Fields below are unused in inference mode but required by the schema.
        query="inference",
        start_entity_id="dummy",
        target_entity_id=None,
        relation_types=None,
    )
    return await self._execute_inference(params)
|
|
743
|
+
|
|
744
|
+
async def evidence_synthesis(
    self,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Evidence synthesis (public method for ToolExecutor)"""
    params = GraphReasoningInput(
        mode=ReasoningModeEnum.EVIDENCE_SYNTHESIS,
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
        # Fields below are unused in this mode but required by the schema.
        query="synthesis",
        start_entity_id="dummy",
        target_entity_id=None,
        relation_types=None,
        inference_relation_type=None,
    )
    return await self._execute_evidence_synthesis(params)
|
|
761
|
+
|
|
762
|
+
async def full_reasoning(
    self,
    query: str,
    start_entity_id: str,
    target_entity_id: Optional[str] = None,
    max_hops: int = 3,
    relation_types: Optional[List[str]] = None,
    optimization_strategy: Optional[str] = "balanced",
    apply_inference: bool = False,
    inference_relation_type: Optional[str] = None,
    inference_max_steps: int = 3,
    synthesize_evidence: bool = True,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Full reasoning pipeline (public method for ToolExecutor)"""
    # Forward every parameter straight into the validated input model.
    params = GraphReasoningInput(
        mode=ReasoningModeEnum.FULL_REASONING,
        query=query,
        start_entity_id=start_entity_id,
        target_entity_id=target_entity_id,
        max_hops=max_hops,
        relation_types=relation_types,
        optimization_strategy=optimization_strategy,
        apply_inference=apply_inference,
        inference_relation_type=inference_relation_type,
        inference_max_steps=inference_max_steps,
        synthesize_evidence=synthesize_evidence,
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
    )
    return await self._execute_full_reasoning(params)
|
|
794
|
+
|
|
795
|
+
async def execute(self, **kwargs) -> Dict[str, Any]:
    """
    Execute the tool (public interface)

    Validates ``kwargs`` against :attr:`input_schema` and dispatches to the
    mode-specific handler.

    Args:
        **kwargs: Tool input parameters (will be validated against input_schema)

    Returns:
        Dictionary with reasoning results
    """
    # Pydantic validation raises on malformed input before any work starts.
    validated = self.input_schema(**kwargs)
    return await self._execute(validated)
|