aiecs 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiecs/__init__.py +72 -0
- aiecs/__main__.py +41 -0
- aiecs/aiecs_client.py +469 -0
- aiecs/application/__init__.py +10 -0
- aiecs/application/executors/__init__.py +10 -0
- aiecs/application/executors/operation_executor.py +363 -0
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +375 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +356 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +531 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +443 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +319 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +100 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +327 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +349 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +244 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +23 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +387 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +343 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +580 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +189 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +344 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +378 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +199 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +347 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +504 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +167 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +630 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +654 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +477 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +390 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +217 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +169 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +872 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +554 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +19 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +596 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +423 -0
- aiecs/application/knowledge_graph/search/reranker.py +295 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +553 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +398 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +329 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +269 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +189 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +321 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +484 -0
- aiecs/config/__init__.py +16 -0
- aiecs/config/config.py +498 -0
- aiecs/config/graph_config.py +137 -0
- aiecs/config/registry.py +23 -0
- aiecs/core/__init__.py +46 -0
- aiecs/core/interface/__init__.py +34 -0
- aiecs/core/interface/execution_interface.py +152 -0
- aiecs/core/interface/storage_interface.py +171 -0
- aiecs/domain/__init__.py +289 -0
- aiecs/domain/agent/__init__.py +189 -0
- aiecs/domain/agent/base_agent.py +697 -0
- aiecs/domain/agent/exceptions.py +103 -0
- aiecs/domain/agent/graph_aware_mixin.py +559 -0
- aiecs/domain/agent/hybrid_agent.py +490 -0
- aiecs/domain/agent/integration/__init__.py +26 -0
- aiecs/domain/agent/integration/context_compressor.py +222 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +252 -0
- aiecs/domain/agent/integration/retry_policy.py +219 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +646 -0
- aiecs/domain/agent/lifecycle.py +296 -0
- aiecs/domain/agent/llm_agent.py +300 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +197 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +160 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +90 -0
- aiecs/domain/agent/models.py +317 -0
- aiecs/domain/agent/observability.py +407 -0
- aiecs/domain/agent/persistence.py +289 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +161 -0
- aiecs/domain/agent/prompts/formatters.py +189 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +260 -0
- aiecs/domain/agent/tool_agent.py +257 -0
- aiecs/domain/agent/tools/__init__.py +12 -0
- aiecs/domain/agent/tools/schema_generator.py +221 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +477 -0
- aiecs/domain/community/analytics.py +481 -0
- aiecs/domain/community/collaborative_workflow.py +642 -0
- aiecs/domain/community/communication_hub.py +645 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +800 -0
- aiecs/domain/community/community_manager.py +813 -0
- aiecs/domain/community/decision_engine.py +879 -0
- aiecs/domain/community/exceptions.py +225 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +268 -0
- aiecs/domain/community/resource_manager.py +457 -0
- aiecs/domain/community/shared_context_manager.py +603 -0
- aiecs/domain/context/__init__.py +58 -0
- aiecs/domain/context/context_engine.py +989 -0
- aiecs/domain/context/conversation_models.py +354 -0
- aiecs/domain/context/graph_memory.py +467 -0
- aiecs/domain/execution/__init__.py +12 -0
- aiecs/domain/execution/model.py +57 -0
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +130 -0
- aiecs/domain/knowledge_graph/models/evidence.py +194 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +186 -0
- aiecs/domain/knowledge_graph/models/path.py +179 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +173 -0
- aiecs/domain/knowledge_graph/models/query.py +272 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +187 -0
- aiecs/domain/knowledge_graph/models/relation.py +136 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +135 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +271 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +155 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +171 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +496 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +205 -0
- aiecs/domain/task/__init__.py +13 -0
- aiecs/domain/task/dsl_processor.py +613 -0
- aiecs/domain/task/model.py +62 -0
- aiecs/domain/task/task_context.py +268 -0
- aiecs/infrastructure/__init__.py +24 -0
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +601 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +449 -0
- aiecs/infrastructure/graph_storage/cache.py +429 -0
- aiecs/infrastructure/graph_storage/distributed.py +226 -0
- aiecs/infrastructure/graph_storage/error_handling.py +390 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +306 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +514 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +483 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +410 -0
- aiecs/infrastructure/graph_storage/metrics.py +357 -0
- aiecs/infrastructure/graph_storage/migration.py +413 -0
- aiecs/infrastructure/graph_storage/pagination.py +471 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +466 -0
- aiecs/infrastructure/graph_storage/postgres.py +871 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +635 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +623 -0
- aiecs/infrastructure/graph_storage/streaming.py +495 -0
- aiecs/infrastructure/messaging/__init__.py +13 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +383 -0
- aiecs/infrastructure/messaging/websocket_manager.py +298 -0
- aiecs/infrastructure/monitoring/__init__.py +34 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +174 -0
- aiecs/infrastructure/monitoring/global_metrics_manager.py +213 -0
- aiecs/infrastructure/monitoring/structured_logger.py +48 -0
- aiecs/infrastructure/monitoring/tracing_manager.py +410 -0
- aiecs/infrastructure/persistence/__init__.py +24 -0
- aiecs/infrastructure/persistence/context_engine_client.py +187 -0
- aiecs/infrastructure/persistence/database_manager.py +333 -0
- aiecs/infrastructure/persistence/file_storage.py +754 -0
- aiecs/infrastructure/persistence/redis_client.py +220 -0
- aiecs/llm/__init__.py +86 -0
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/callbacks/custom_callbacks.py +264 -0
- aiecs/llm/client_factory.py +420 -0
- aiecs/llm/clients/__init__.py +33 -0
- aiecs/llm/clients/base_client.py +193 -0
- aiecs/llm/clients/googleai_client.py +181 -0
- aiecs/llm/clients/openai_client.py +131 -0
- aiecs/llm/clients/vertex_client.py +437 -0
- aiecs/llm/clients/xai_client.py +184 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +275 -0
- aiecs/llm/config/config_validator.py +236 -0
- aiecs/llm/config/model_config.py +151 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +91 -0
- aiecs/main.py +363 -0
- aiecs/scripts/__init__.py +3 -0
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +97 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/version_manager.py +215 -0
- aiecs/scripts/dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md +242 -0
- aiecs/scripts/dependance_check/README_DEPENDENCY_CHECKER.md +310 -0
- aiecs/scripts/dependance_check/__init__.py +17 -0
- aiecs/scripts/dependance_check/dependency_checker.py +938 -0
- aiecs/scripts/dependance_check/dependency_fixer.py +391 -0
- aiecs/scripts/dependance_check/download_nlp_data.py +396 -0
- aiecs/scripts/dependance_check/quick_dependency_check.py +270 -0
- aiecs/scripts/dependance_check/setup_nlp_data.sh +217 -0
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/README_WEASEL_PATCH.md +126 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.py +128 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.sh +82 -0
- aiecs/scripts/dependance_patch/fix_weasel/patch_weasel_library.sh +188 -0
- aiecs/scripts/dependance_patch/fix_weasel/run_weasel_patch.sh +41 -0
- aiecs/scripts/tools_develop/README.md +449 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +259 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +422 -0
- aiecs/scripts/tools_develop/verify_tools.py +356 -0
- aiecs/tasks/__init__.py +1 -0
- aiecs/tasks/worker.py +172 -0
- aiecs/tools/__init__.py +299 -0
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +381 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +413 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +388 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +303 -0
- aiecs/tools/apisource/providers/__init__.py +115 -0
- aiecs/tools/apisource/providers/base.py +664 -0
- aiecs/tools/apisource/providers/census.py +401 -0
- aiecs/tools/apisource/providers/fred.py +564 -0
- aiecs/tools/apisource/providers/newsapi.py +412 -0
- aiecs/tools/apisource/providers/worldbank.py +357 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +375 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +391 -0
- aiecs/tools/apisource/tool.py +850 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +338 -0
- aiecs/tools/base_tool.py +201 -0
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +599 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2403 -0
- aiecs/tools/docs/content_insertion_tool.py +1333 -0
- aiecs/tools/docs/document_creator_tool.py +1317 -0
- aiecs/tools/docs/document_layout_tool.py +1166 -0
- aiecs/tools/docs/document_parser_tool.py +994 -0
- aiecs/tools/docs/document_writer_tool.py +1818 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +734 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +923 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +476 -0
- aiecs/tools/langchain_adapter.py +542 -0
- aiecs/tools/schema_generator.py +275 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +589 -0
- aiecs/tools/search_tool/cache.py +260 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +216 -0
- aiecs/tools/search_tool/core.py +749 -0
- aiecs/tools/search_tool/deduplicator.py +123 -0
- aiecs/tools/search_tool/error_handler.py +271 -0
- aiecs/tools/search_tool/metrics.py +371 -0
- aiecs/tools/search_tool/rate_limiter.py +178 -0
- aiecs/tools/search_tool/schemas.py +277 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +643 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +505 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +694 -0
- aiecs/tools/statistics/data_loader_tool.py +564 -0
- aiecs/tools/statistics/data_profiler_tool.py +658 -0
- aiecs/tools/statistics/data_transformer_tool.py +573 -0
- aiecs/tools/statistics/data_visualizer_tool.py +495 -0
- aiecs/tools/statistics/model_trainer_tool.py +487 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +459 -0
- aiecs/tools/task_tools/__init__.py +86 -0
- aiecs/tools/task_tools/chart_tool.py +732 -0
- aiecs/tools/task_tools/classfire_tool.py +922 -0
- aiecs/tools/task_tools/image_tool.py +447 -0
- aiecs/tools/task_tools/office_tool.py +684 -0
- aiecs/tools/task_tools/pandas_tool.py +635 -0
- aiecs/tools/task_tools/report_tool.py +635 -0
- aiecs/tools/task_tools/research_tool.py +392 -0
- aiecs/tools/task_tools/scraper_tool.py +715 -0
- aiecs/tools/task_tools/stats_tool.py +688 -0
- aiecs/tools/temp_file_manager.py +130 -0
- aiecs/tools/tool_executor/__init__.py +37 -0
- aiecs/tools/tool_executor/tool_executor.py +881 -0
- aiecs/utils/LLM_output_structor.py +445 -0
- aiecs/utils/__init__.py +34 -0
- aiecs/utils/base_callback.py +47 -0
- aiecs/utils/cache_provider.py +695 -0
- aiecs/utils/execution_utils.py +184 -0
- aiecs/utils/logging.py +1 -0
- aiecs/utils/prompt_loader.py +14 -0
- aiecs/utils/token_usage_repository.py +323 -0
- aiecs/ws/__init__.py +0 -0
- aiecs/ws/socket_server.py +52 -0
- aiecs-1.5.1.dist-info/METADATA +608 -0
- aiecs-1.5.1.dist-info/RECORD +302 -0
- aiecs-1.5.1.dist-info/WHEEL +5 -0
- aiecs-1.5.1.dist-info/entry_points.txt +10 -0
- aiecs-1.5.1.dist-info/licenses/LICENSE +225 -0
- aiecs-1.5.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,734 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Knowledge Graph Reasoning Tool
|
|
3
|
+
|
|
4
|
+
AIECS tool for advanced reasoning over knowledge graphs.
|
|
5
|
+
Provides query planning, multi-hop reasoning, inference, and evidence synthesis.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
from typing import Dict, Any, List, Optional
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
from enum import Enum
|
|
12
|
+
|
|
13
|
+
from aiecs.tools.base_tool import BaseTool
|
|
14
|
+
from aiecs.tools import register_tool
|
|
15
|
+
from aiecs.infrastructure.graph_storage.base import GraphStore
|
|
16
|
+
from aiecs.application.knowledge_graph.reasoning.query_planner import (
|
|
17
|
+
QueryPlanner,
|
|
18
|
+
)
|
|
19
|
+
from aiecs.application.knowledge_graph.reasoning.reasoning_engine import (
|
|
20
|
+
ReasoningEngine,
|
|
21
|
+
)
|
|
22
|
+
from aiecs.application.knowledge_graph.reasoning.inference_engine import (
|
|
23
|
+
InferenceEngine,
|
|
24
|
+
)
|
|
25
|
+
from aiecs.application.knowledge_graph.reasoning.evidence_synthesis import (
|
|
26
|
+
EvidenceSynthesizer,
|
|
27
|
+
)
|
|
28
|
+
from aiecs.application.knowledge_graph.reasoning.logic_form_parser import (
|
|
29
|
+
LogicFormParser,
|
|
30
|
+
)
|
|
31
|
+
from aiecs.domain.knowledge_graph.models.inference_rule import (
|
|
32
|
+
InferenceRule,
|
|
33
|
+
RuleType,
|
|
34
|
+
)
|
|
35
|
+
from aiecs.domain.knowledge_graph.models.query_plan import OptimizationStrategy
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ReasoningModeEnum(str, Enum):
    """Enumerates the reasoning pipelines this tool can dispatch to.

    Mixes in ``str`` so members compare equal to their raw string values
    and serialize cleanly in request/response payloads.
    """

    QUERY_PLAN = "query_plan"                  # plan how a query should execute
    MULTI_HOP = "multi_hop"                    # reason over multi-hop paths
    INFERENCE = "inference"                    # apply logical inference rules
    EVIDENCE_SYNTHESIS = "evidence_synthesis"  # combine evidence from sources
    LOGICAL_QUERY = "logical_query"            # parse natural language to logical form
    FULL_REASONING = "full_reasoning"          # run the complete pipeline
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class GraphReasoningInput(BaseModel):
    """Input schema for Graph Reasoning Tool (legacy, for execute() method)"""

    # Selects which reasoning pipeline _execute() dispatches to.
    mode: ReasoningModeEnum = Field(
        ...,
        description=(
            "Reasoning mode: 'query_plan' (plan execution), "
            "'multi_hop' (path reasoning), 'inference' (logical rules), "
            "'evidence_synthesis' (combine evidence), "
            "'logical_query' (parse to logical form), "
            "'full_reasoning' (complete pipeline)"
        ),
    )

    query: str = Field(..., description="Natural language query to reason about")

    # Required by multi-hop reasoning; this schema keeps it optional and the
    # executor raises ValueError at run time when it is missing.
    start_entity_id: Optional[str] = Field(
        None, description="Starting entity ID for multi-hop reasoning"
    )

    target_entity_id: Optional[str] = Field(None, description="Target entity ID for path finding")

    max_hops: int = Field(
        default=3,
        ge=1,
        le=5,
        description="Maximum hops for multi-hop reasoning (1-5)",
    )

    relation_types: Optional[List[str]] = Field(
        None, description="Filter by relation types for reasoning"
    )

    # Mapped to an OptimizationStrategy in query planning; unrecognized
    # values (including None) fall back to the balanced strategy.
    optimization_strategy: Optional[str] = Field(
        default="balanced",
        description="Query optimization strategy: 'cost', 'latency', or 'balanced'",
    )

    apply_inference: bool = Field(
        default=False,
        description="Apply logical inference rules (transitive, symmetric)",
    )

    inference_relation_type: Optional[str] = Field(
        None,
        description="Relation type to apply inference on (required if apply_inference=True)",
    )

    inference_max_steps: int = Field(
        default=3, ge=1, le=10, description="Maximum inference steps (1-10)"
    )

    synthesize_evidence: bool = Field(
        default=True, description="Synthesize evidence from multiple sources"
    )

    synthesis_method: str = Field(
        default="weighted_average",
        description="Evidence synthesis method: 'weighted_average', 'max', or 'voting'",
    )

    # Evidence below this confidence is filtered out after synthesis.
    confidence_threshold: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Minimum confidence threshold for evidence (0.0-1.0)",
    )
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
# Schemas for individual operations (used with run_async)
|
|
119
|
+
class QueryPlanSchema(BaseModel):
    """Schema for query_plan operation"""

    query: str = Field(..., description="Natural language query to plan")
    # Unrecognized values fall back to the balanced strategy at execution time.
    optimization_strategy: Optional[str] = Field(
        default="balanced",
        description="Query optimization strategy: 'cost', 'latency', or 'balanced'",
    )
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class MultiHopSchema(BaseModel):
    """Schema for multi_hop operation"""

    query: str = Field(..., description="Natural language query to reason about")
    # Unlike the legacy GraphReasoningInput, the start entity is mandatory here.
    start_entity_id: str = Field(..., description="Starting entity ID")
    target_entity_id: Optional[str] = Field(None, description="Target entity ID")
    max_hops: int = Field(default=3, ge=1, le=5, description="Maximum hops (1-5)")
    relation_types: Optional[List[str]] = Field(None, description="Filter by relation types")
    synthesize_evidence: bool = Field(default=True, description="Synthesize evidence")
    # One of 'weighted_average', 'max', or 'voting' — see GraphReasoningInput.
    synthesis_method: str = Field(default="weighted_average", description="Synthesis method")
    confidence_threshold: float = Field(default=0.5, ge=0.0, le=1.0, description="Min confidence")
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
class InferenceSchema(BaseModel):
    """Schema for inference operation"""

    relation_type: str = Field(..., description="Relation type to apply inference on")
    # Bounds the number of rule-application rounds.
    max_steps: int = Field(default=3, ge=1, le=10, description="Maximum inference steps")
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class EvidenceSynthesisSchema(BaseModel):
    """Schema for evidence_synthesis operation"""

    # One of 'weighted_average', 'max', or 'voting' — see GraphReasoningInput.
    synthesis_method: str = Field(default="weighted_average", description="Synthesis method")
    # Evidence below this confidence is dropped.
    confidence_threshold: float = Field(default=0.5, ge=0.0, le=1.0, description="Min confidence")
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class FullReasoningSchema(BaseModel):
    """Schema for full_reasoning operation"""

    query: str = Field(..., description="Natural language query")
    # The full pipeline requires a concrete starting point in the graph.
    start_entity_id: str = Field(..., description="Starting entity ID")
    target_entity_id: Optional[str] = Field(None, description="Target entity ID")
    max_hops: int = Field(default=3, ge=1, le=5, description="Maximum hops")
    relation_types: Optional[List[str]] = Field(None, description="Filter by relation types")
    # 'cost', 'latency', or 'balanced'; unrecognized values fall back to balanced.
    optimization_strategy: Optional[str] = Field(
        default="balanced", description="Optimization strategy"
    )
    apply_inference: bool = Field(default=False, description="Apply logical inference rules")
    # Required when apply_inference=True.
    inference_relation_type: Optional[str] = Field(None, description="Relation type for inference")
    inference_max_steps: int = Field(default=3, ge=1, le=10, description="Max inference steps")
    synthesize_evidence: bool = Field(default=True, description="Synthesize evidence")
    synthesis_method: str = Field(default="weighted_average", description="Synthesis method")
    confidence_threshold: float = Field(default=0.5, ge=0.0, le=1.0, description="Min confidence")
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
@register_tool("graph_reasoning")
|
|
176
|
+
class GraphReasoningTool(BaseTool):
|
|
177
|
+
"""
|
|
178
|
+
Knowledge Graph Reasoning Tool
|
|
179
|
+
|
|
180
|
+
Performs advanced reasoning over knowledge graphs using:
|
|
181
|
+
- Query Planning: Optimize query execution
|
|
182
|
+
- Multi-Hop Reasoning: Find and reason over paths
|
|
183
|
+
- Logical Inference: Apply inference rules
|
|
184
|
+
- Evidence Synthesis: Combine evidence from multiple sources
|
|
185
|
+
|
|
186
|
+
Example:
|
|
187
|
+
```python
|
|
188
|
+
tool = GraphReasoningTool(graph_store)
|
|
189
|
+
|
|
190
|
+
result = await tool.execute({
|
|
191
|
+
"mode": "full_reasoning",
|
|
192
|
+
"query": "How is Alice connected to Company X?",
|
|
193
|
+
"start_entity_id": "alice",
|
|
194
|
+
"max_hops": 3,
|
|
195
|
+
"apply_inference": True,
|
|
196
|
+
"synthesize_evidence": True
|
|
197
|
+
})
|
|
198
|
+
```
|
|
199
|
+
"""
|
|
200
|
+
|
|
201
|
+
    def __init__(self, graph_store: GraphStore):
        """
        Initialize Graph Reasoning Tool

        Args:
            graph_store: Graph storage backend shared by all reasoning engines
        """
        super().__init__()
        self.graph_store = graph_store
        # All engines operate against the same underlying graph store.
        self.query_planner = QueryPlanner(graph_store)
        self.reasoning_engine = ReasoningEngine(graph_store)
        self.inference_engine = InferenceEngine(graph_store)
        # Synthesizer takes no store — it works on evidence objects directly.
        self.evidence_synthesizer = EvidenceSynthesizer()

        # Add default inference rules (registered disabled; enabled on demand)
        self._setup_default_rules()
|
|
217
|
+
|
|
218
|
+
def _setup_default_rules(self):
|
|
219
|
+
"""Setup default inference rules"""
|
|
220
|
+
# Common transitive rules
|
|
221
|
+
transitive_relations = [
|
|
222
|
+
"KNOWS",
|
|
223
|
+
"FOLLOWS",
|
|
224
|
+
"CONNECTED_TO",
|
|
225
|
+
"RELATED_TO",
|
|
226
|
+
]
|
|
227
|
+
for rel_type in transitive_relations:
|
|
228
|
+
self.inference_engine.add_rule(
|
|
229
|
+
InferenceRule(
|
|
230
|
+
rule_id=f"transitive_{rel_type.lower()}",
|
|
231
|
+
rule_type=RuleType.TRANSITIVE,
|
|
232
|
+
relation_type=rel_type,
|
|
233
|
+
description=f"Transitive closure for {rel_type}",
|
|
234
|
+
confidence_decay=0.1,
|
|
235
|
+
enabled=False, # Only enable when requested
|
|
236
|
+
)
|
|
237
|
+
)
|
|
238
|
+
|
|
239
|
+
# Common symmetric rules
|
|
240
|
+
symmetric_relations = [
|
|
241
|
+
"FRIEND_OF",
|
|
242
|
+
"COLLEAGUE_OF",
|
|
243
|
+
"PARTNER_WITH",
|
|
244
|
+
"SIBLING_OF",
|
|
245
|
+
]
|
|
246
|
+
for rel_type in symmetric_relations:
|
|
247
|
+
self.inference_engine.add_rule(
|
|
248
|
+
InferenceRule(
|
|
249
|
+
rule_id=f"symmetric_{rel_type.lower()}",
|
|
250
|
+
rule_type=RuleType.SYMMETRIC,
|
|
251
|
+
relation_type=rel_type,
|
|
252
|
+
description=f"Symmetric relationship for {rel_type}",
|
|
253
|
+
confidence_decay=0.05,
|
|
254
|
+
enabled=False, # Only enable when requested
|
|
255
|
+
)
|
|
256
|
+
)
|
|
257
|
+
|
|
258
|
+
    @property
    def name(self) -> str:
        """Tool identifier; matches the @register_tool("graph_reasoning") key."""
        return "graph_reasoning"
|
|
261
|
+
|
|
262
|
+
    @property
    def description(self) -> str:
        """One-line human-readable summary of the tool's capabilities."""
        return "Advanced reasoning over knowledge graphs with query planning, multi-hop reasoning, inference, and evidence synthesis"
|
|
265
|
+
|
|
266
|
+
    @property
    def input_schema(self) -> type[GraphReasoningInput]:
        """Pydantic model used to validate inputs to execute()."""
        return GraphReasoningInput
|
|
269
|
+
|
|
270
|
+
async def _execute(self, validated_input: GraphReasoningInput) -> Dict[str, Any]:
|
|
271
|
+
"""
|
|
272
|
+
Execute reasoning based on mode
|
|
273
|
+
|
|
274
|
+
Args:
|
|
275
|
+
validated_input: Validated input parameters
|
|
276
|
+
|
|
277
|
+
Returns:
|
|
278
|
+
Reasoning results
|
|
279
|
+
"""
|
|
280
|
+
mode = validated_input.mode
|
|
281
|
+
|
|
282
|
+
if mode == ReasoningModeEnum.QUERY_PLAN:
|
|
283
|
+
return await self._execute_query_plan(validated_input)
|
|
284
|
+
|
|
285
|
+
elif mode == ReasoningModeEnum.MULTI_HOP:
|
|
286
|
+
return await self._execute_multi_hop(validated_input)
|
|
287
|
+
|
|
288
|
+
elif mode == ReasoningModeEnum.INFERENCE:
|
|
289
|
+
return await self._execute_inference(validated_input)
|
|
290
|
+
|
|
291
|
+
elif mode == ReasoningModeEnum.EVIDENCE_SYNTHESIS:
|
|
292
|
+
return await self._execute_evidence_synthesis(validated_input)
|
|
293
|
+
|
|
294
|
+
elif mode == ReasoningModeEnum.LOGICAL_QUERY:
|
|
295
|
+
return await self._execute_logical_query(validated_input)
|
|
296
|
+
|
|
297
|
+
elif mode == ReasoningModeEnum.FULL_REASONING:
|
|
298
|
+
return await self._execute_full_reasoning(validated_input)
|
|
299
|
+
|
|
300
|
+
else:
|
|
301
|
+
raise ValueError(f"Unknown reasoning mode: {mode}")
|
|
302
|
+
|
|
303
|
+
async def _execute_query_plan(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
|
|
304
|
+
"""Execute query planning"""
|
|
305
|
+
# Plan the query (not async)
|
|
306
|
+
plan = self.query_planner.plan_query(input_data.query)
|
|
307
|
+
|
|
308
|
+
# Optimize if strategy provided
|
|
309
|
+
strategy_map = {
|
|
310
|
+
"cost": OptimizationStrategy.MINIMIZE_COST,
|
|
311
|
+
"latency": OptimizationStrategy.MINIMIZE_LATENCY,
|
|
312
|
+
"balanced": OptimizationStrategy.BALANCED,
|
|
313
|
+
}
|
|
314
|
+
strategy = strategy_map.get(input_data.optimization_strategy, OptimizationStrategy.BALANCED)
|
|
315
|
+
optimized_plan = self.query_planner.optimize_plan(plan, strategy)
|
|
316
|
+
|
|
317
|
+
return {
|
|
318
|
+
"mode": "query_plan",
|
|
319
|
+
"query": input_data.query,
|
|
320
|
+
"plan": {
|
|
321
|
+
"steps": [
|
|
322
|
+
{
|
|
323
|
+
"step_id": step.step_id,
|
|
324
|
+
"operation": step.operation.value,
|
|
325
|
+
"depends_on": step.depends_on,
|
|
326
|
+
"estimated_cost": step.estimated_cost,
|
|
327
|
+
"description": step.description,
|
|
328
|
+
}
|
|
329
|
+
for step in optimized_plan.steps
|
|
330
|
+
],
|
|
331
|
+
"total_cost": optimized_plan.calculate_total_cost(),
|
|
332
|
+
"estimated_latency_ms": optimized_plan.calculate_total_cost()
|
|
333
|
+
* 100, # Rough estimate
|
|
334
|
+
"optimization_strategy": strategy.value,
|
|
335
|
+
},
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
async def _execute_multi_hop(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Execute multi-hop reasoning from a start entity.

    Walks the graph via the reasoning engine, then optionally
    synthesizes and confidence-filters the collected evidence.

    Args:
        input_data: Reasoning input; ``start_entity_id`` is required.

    Returns:
        Dictionary with the answer, confidence, up to 10 evidence
        records, execution time, and the reasoning trace.

    Raises:
        ValueError: If ``start_entity_id`` is missing.
    """
    if not input_data.start_entity_id:
        raise ValueError("start_entity_id is required for multi-hop reasoning")

    # Build context for reasoning. start_entity_id is guaranteed truthy
    # by the guard above, so it is added unconditionally (the original
    # re-checked it redundantly).
    context: Dict[str, Any] = {"start_entity_id": input_data.start_entity_id}
    if input_data.target_entity_id:
        context["target_entity_id"] = input_data.target_entity_id
    if input_data.relation_types:
        context["relation_types"] = input_data.relation_types

    # Execute reasoning
    result = await self.reasoning_engine.reason(
        query=input_data.query,
        context=context,
        max_hops=input_data.max_hops,
    )

    # Optionally synthesize evidence, then filter by confidence
    evidence_list = result.evidence
    if input_data.synthesize_evidence and evidence_list:
        evidence_list = self.evidence_synthesizer.synthesize_evidence(
            evidence_list, method=input_data.synthesis_method
        )
        evidence_list = self.evidence_synthesizer.filter_by_confidence(
            evidence_list, threshold=input_data.confidence_threshold
        )

    return {
        "mode": "multi_hop",
        "query": input_data.query,
        "answer": result.answer,
        "confidence": result.confidence,
        "evidence_count": len(evidence_list),
        "evidence": [
            {
                "evidence_id": ev.evidence_id,
                "type": ev.evidence_type.value,
                "confidence": ev.confidence,
                "relevance_score": ev.relevance_score,
                "explanation": ev.explanation,
                "entity_ids": ev.get_entity_ids(),
            }
            for ev in evidence_list[:10]  # Limit to top 10
        ],
        "execution_time_ms": result.execution_time_ms,
        "reasoning_trace": result.reasoning_trace,
    }
|
|
390
|
+
|
|
391
|
+
async def _execute_inference(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Run rule-based logical inference for one relation type.

    Enables every rule registered for the requested relation type,
    runs the inference engine (cache allowed), and reports the inferred
    relations plus a truncated trace.
    """
    # Guard clauses: both settings are mandatory in this mode.
    if not input_data.apply_inference:
        raise ValueError("apply_inference must be True for inference mode")
    if not input_data.inference_relation_type:
        raise ValueError("inference_relation_type is required for inference mode")

    relation_type = input_data.inference_relation_type

    # Switch on all rules registered for this relation type.
    for inference_rule in self.inference_engine.get_rules(relation_type):
        inference_rule.enabled = True

    # Run the engine; results may be served from cache.
    inference_result = await self.inference_engine.infer_relations(
        relation_type=relation_type,
        max_steps=input_data.inference_max_steps,
        use_cache=True,
    )
    trace_lines = self.inference_engine.get_inference_trace(inference_result)

    top_relations = [
        {
            "source_id": relation.source_id,
            "target_id": relation.target_id,
            "relation_type": relation.relation_type,
            "properties": relation.properties,
        }
        for relation in inference_result.inferred_relations[:10]  # Limit to top 10
    ]

    return {
        "mode": "inference",
        "relation_type": relation_type,
        "inferred_count": len(inference_result.inferred_relations),
        "inferred_relations": top_relations,
        "confidence": inference_result.confidence,
        "total_steps": inference_result.total_steps,
        "inference_trace": trace_lines[:20],  # Limit trace lines
    }
|
|
430
|
+
|
|
431
|
+
async def _execute_evidence_synthesis(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Echo back synthesis settings; this mode needs pre-collected evidence.

    Synthesis operates on evidence produced by a prior reasoning step,
    so in standalone form this mode only reports its configuration and
    points callers at the end-to-end pipeline.
    """
    return {
        "mode": "evidence_synthesis",
        "message": "Evidence synthesis requires pre-collected evidence. Use 'full_reasoning' mode for end-to-end reasoning with synthesis.",
        "synthesis_method": input_data.synthesis_method,
        "confidence_threshold": input_data.confidence_threshold,
    }
|
|
442
|
+
|
|
443
|
+
async def _execute_logical_query(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """
    Parse natural language query to logical form

    Converts natural language queries into structured logical representations
    that can be executed against the knowledge graph.

    Args:
        input_data: Reasoning input with query

    Returns:
        Dictionary with parsed logical query
    """
    # Parse the query into a logical form.
    parsed = LogicFormParser().parse(input_data.query)

    def _argument_label(argument):
        # Variables expose .name; anything else is stringified.
        return argument.name if hasattr(argument, "name") else str(argument)

    predicate_summaries = [
        {
            "name": predicate.name,
            "arguments": [_argument_label(a) for a in predicate.arguments],
        }
        for predicate in parsed.predicates
    ]
    constraint_summaries = [
        {
            "type": constraint.constraint_type.value,
            "variable": constraint.variable.name,
            "value": constraint.value,
        }
        for constraint in parsed.constraints
    ]

    response: Dict[str, Any] = {
        "mode": "logical_query",
        "query": input_data.query,
        "logical_form": parsed.to_dict(),
        "query_type": parsed.query_type.value,
        "variables": [variable.name for variable in parsed.variables],
        "predicates": predicate_summaries,
        "constraints": constraint_summaries,
    }

    # Some parser versions attach an execution plan; surface a summary.
    if hasattr(parsed, "execution_plan"):
        response["execution_plan"] = {
            "steps": len(parsed.execution_plan.steps),
            "estimated_cost": parsed.execution_plan.calculate_total_cost(),
        }

    return response
|
|
496
|
+
|
|
497
|
+
async def _execute_full_reasoning(self, input_data: GraphReasoningInput) -> Dict[str, Any]:
    """Run the complete reasoning pipeline.

    Pipeline: query planning -> multi-hop reasoning -> optional logical
    inference -> optional evidence synthesis. Each stage appends a
    summary record to ``results["steps"]``.

    Args:
        input_data: Reasoning input; ``start_entity_id`` is required.

    Returns:
        Dictionary with per-step summaries, the answer, final
        confidence, evidence count, top-5 evidence, and a truncated
        reasoning trace.

    Raises:
        ValueError: If ``start_entity_id`` is missing.
    """
    if not input_data.start_entity_id:
        raise ValueError("start_entity_id is required for full reasoning")

    results: Dict[str, Any] = {
        "mode": "full_reasoning",
        "query": input_data.query,
        "steps": [],
    }

    # Step 1: Query Planning
    plan = self.query_planner.plan_query(input_data.query)
    strategy_map = {
        "cost": OptimizationStrategy.MINIMIZE_COST,
        "latency": OptimizationStrategy.MINIMIZE_LATENCY,
        "balanced": OptimizationStrategy.BALANCED,
    }
    strategy = strategy_map.get(input_data.optimization_strategy, OptimizationStrategy.BALANCED)
    optimized_plan = self.query_planner.optimize_plan(plan, strategy)

    # Compute once; the original called calculate_total_cost() twice.
    plan_cost = optimized_plan.calculate_total_cost()
    results["steps"].append(
        {
            "name": "query_planning",
            "plan_steps": len(optimized_plan.steps),
            "estimated_cost": plan_cost,
            "estimated_latency_ms": plan_cost * 100,  # Rough estimate
        }
    )

    # Step 2: Multi-Hop Reasoning
    # start_entity_id is guaranteed truthy by the guard above.
    context: Dict[str, Any] = {"start_entity_id": input_data.start_entity_id}
    if input_data.target_entity_id:
        context["target_entity_id"] = input_data.target_entity_id
    if input_data.relation_types:
        context["relation_types"] = input_data.relation_types

    reasoning_result = await self.reasoning_engine.reason(
        query=input_data.query,
        context=context,
        max_hops=input_data.max_hops,
    )

    results["steps"].append(
        {
            "name": "multi_hop_reasoning",
            "evidence_collected": len(reasoning_result.evidence),
            "confidence": reasoning_result.confidence,
            "execution_time_ms": reasoning_result.execution_time_ms,
        }
    )

    # Step 3: Logical Inference (if requested)
    if input_data.apply_inference and input_data.inference_relation_type:
        # Enable rules for the requested relation type
        for rule in self.inference_engine.get_rules(input_data.inference_relation_type):
            rule.enabled = True

        inference_result = await self.inference_engine.infer_relations(
            relation_type=input_data.inference_relation_type,
            max_steps=input_data.inference_max_steps,
            use_cache=True,
        )

        results["steps"].append(
            {
                "name": "logical_inference",
                "inferred_relations": len(inference_result.inferred_relations),
                "inference_confidence": inference_result.confidence,
                "inference_steps": inference_result.total_steps,
            }
        )

    # Step 4: Evidence Synthesis
    evidence_list = reasoning_result.evidence
    overall_confidence: Optional[float] = None
    if input_data.synthesize_evidence and evidence_list:
        synthesized = self.evidence_synthesizer.synthesize_evidence(
            evidence_list, method=input_data.synthesis_method
        )
        filtered = self.evidence_synthesizer.filter_by_confidence(
            synthesized, threshold=input_data.confidence_threshold
        )
        ranked = self.evidence_synthesizer.rank_by_reliability(filtered)
        overall_confidence = self.evidence_synthesizer.estimate_overall_confidence(ranked)

        results["steps"].append(
            {
                "name": "evidence_synthesis",
                "original_evidence": len(evidence_list),
                "synthesized_evidence": len(synthesized),
                "filtered_evidence": len(filtered),
                "overall_confidence": overall_confidence,
            }
        )

        evidence_list = ranked

    # Final Results
    results["answer"] = reasoning_result.answer
    if not evidence_list:
        # No surviving evidence: fall back to the reasoning confidence.
        results["final_confidence"] = reasoning_result.confidence
    elif overall_confidence is not None:
        # Reuse the value already computed on this exact evidence list
        # (the original recomputed estimate_overall_confidence here).
        results["final_confidence"] = overall_confidence
    else:
        results["final_confidence"] = self.evidence_synthesizer.estimate_overall_confidence(
            evidence_list
        )
    results["evidence_count"] = len(evidence_list)
    results["top_evidence"] = [
        {
            "evidence_id": ev.evidence_id,
            "type": ev.evidence_type.value,
            "confidence": ev.confidence,
            "relevance_score": ev.relevance_score,
            "explanation": ev.explanation,
        }
        for ev in evidence_list[:5]  # Top 5
    ]
    # Limit trace
    results["reasoning_trace"] = reasoning_result.reasoning_trace[:10]

    return results
|
|
623
|
+
|
|
624
|
+
# Public methods for ToolExecutor integration
|
|
625
|
+
async def query_plan(
    self, query: str, optimization_strategy: Optional[str] = "balanced"
) -> Dict[str, Any]:
    """Query planning (public method for ToolExecutor)."""
    # start_entity_id is required by the schema but unused in this mode.
    settings: Dict[str, Any] = dict(
        mode=ReasoningModeEnum.QUERY_PLAN,
        start_entity_id="dummy",
        query=query,
        optimization_strategy=optimization_strategy,
    )
    return await self._execute_query_plan(GraphReasoningInput(**settings))
|
|
636
|
+
|
|
637
|
+
async def multi_hop(
    self,
    query: str,
    start_entity_id: str,
    target_entity_id: Optional[str] = None,
    max_hops: int = 3,
    relation_types: Optional[List[str]] = None,
    synthesize_evidence: bool = True,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Multi-hop reasoning (public method for ToolExecutor)."""
    settings: Dict[str, Any] = dict(
        mode=ReasoningModeEnum.MULTI_HOP,
        query=query,
        start_entity_id=start_entity_id,
        target_entity_id=target_entity_id,
        max_hops=max_hops,
        relation_types=relation_types,
        synthesize_evidence=synthesize_evidence,
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
    )
    return await self._execute_multi_hop(GraphReasoningInput(**settings))
|
|
661
|
+
|
|
662
|
+
async def inference(self, relation_type: str, max_steps: int = 3) -> Dict[str, Any]:
    """Logical inference (public method for ToolExecutor)."""
    # query and start_entity_id are schema-required placeholders,
    # not consumed by the inference mode.
    settings: Dict[str, Any] = dict(
        mode=ReasoningModeEnum.INFERENCE,
        query="inference",
        start_entity_id="dummy",
        apply_inference=True,
        inference_relation_type=relation_type,
        inference_max_steps=max_steps,
    )
    return await self._execute_inference(GraphReasoningInput(**settings))
|
|
673
|
+
|
|
674
|
+
async def evidence_synthesis(
    self,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Evidence synthesis (public method for ToolExecutor)."""
    # query and start_entity_id are schema-required placeholders,
    # not consumed by the synthesis mode.
    settings: Dict[str, Any] = dict(
        mode=ReasoningModeEnum.EVIDENCE_SYNTHESIS,
        query="synthesis",
        start_entity_id="dummy",
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
    )
    return await self._execute_evidence_synthesis(GraphReasoningInput(**settings))
|
|
688
|
+
|
|
689
|
+
async def full_reasoning(
    self,
    query: str,
    start_entity_id: str,
    target_entity_id: Optional[str] = None,
    max_hops: int = 3,
    relation_types: Optional[List[str]] = None,
    optimization_strategy: Optional[str] = "balanced",
    apply_inference: bool = False,
    inference_relation_type: Optional[str] = None,
    inference_max_steps: int = 3,
    synthesize_evidence: bool = True,
    synthesis_method: str = "weighted_average",
    confidence_threshold: float = 0.5,
) -> Dict[str, Any]:
    """Full reasoning pipeline (public method for ToolExecutor)."""
    settings: Dict[str, Any] = dict(
        mode=ReasoningModeEnum.FULL_REASONING,
        query=query,
        start_entity_id=start_entity_id,
        target_entity_id=target_entity_id,
        max_hops=max_hops,
        relation_types=relation_types,
        optimization_strategy=optimization_strategy,
        apply_inference=apply_inference,
        inference_relation_type=inference_relation_type,
        inference_max_steps=inference_max_steps,
        synthesize_evidence=synthesize_evidence,
        synthesis_method=synthesis_method,
        confidence_threshold=confidence_threshold,
    )
    return await self._execute_full_reasoning(GraphReasoningInput(**settings))
|
|
721
|
+
|
|
722
|
+
async def execute(self, **kwargs) -> Dict[str, Any]:
    """
    Execute the tool (public interface)

    Validates the raw keyword arguments against ``input_schema`` and
    hands the validated model to the internal dispatcher.

    Args:
        **kwargs: Tool input parameters (will be validated against input_schema)

    Returns:
        Dictionary with reasoning results
    """
    parsed = self.input_schema(**kwargs)  # Pydantic validation
    outcome = await self._execute(parsed)
    return outcome
|