aiecs-1.5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiecs/__init__.py +72 -0
- aiecs/__main__.py +41 -0
- aiecs/aiecs_client.py +469 -0
- aiecs/application/__init__.py +10 -0
- aiecs/application/executors/__init__.py +10 -0
- aiecs/application/executors/operation_executor.py +363 -0
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +375 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +356 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +531 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +443 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +319 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +100 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +327 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +349 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +244 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +23 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +387 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +343 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +580 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +189 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +344 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +378 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +199 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +347 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +504 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +167 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +630 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +654 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +477 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +390 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +217 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +169 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +872 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +554 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +19 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +596 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +423 -0
- aiecs/application/knowledge_graph/search/reranker.py +295 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +553 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +398 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +329 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +269 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +189 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +321 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +484 -0
- aiecs/config/__init__.py +16 -0
- aiecs/config/config.py +498 -0
- aiecs/config/graph_config.py +137 -0
- aiecs/config/registry.py +23 -0
- aiecs/core/__init__.py +46 -0
- aiecs/core/interface/__init__.py +34 -0
- aiecs/core/interface/execution_interface.py +152 -0
- aiecs/core/interface/storage_interface.py +171 -0
- aiecs/domain/__init__.py +289 -0
- aiecs/domain/agent/__init__.py +189 -0
- aiecs/domain/agent/base_agent.py +697 -0
- aiecs/domain/agent/exceptions.py +103 -0
- aiecs/domain/agent/graph_aware_mixin.py +559 -0
- aiecs/domain/agent/hybrid_agent.py +490 -0
- aiecs/domain/agent/integration/__init__.py +26 -0
- aiecs/domain/agent/integration/context_compressor.py +222 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +252 -0
- aiecs/domain/agent/integration/retry_policy.py +219 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +646 -0
- aiecs/domain/agent/lifecycle.py +296 -0
- aiecs/domain/agent/llm_agent.py +300 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +197 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +160 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +90 -0
- aiecs/domain/agent/models.py +317 -0
- aiecs/domain/agent/observability.py +407 -0
- aiecs/domain/agent/persistence.py +289 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +161 -0
- aiecs/domain/agent/prompts/formatters.py +189 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +260 -0
- aiecs/domain/agent/tool_agent.py +257 -0
- aiecs/domain/agent/tools/__init__.py +12 -0
- aiecs/domain/agent/tools/schema_generator.py +221 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +477 -0
- aiecs/domain/community/analytics.py +481 -0
- aiecs/domain/community/collaborative_workflow.py +642 -0
- aiecs/domain/community/communication_hub.py +645 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +800 -0
- aiecs/domain/community/community_manager.py +813 -0
- aiecs/domain/community/decision_engine.py +879 -0
- aiecs/domain/community/exceptions.py +225 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +268 -0
- aiecs/domain/community/resource_manager.py +457 -0
- aiecs/domain/community/shared_context_manager.py +603 -0
- aiecs/domain/context/__init__.py +58 -0
- aiecs/domain/context/context_engine.py +989 -0
- aiecs/domain/context/conversation_models.py +354 -0
- aiecs/domain/context/graph_memory.py +467 -0
- aiecs/domain/execution/__init__.py +12 -0
- aiecs/domain/execution/model.py +57 -0
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +130 -0
- aiecs/domain/knowledge_graph/models/evidence.py +194 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +186 -0
- aiecs/domain/knowledge_graph/models/path.py +179 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +173 -0
- aiecs/domain/knowledge_graph/models/query.py +272 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +187 -0
- aiecs/domain/knowledge_graph/models/relation.py +136 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +135 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +271 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +155 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +171 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +496 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +205 -0
- aiecs/domain/task/__init__.py +13 -0
- aiecs/domain/task/dsl_processor.py +613 -0
- aiecs/domain/task/model.py +62 -0
- aiecs/domain/task/task_context.py +268 -0
- aiecs/infrastructure/__init__.py +24 -0
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +601 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +449 -0
- aiecs/infrastructure/graph_storage/cache.py +429 -0
- aiecs/infrastructure/graph_storage/distributed.py +226 -0
- aiecs/infrastructure/graph_storage/error_handling.py +390 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +306 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +514 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +483 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +410 -0
- aiecs/infrastructure/graph_storage/metrics.py +357 -0
- aiecs/infrastructure/graph_storage/migration.py +413 -0
- aiecs/infrastructure/graph_storage/pagination.py +471 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +466 -0
- aiecs/infrastructure/graph_storage/postgres.py +871 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +635 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +623 -0
- aiecs/infrastructure/graph_storage/streaming.py +495 -0
- aiecs/infrastructure/messaging/__init__.py +13 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +383 -0
- aiecs/infrastructure/messaging/websocket_manager.py +298 -0
- aiecs/infrastructure/monitoring/__init__.py +34 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +174 -0
- aiecs/infrastructure/monitoring/global_metrics_manager.py +213 -0
- aiecs/infrastructure/monitoring/structured_logger.py +48 -0
- aiecs/infrastructure/monitoring/tracing_manager.py +410 -0
- aiecs/infrastructure/persistence/__init__.py +24 -0
- aiecs/infrastructure/persistence/context_engine_client.py +187 -0
- aiecs/infrastructure/persistence/database_manager.py +333 -0
- aiecs/infrastructure/persistence/file_storage.py +754 -0
- aiecs/infrastructure/persistence/redis_client.py +220 -0
- aiecs/llm/__init__.py +86 -0
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/callbacks/custom_callbacks.py +264 -0
- aiecs/llm/client_factory.py +420 -0
- aiecs/llm/clients/__init__.py +33 -0
- aiecs/llm/clients/base_client.py +193 -0
- aiecs/llm/clients/googleai_client.py +181 -0
- aiecs/llm/clients/openai_client.py +131 -0
- aiecs/llm/clients/vertex_client.py +437 -0
- aiecs/llm/clients/xai_client.py +184 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +275 -0
- aiecs/llm/config/config_validator.py +236 -0
- aiecs/llm/config/model_config.py +151 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +91 -0
- aiecs/main.py +363 -0
- aiecs/scripts/__init__.py +3 -0
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +97 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/version_manager.py +215 -0
- aiecs/scripts/dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md +242 -0
- aiecs/scripts/dependance_check/README_DEPENDENCY_CHECKER.md +310 -0
- aiecs/scripts/dependance_check/__init__.py +17 -0
- aiecs/scripts/dependance_check/dependency_checker.py +938 -0
- aiecs/scripts/dependance_check/dependency_fixer.py +391 -0
- aiecs/scripts/dependance_check/download_nlp_data.py +396 -0
- aiecs/scripts/dependance_check/quick_dependency_check.py +270 -0
- aiecs/scripts/dependance_check/setup_nlp_data.sh +217 -0
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/README_WEASEL_PATCH.md +126 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.py +128 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.sh +82 -0
- aiecs/scripts/dependance_patch/fix_weasel/patch_weasel_library.sh +188 -0
- aiecs/scripts/dependance_patch/fix_weasel/run_weasel_patch.sh +41 -0
- aiecs/scripts/tools_develop/README.md +449 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +259 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +422 -0
- aiecs/scripts/tools_develop/verify_tools.py +356 -0
- aiecs/tasks/__init__.py +1 -0
- aiecs/tasks/worker.py +172 -0
- aiecs/tools/__init__.py +299 -0
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +381 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +413 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +388 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +303 -0
- aiecs/tools/apisource/providers/__init__.py +115 -0
- aiecs/tools/apisource/providers/base.py +664 -0
- aiecs/tools/apisource/providers/census.py +401 -0
- aiecs/tools/apisource/providers/fred.py +564 -0
- aiecs/tools/apisource/providers/newsapi.py +412 -0
- aiecs/tools/apisource/providers/worldbank.py +357 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +375 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +391 -0
- aiecs/tools/apisource/tool.py +850 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +338 -0
- aiecs/tools/base_tool.py +201 -0
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +599 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2403 -0
- aiecs/tools/docs/content_insertion_tool.py +1333 -0
- aiecs/tools/docs/document_creator_tool.py +1317 -0
- aiecs/tools/docs/document_layout_tool.py +1166 -0
- aiecs/tools/docs/document_parser_tool.py +994 -0
- aiecs/tools/docs/document_writer_tool.py +1818 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +734 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +923 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +476 -0
- aiecs/tools/langchain_adapter.py +542 -0
- aiecs/tools/schema_generator.py +275 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +589 -0
- aiecs/tools/search_tool/cache.py +260 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +216 -0
- aiecs/tools/search_tool/core.py +749 -0
- aiecs/tools/search_tool/deduplicator.py +123 -0
- aiecs/tools/search_tool/error_handler.py +271 -0
- aiecs/tools/search_tool/metrics.py +371 -0
- aiecs/tools/search_tool/rate_limiter.py +178 -0
- aiecs/tools/search_tool/schemas.py +277 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +643 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +505 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +694 -0
- aiecs/tools/statistics/data_loader_tool.py +564 -0
- aiecs/tools/statistics/data_profiler_tool.py +658 -0
- aiecs/tools/statistics/data_transformer_tool.py +573 -0
- aiecs/tools/statistics/data_visualizer_tool.py +495 -0
- aiecs/tools/statistics/model_trainer_tool.py +487 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +459 -0
- aiecs/tools/task_tools/__init__.py +86 -0
- aiecs/tools/task_tools/chart_tool.py +732 -0
- aiecs/tools/task_tools/classfire_tool.py +922 -0
- aiecs/tools/task_tools/image_tool.py +447 -0
- aiecs/tools/task_tools/office_tool.py +684 -0
- aiecs/tools/task_tools/pandas_tool.py +635 -0
- aiecs/tools/task_tools/report_tool.py +635 -0
- aiecs/tools/task_tools/research_tool.py +392 -0
- aiecs/tools/task_tools/scraper_tool.py +715 -0
- aiecs/tools/task_tools/stats_tool.py +688 -0
- aiecs/tools/temp_file_manager.py +130 -0
- aiecs/tools/tool_executor/__init__.py +37 -0
- aiecs/tools/tool_executor/tool_executor.py +881 -0
- aiecs/utils/LLM_output_structor.py +445 -0
- aiecs/utils/__init__.py +34 -0
- aiecs/utils/base_callback.py +47 -0
- aiecs/utils/cache_provider.py +695 -0
- aiecs/utils/execution_utils.py +184 -0
- aiecs/utils/logging.py +1 -0
- aiecs/utils/prompt_loader.py +14 -0
- aiecs/utils/token_usage_repository.py +323 -0
- aiecs/ws/__init__.py +0 -0
- aiecs/ws/socket_server.py +52 -0
- aiecs-1.5.1.dist-info/METADATA +608 -0
- aiecs-1.5.1.dist-info/RECORD +302 -0
- aiecs-1.5.1.dist-info/WHEEL +5 -0
- aiecs-1.5.1.dist-info/entry_points.txt +10 -0
- aiecs-1.5.1.dist-info/licenses/LICENSE +225 -0
- aiecs-1.5.1.dist-info/top_level.txt +1 -0
aiecs/domain/agent/knowledge_aware_agent.py
@@ -0,0 +1,646 @@
"""
Knowledge-Aware Agent

Enhanced hybrid agent with knowledge graph integration.
Extends the standard HybridAgent with graph reasoning capabilities.
"""

import logging
from typing import Dict, List, Any, Optional
from datetime import datetime

from aiecs.llm import BaseLLMClient
from aiecs.infrastructure.graph_storage.base import GraphStore
from aiecs.tools.knowledge_graph import GraphReasoningTool
from aiecs.domain.knowledge_graph.models.entity import Entity

from .hybrid_agent import HybridAgent
from .models import AgentConfiguration

logger = logging.getLogger(__name__)


class KnowledgeAwareAgent(HybridAgent):
    """
    Knowledge-Aware Agent with integrated knowledge graph reasoning.

    Extends HybridAgent with:
    - Knowledge graph consultation during reasoning
    - Graph-aware tool selection
    - Knowledge-augmented prompt construction
    - Automatic access to graph reasoning capabilities

    Example:
        ```python
        from aiecs.domain.agent import KnowledgeAwareAgent
        from aiecs.infrastructure.graph_storage import InMemoryGraphStore

        # Initialize with knowledge graph
        graph_store = InMemoryGraphStore()
        await graph_store.initialize()

        agent = KnowledgeAwareAgent(
            agent_id="kg_agent_001",
            name="Knowledge Assistant",
            llm_client=llm_client,
            tools=["web_search", "calculator"],
            config=config,
            graph_store=graph_store
        )

        await agent.initialize()
        result = await agent.execute_task("How is Alice connected to Company X?")
        ```
    """

    def __init__(
        self,
        agent_id: str,
        name: str,
        llm_client: BaseLLMClient,
        tools: List[str],
        config: AgentConfiguration,
        graph_store: Optional[GraphStore] = None,
        description: Optional[str] = None,
        version: str = "1.0.0",
        max_iterations: int = 10,
        enable_graph_reasoning: bool = True,
    ):
        """
        Initialize Knowledge-Aware agent.

        Args:
            agent_id: Unique agent identifier
            name: Agent name
            llm_client: LLM client for reasoning
            tools: List of tool names (graph_reasoning auto-added if graph_store provided)
            config: Agent configuration
            graph_store: Optional knowledge graph store
            description: Optional description
            version: Agent version
            max_iterations: Maximum ReAct iterations
            enable_graph_reasoning: Whether to enable graph reasoning capabilities
        """
        # Auto-add graph_reasoning tool if graph_store is provided
        if graph_store is not None and enable_graph_reasoning:
            if "graph_reasoning" not in tools:
                tools = tools + ["graph_reasoning"]

        super().__init__(
            agent_id=agent_id,
            name=name,
            llm_client=llm_client,
            tools=tools,
            config=config,
            description=description or "Knowledge-aware agent with integrated graph reasoning",
            version=version,
            max_iterations=max_iterations,
        )

        self.graph_store = graph_store
        self.enable_graph_reasoning = enable_graph_reasoning
        self._graph_reasoning_tool: Optional[GraphReasoningTool] = None
        self._knowledge_context: Dict[str, Any] = {}

        logger.info(
            f"KnowledgeAwareAgent initialized: {agent_id} "
            f"with graph_store={'enabled' if graph_store else 'disabled'}"
        )

    async def _initialize(self) -> None:
        """Initialize Knowledge-Aware agent - setup graph tools and augmented prompts."""
        # Call parent initialization
        await super()._initialize()

        # Initialize graph reasoning tool if graph store is available
        if self.graph_store is not None and self.enable_graph_reasoning:
            try:
                self._graph_reasoning_tool = GraphReasoningTool(self.graph_store)
                logger.info(f"KnowledgeAwareAgent {self.agent_id} initialized graph reasoning")
            except Exception as e:
                logger.warning(f"Failed to initialize graph reasoning tool: {e}")

        # Rebuild system prompt with knowledge graph capabilities
        if self.graph_store is not None:
            self._system_prompt = self._build_kg_augmented_system_prompt()

        logger.info(f"KnowledgeAwareAgent {self.agent_id} initialized with enhanced capabilities")

    async def _shutdown(self) -> None:
        """Shutdown Knowledge-Aware agent."""
        # Clear knowledge context
        self._knowledge_context.clear()

        # Shutdown graph store if needed
        if self.graph_store is not None:
            try:
                await self.graph_store.close()
            except Exception as e:
                logger.warning(f"Error closing graph store: {e}")

        # Call parent shutdown
        await super()._shutdown()

        logger.info(f"KnowledgeAwareAgent {self.agent_id} shut down")

    def _build_kg_augmented_system_prompt(self) -> str:
        """
        Build knowledge graph-augmented system prompt.

        Returns:
            Enhanced system prompt with KG capabilities
        """
        base_prompt = super()._build_system_prompt()

        # Add knowledge graph capabilities section
        kg_section = """

KNOWLEDGE GRAPH CAPABILITIES:
You have access to an integrated knowledge graph that can help answer complex questions.

REASONING WITH KNOWLEDGE:
Your reasoning process now includes an automatic RETRIEVE phase:
1. RETRIEVE: Relevant knowledge is automatically fetched from the graph before each reasoning step
2. THOUGHT: You analyze the task considering retrieved knowledge
3. ACTION: Use tools or provide final answer
4. OBSERVATION: Review results and continue

Retrieved knowledge will be provided as:
RETRIEVED KNOWLEDGE:
- Entity: id (properties)
- Entity: id (properties)
...

When to use the 'graph_reasoning' tool:
- Multi-hop questions (e.g., "How is X connected to Y?")
- Relationship discovery (e.g., "Who knows people at Company Z?")
- Knowledge completion (e.g., "What do we know about Person A?")
- Evidence-based reasoning (multiple sources needed)

The 'graph_reasoning' tool supports these modes:
- query_plan: Plan complex query execution
- multi_hop: Find connections between entities
- inference: Apply logical inference rules
- full_reasoning: Complete reasoning pipeline with evidence synthesis

Use graph reasoning proactively when questions involve:
- Connections, relationships, or paths
- Multiple entities or complex queries
- Need for evidence from multiple sources
"""

        return base_prompt + kg_section

    async def _reason_with_graph(
        self, query: str, context: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Consult knowledge graph during reasoning.

        Args:
            query: Query to reason about
            context: Optional context for reasoning

        Returns:
            Reasoning results from knowledge graph
        """
        if self._graph_reasoning_tool is None:
            logger.warning("Graph reasoning tool not available")
            return {"error": "Graph reasoning not available"}

        try:
            # Use multi_hop mode by default for general queries
            from aiecs.tools.knowledge_graph.graph_reasoning_tool import (
                GraphReasoningInput,
            )

            # Extract entity IDs from context if available
            start_entity_id = None
            target_entity_id = None
            if context:
                start_entity_id = context.get("start_entity_id")
                target_entity_id = context.get("target_entity_id")

            input_data = GraphReasoningInput(
                mode="multi_hop",
                query=query,
                start_entity_id=start_entity_id,
                target_entity_id=target_entity_id,
                max_hops=3,
                synthesize_evidence=True,
                confidence_threshold=0.6,
            )

            result = await self._graph_reasoning_tool._execute(input_data)

            # Store knowledge context for later use
            self._knowledge_context[query] = {
                "answer": result.get("answer"),
                "confidence": result.get("confidence"),
                "evidence_count": result.get("evidence_count"),
                "timestamp": datetime.utcnow().isoformat(),
            }

            return result

        except Exception as e:
            logger.error(f"Error in graph reasoning: {e}")
            return {"error": str(e)}

    async def _select_tools_with_graph_awareness(
        self, task: str, available_tools: List[str]
    ) -> List[str]:
        """
        Select tools with graph awareness.

        Prioritizes graph reasoning tool for knowledge-related queries.

        Args:
            task: Task description
            available_tools: Available tool names

        Returns:
            Selected tool names
        """
        # Keywords that suggest graph reasoning might be useful
        graph_keywords = [
            "connected",
            "connection",
            "relationship",
            "related",
            "knows",
            "works",
            "friend",
            "colleague",
            "partner",
            "how",
            "why",
            "who",
            "what",
            "which",
            "find",
            "discover",
            "explore",
            "trace",
        ]

        task_lower = task.lower()

        # Check if task involves knowledge graph queries
        uses_graph_keywords = any(keyword in task_lower for keyword in graph_keywords)

        # If graph reasoning is available and task seems graph-related,
        # prioritize it
        if uses_graph_keywords and "graph_reasoning" in available_tools:
            # Put graph_reasoning first
            selected = ["graph_reasoning"]
            # Add other tools
            selected.extend([t for t in available_tools if t != "graph_reasoning"])
            return selected

        return available_tools

    async def _augment_prompt_with_knowledge(
        self, task: str, context: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Augment prompt with relevant knowledge from graph.

        Args:
            task: Original task
            context: Optional context

        Returns:
            Augmented task with knowledge context
        """
        if self.graph_store is None or not self.enable_graph_reasoning:
            return task

        # Check if we have cached knowledge for similar queries
        relevant_knowledge = []
        for query, kg_context in self._knowledge_context.items():
            # Simple keyword matching (could be enhanced with embeddings)
            if any(word in task.lower() for word in query.lower().split()):
                relevant_knowledge.append(
                    f"- {query}: {kg_context['answer']} (confidence: {kg_context['confidence']:.2f})"
                )

        if relevant_knowledge:
            knowledge_section = "\n\nRELEVANT KNOWLEDGE FROM GRAPH:\n" + "\n".join(
                relevant_knowledge[:3]
            )
            return task + knowledge_section

        return task

    async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute task with knowledge graph augmentation.

        Uses knowledge-augmented ReAct loop that includes a RETRIEVE phase.

        Args:
            task: Task specification with 'description' or 'prompt'
            context: Execution context

        Returns:
            Task execution result
        """
        # Extract task description
        task_description = task.get("description") or task.get("prompt") or task.get("task")
        if not task_description:
            return await super().execute_task(task, context)

        # Augment task with knowledge if available
        augmented_task_desc = await self._augment_prompt_with_knowledge(task_description, context)

        # If task seems graph-related, consult graph first
        if self.graph_store is not None and self.enable_graph_reasoning:
            # Check if this is a direct graph query
            graph_keywords = [
                "connected",
                "connection",
                "relationship",
                "knows",
                "works at",
            ]
            if any(keyword in task_description.lower() for keyword in graph_keywords):
                logger.info(f"Consulting knowledge graph for task: {task_description}")

                # Try graph reasoning
                graph_result = await self._reason_with_graph(augmented_task_desc, context)

                # If we got a good answer from the graph, use it
                if "answer" in graph_result and graph_result.get("confidence", 0) > 0.7:
                    return {
                        "success": True,
                        "output": graph_result["answer"],
                        "confidence": graph_result["confidence"],
                        "source": "knowledge_graph",
                        "evidence_count": graph_result.get("evidence_count", 0),
                        "reasoning_trace": graph_result.get("reasoning_trace", []),
                        "timestamp": datetime.utcnow().isoformat(),
                    }

        # Fall back to standard hybrid agent execution
        # This will use the overridden _react_loop with knowledge retrieval
        # Create modified task dict with augmented description
        augmented_task = task.copy()
        if "description" in task:
            augmented_task["description"] = augmented_task_desc
        elif "prompt" in task:
            augmented_task["prompt"] = augmented_task_desc
        elif "task" in task:
            augmented_task["task"] = augmented_task_desc

        return await super().execute_task(augmented_task, context)

    async def _react_loop(self, task: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute knowledge-augmented ReAct loop: Retrieve → Reason → Act → Observe.

        Extends the standard ReAct loop with a RETRIEVE phase that fetches
        relevant knowledge from the graph before each reasoning step.

        Args:
            task: Task description
            context: Context dictionary

        Returns:
            Result dictionary with 'final_answer', 'steps', 'iterations'
        """
        steps = []
        tool_calls_count = 0
        total_tokens = 0
        knowledge_retrievals = 0

        # Build initial messages
        from aiecs.llm import LLMMessage

        messages = self._build_initial_messages(task, context)

        for iteration in range(self._max_iterations):
            logger.debug(f"KnowledgeAwareAgent {self.agent_id} - ReAct iteration {iteration + 1}")

            # RETRIEVE: Get relevant knowledge from graph (if enabled)
            retrieved_knowledge = []
            if self.graph_store is not None and self.enable_graph_reasoning:
                try:
                    retrieved_knowledge = await self._retrieve_relevant_knowledge(
                        task, context, iteration
                    )

                    if retrieved_knowledge:
                        knowledge_retrievals += 1
                        knowledge_str = self._format_retrieved_knowledge(retrieved_knowledge)

                        steps.append(
                            {
                                "type": "retrieve",
                                "knowledge_count": len(retrieved_knowledge),
                                "content": (
                                    knowledge_str[:200] + "..."
                                    if len(knowledge_str) > 200
                                    else knowledge_str
                                ),
                                "iteration": iteration + 1,
                            }
                        )

                        # Add knowledge to messages
                        messages.append(
                            LLMMessage(
                                role="system",
                                content=f"RETRIEVED KNOWLEDGE:\n{knowledge_str}",
                            )
                        )
                except Exception as e:
                    logger.warning(f"Knowledge retrieval failed: {e}")

            # THINK: LLM reasons about next action
            response = await self.llm_client.generate_text(
                messages=messages,
                model=self._config.llm_model,
                temperature=self._config.temperature,
                max_tokens=self._config.max_tokens,
            )

            thought = response.content
            total_tokens += getattr(response, "total_tokens", 0)

            steps.append(
                {
                    "type": "thought",
                    "content": thought,
                    "iteration": iteration + 1,
                }
            )

            # Check if final answer
            if "FINAL ANSWER:" in thought:
                final_answer = self._extract_final_answer(thought)
                return {
                    "final_answer": final_answer,
                    "steps": steps,
                    "iterations": iteration + 1,
                    "tool_calls_count": tool_calls_count,
                    "knowledge_retrievals": knowledge_retrievals,
                    "total_tokens": total_tokens,
                }

            # Check if tool call
            if "TOOL:" in thought:
                # ACT: Execute tool
                try:
                    tool_info = self._parse_tool_call(thought)
                    tool_result = await self._execute_tool(
                        tool_info["tool"],
                        tool_info.get("operation"),
                        tool_info.get("parameters", {}),
                    )
                    tool_calls_count += 1

                    steps.append(
                        {
                            "type": "action",
                            "tool": tool_info["tool"],
                            "operation": tool_info.get("operation"),
                            "parameters": tool_info.get("parameters"),
                            "iteration": iteration + 1,
                        }
                    )

                    # OBSERVE: Add tool result to conversation
                    observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
                    steps.append(
                        {
                            "type": "observation",
                            "content": observation,
                            "iteration": iteration + 1,
                        }
                    )

                    # Add to messages for next iteration
                    messages.append(LLMMessage(role="assistant", content=thought))
                    messages.append(LLMMessage(role="user", content=observation))

                except Exception as e:
                    error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
                    steps.append(
                        {
                            "type": "observation",
                            "content": error_msg,
                            "iteration": iteration + 1,
                            "error": True,
                        }
                    )
                    messages.append(LLMMessage(role="assistant", content=thought))
                    messages.append(LLMMessage(role="user", content=error_msg))

            else:
                # LLM didn't provide clear action - treat as final answer
                return {
                    "final_answer": thought,
                    "steps": steps,
                    "iterations": iteration + 1,
                    "tool_calls_count": tool_calls_count,
                    "knowledge_retrievals": knowledge_retrievals,
                    "total_tokens": total_tokens,
                }

        # Max iterations reached
        logger.warning(f"KnowledgeAwareAgent {self.agent_id} reached max iterations")
        return {
            "final_answer": "Max iterations reached. Unable to complete task fully.",
            "steps": steps,
            "iterations": self._max_iterations,
            "tool_calls_count": tool_calls_count,
            "knowledge_retrievals": knowledge_retrievals,
            "total_tokens": total_tokens,
            "max_iterations_reached": True,
        }

    async def _retrieve_relevant_knowledge(
        self, task: str, context: Dict[str, Any], iteration: int
    ) -> List[Entity]:
        """
        Retrieve relevant knowledge for the current reasoning step.

        Args:
            task: Task description
            context: Context dictionary
            iteration: Current iteration number

        Returns:
            List of relevant entities
        """
        # Extract entity IDs or types from context
        context.get("entity_types")
        context.get("session_id", f"temp_{self.agent_id}")

        # Try to retrieve knowledge
        # For now, use a simple approach - could be enhanced with embeddings
        try:
            # Use vector search if query is provided (simplified)
            # In production, this would generate embeddings for the task

            # For iteration 0, retrieve general context
            # For later iterations, retrieve more specific knowledge
            # limit = 5 if iteration == 0 else 3  # Reserved for future use

            if hasattr(self, "_knowledge_context") and self._knowledge_context:
                # Get entities mentioned in previous knowledge
                for _, kg_ctx in self._knowledge_context.items():
                    # This is simplified - in production would extract entity
                    # IDs properly
                    pass

            # Placeholder: Return empty for now
            # In a full implementation, this would:
            # 1. Generate embedding for task
            # 2. Use vector_search on graph_store
            # 3. Filter by relevance
            # 4. Return top-k results

            return []

        except Exception as e:
            logger.error(f"Error retrieving knowledge: {e}")
            return []

    def _format_retrieved_knowledge(self, entities: List[Entity]) -> str:
        """
        Format retrieved knowledge entities for inclusion in prompt.

        Args:
            entities: List of entities retrieved from graph

        Returns:
            Formatted knowledge string
        """
        if not entities:
            return ""

        lines = []
        for entity in entities:
            entity_str = f"- {entity.entity_type}: {entity.id}"
            if entity.properties:
                props_str = ", ".join(f"{k}={v}" for k, v in entity.properties.items())
                entity_str += f" ({props_str})"
            lines.append(entity_str)

        return "\n".join(lines)

    def get_knowledge_context(self) -> Dict[str, Any]:
        """
        Get accumulated knowledge context.

        Returns:
            Dictionary of accumulated knowledge
        """
        return self._knowledge_context.copy()

    def clear_knowledge_context(self) -> None:
        """Clear accumulated knowledge context."""
        self._knowledge_context.clear()
        logger.debug(f"Cleared knowledge context for agent {self.agent_id}")