aiecs 1.0.1__py3-none-any.whl → 1.7.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +435 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3949 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1731 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +894 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +377 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +230 -37
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +328 -0
- aiecs/llm/clients/google_function_calling_mixin.py +415 -0
- aiecs/llm/clients/googleai_client.py +314 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +1186 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1464 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1016 -0
- aiecs/tools/docs/document_writer_tool.py +2008 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +220 -141
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/METADATA +52 -15
- aiecs-1.7.17.dist-info/RECORD +337 -0
- aiecs-1.7.17.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.17.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1731 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hybrid Agent
|
|
3
|
+
|
|
4
|
+
Agent implementation combining LLM reasoning with tool execution capabilities.
|
|
5
|
+
Implements the ReAct (Reasoning + Acting) pattern.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Dict, List, Any, Optional, Union, TYPE_CHECKING, AsyncIterator
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
|
|
12
|
+
from aiecs.llm import BaseLLMClient, CacheControl, LLMMessage
|
|
13
|
+
from aiecs.tools import get_tool, BaseTool
|
|
14
|
+
from aiecs.domain.agent.tools.schema_generator import ToolSchemaGenerator
|
|
15
|
+
|
|
16
|
+
from .base_agent import BaseAIAgent
|
|
17
|
+
from .models import AgentType, AgentConfiguration, ToolObservation
|
|
18
|
+
from .exceptions import TaskExecutionError, ToolAccessDeniedError
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from aiecs.llm.protocols import LLMClientProtocol
|
|
22
|
+
from aiecs.domain.agent.integration.protocols import (
|
|
23
|
+
ConfigManagerProtocol,
|
|
24
|
+
CheckpointerProtocol,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class HybridAgent(BaseAIAgent):
|
|
31
|
+
"""
|
|
32
|
+
Hybrid agent combining LLM reasoning with tool execution.
|
|
33
|
+
|
|
34
|
+
Implements ReAct pattern: Reason → Act → Observe loop.
|
|
35
|
+
|
|
36
|
+
This agent supports flexible tool and LLM client configurations:
|
|
37
|
+
|
|
38
|
+
**Tool Configuration:**
|
|
39
|
+
- Tool names (List[str]): Backward compatible, tools loaded by name
|
|
40
|
+
- Tool instances (Dict[str, BaseTool]): Pre-configured tools with preserved state
|
|
41
|
+
|
|
42
|
+
**LLM Client Configuration:**
|
|
43
|
+
- BaseLLMClient: Standard LLM clients (OpenAI, xAI, etc.)
|
|
44
|
+
- Custom clients: Any object implementing LLMClientProtocol (duck typing)
|
|
45
|
+
|
|
46
|
+
**ReAct Format Reference (for callers to include in their prompts):**
|
|
47
|
+
|
|
48
|
+
The caller is responsible for ensuring the LLM follows the correct format.
|
|
49
|
+
Below are the standard formats that HybridAgent expects:
|
|
50
|
+
|
|
51
|
+
CORRECT FORMAT EXAMPLE::
|
|
52
|
+
|
|
53
|
+
<THOUGHT>
|
|
54
|
+
I need to search for information about the weather. Let me use the search tool.
|
|
55
|
+
</THOUGHT>
|
|
56
|
+
|
|
57
|
+
TOOL: search
|
|
58
|
+
OPERATION: query
|
|
59
|
+
PARAMETERS: {"q": "weather today"}
|
|
60
|
+
|
|
61
|
+
<OBSERVATION>
|
|
62
|
+
The search tool returned: Today's weather is sunny, 72°F.
|
|
63
|
+
</OBSERVATION>
|
|
64
|
+
|
|
65
|
+
<THOUGHT>
|
|
66
|
+
I have the weather information. Now I can provide the final response.
|
|
67
|
+
</THOUGHT>
|
|
68
|
+
|
|
69
|
+
FINAL RESPONSE: Today's weather is sunny, 72°F. finish
|
|
70
|
+
|
|
71
|
+
INCORRECT FORMAT (DO NOT DO THIS)::
|
|
72
|
+
|
|
73
|
+
<THOUGHT>
|
|
74
|
+
I need to search.
|
|
75
|
+
TOOL: search
|
|
76
|
+
OPERATION: query
|
|
77
|
+
</THOUGHT>
|
|
78
|
+
❌ Tool calls must be OUTSIDE the <THOUGHT> and <OBSERVATION> tags
|
|
79
|
+
|
|
80
|
+
<THOUGHT>
|
|
81
|
+
I know the answer.
|
|
82
|
+
FINAL RESPONSE: The answer is... finish
|
|
83
|
+
</THOUGHT>
|
|
84
|
+
❌ Final responses must be OUTSIDE the <THOUGHT> and <OBSERVATION> tags
|
|
85
|
+
❌ FINAL RESPONSE must end with 'finish' suffix to indicate completion
|
|
86
|
+
|
|
87
|
+
TOOL CALL FORMAT::
|
|
88
|
+
|
|
89
|
+
TOOL: <tool_name>
|
|
90
|
+
OPERATION: <operation_name>
|
|
91
|
+
PARAMETERS: <json_parameters>
|
|
92
|
+
|
|
93
|
+
FINAL RESPONSE FORMAT::
|
|
94
|
+
|
|
95
|
+
FINAL RESPONSE: <your_response> finish
|
|
96
|
+
|
|
97
|
+
**Important Notes for Callers:**
|
|
98
|
+
|
|
99
|
+
- FINAL RESPONSE MUST end with 'finish' to indicate completion
|
|
100
|
+
- If no 'finish' suffix, the system assumes response is incomplete and will continue iteration
|
|
101
|
+
- LLM can output JSON or any text format - it will be passed through unchanged
|
|
102
|
+
- Each iteration will inform LLM of current iteration number and remaining iterations
|
|
103
|
+
- If LLM generation is incomplete, it will be asked to continue from where it left off
|
|
104
|
+
- Callers can customize max_iterations to control loop behavior
|
|
105
|
+
- Callers are responsible for parsing and handling LLM output format
|
|
106
|
+
|
|
107
|
+
Examples:
|
|
108
|
+
# Example 1: Basic usage with tool names (backward compatible)
|
|
109
|
+
agent = HybridAgent(
|
|
110
|
+
agent_id="agent1",
|
|
111
|
+
name="My Agent",
|
|
112
|
+
llm_client=OpenAIClient(),
|
|
113
|
+
tools=["search", "calculator"],
|
|
114
|
+
config=config
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
# Example 2: Using tool instances with preserved state
|
|
118
|
+
from aiecs.tools import BaseTool
|
|
119
|
+
|
|
120
|
+
class StatefulSearchTool(BaseTool):
|
|
121
|
+
def __init__(self, api_key: str, context_engine):
|
|
122
|
+
self.api_key = api_key
|
|
123
|
+
self.context_engine = context_engine
|
|
124
|
+
self.search_history = [] # State preserved across calls
|
|
125
|
+
|
|
126
|
+
async def run_async(self, operation: str, query: str):
|
|
127
|
+
self.search_history.append(query)
|
|
128
|
+
# Use context_engine for context-aware search
|
|
129
|
+
return f"Search results for: {query}"
|
|
130
|
+
|
|
131
|
+
# Create tool instances with dependencies
|
|
132
|
+
context_engine = ContextEngine()
|
|
133
|
+
await context_engine.initialize()
|
|
134
|
+
|
|
135
|
+
search_tool = StatefulSearchTool(
|
|
136
|
+
api_key="...",
|
|
137
|
+
context_engine=context_engine
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
agent = HybridAgent(
|
|
141
|
+
agent_id="agent1",
|
|
142
|
+
name="My Agent",
|
|
143
|
+
llm_client=OpenAIClient(),
|
|
144
|
+
tools={
|
|
145
|
+
"search": search_tool, # Stateful tool instance
|
|
146
|
+
"calculator": CalculatorTool()
|
|
147
|
+
},
|
|
148
|
+
config=config
|
|
149
|
+
)
|
|
150
|
+
# Tool state (search_history) is preserved across agent operations
|
|
151
|
+
|
|
152
|
+
# Example 3: Using custom LLM client wrapper
|
|
153
|
+
class CustomLLMWrapper:
|
|
154
|
+
provider_name = "custom_wrapper"
|
|
155
|
+
|
|
156
|
+
def __init__(self, base_client):
|
|
157
|
+
self.base_client = base_client
|
|
158
|
+
self.call_count = 0
|
|
159
|
+
|
|
160
|
+
async def generate_text(self, messages, **kwargs):
|
|
161
|
+
self.call_count += 1
|
|
162
|
+
# Add custom logging, retry logic, etc.
|
|
163
|
+
return await self.base_client.generate_text(messages, **kwargs)
|
|
164
|
+
|
|
165
|
+
async def stream_text(self, messages, **kwargs):
|
|
166
|
+
async for token in self.base_client.stream_text(messages, **kwargs):
|
|
167
|
+
yield token
|
|
168
|
+
|
|
169
|
+
async def close(self):
|
|
170
|
+
await self.base_client.close()
|
|
171
|
+
|
|
172
|
+
# Wrap existing client
|
|
173
|
+
base_client = OpenAIClient()
|
|
174
|
+
wrapped_client = CustomLLMWrapper(base_client)
|
|
175
|
+
|
|
176
|
+
agent = HybridAgent(
|
|
177
|
+
agent_id="agent1",
|
|
178
|
+
name="My Agent",
|
|
179
|
+
llm_client=wrapped_client, # Custom wrapper, no inheritance needed
|
|
180
|
+
tools=["search", "calculator"],
|
|
181
|
+
config=config
|
|
182
|
+
)
|
|
183
|
+
|
|
184
|
+
# Example 4: Full-featured agent with all options
|
|
185
|
+
from aiecs.domain.context import ContextEngine
|
|
186
|
+
from aiecs.domain.agent.models import ResourceLimits
|
|
187
|
+
|
|
188
|
+
context_engine = ContextEngine()
|
|
189
|
+
await context_engine.initialize()
|
|
190
|
+
|
|
191
|
+
resource_limits = ResourceLimits(
|
|
192
|
+
max_concurrent_tasks=5,
|
|
193
|
+
max_tokens_per_minute=10000
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
agent = HybridAgent(
|
|
197
|
+
agent_id="agent1",
|
|
198
|
+
name="My Agent",
|
|
199
|
+
llm_client=CustomLLMWrapper(OpenAIClient()),
|
|
200
|
+
tools={
|
|
201
|
+
"search": StatefulSearchTool(api_key="...", context_engine=context_engine),
|
|
202
|
+
"calculator": CalculatorTool()
|
|
203
|
+
},
|
|
204
|
+
config=config,
|
|
205
|
+
config_manager=DatabaseConfigManager(),
|
|
206
|
+
checkpointer=RedisCheckpointer(),
|
|
207
|
+
context_engine=context_engine,
|
|
208
|
+
collaboration_enabled=True,
|
|
209
|
+
agent_registry={"agent2": other_agent},
|
|
210
|
+
learning_enabled=True,
|
|
211
|
+
resource_limits=resource_limits
|
|
212
|
+
)
|
|
213
|
+
|
|
214
|
+
# Example 5: Streaming with tool instances
|
|
215
|
+
agent = HybridAgent(
|
|
216
|
+
agent_id="agent1",
|
|
217
|
+
name="My Agent",
|
|
218
|
+
llm_client=OpenAIClient(),
|
|
219
|
+
tools={
|
|
220
|
+
"search": StatefulSearchTool(api_key="..."),
|
|
221
|
+
"calculator": CalculatorTool()
|
|
222
|
+
},
|
|
223
|
+
config=config
|
|
224
|
+
)
|
|
225
|
+
|
|
226
|
+
# Stream task execution (tokens + tool calls)
|
|
227
|
+
async for event in agent.execute_task_streaming(task, context):
|
|
228
|
+
if event['type'] == 'token':
|
|
229
|
+
print(event['content'], end='', flush=True)
|
|
230
|
+
elif event['type'] == 'tool_call':
|
|
231
|
+
print(f"\\nCalling {event['tool_name']}...")
|
|
232
|
+
elif event['type'] == 'tool_result':
|
|
233
|
+
print(f"Result: {event['result']}")
|
|
234
|
+
"""
|
|
235
|
+
|
|
236
|
+
def __init__(
|
|
237
|
+
self,
|
|
238
|
+
agent_id: str,
|
|
239
|
+
name: str,
|
|
240
|
+
llm_client: Union[BaseLLMClient, "LLMClientProtocol"],
|
|
241
|
+
tools: Union[List[str], Dict[str, BaseTool]],
|
|
242
|
+
config: AgentConfiguration,
|
|
243
|
+
description: Optional[str] = None,
|
|
244
|
+
version: str = "1.0.0",
|
|
245
|
+
max_iterations: int = 10,
|
|
246
|
+
config_manager: Optional["ConfigManagerProtocol"] = None,
|
|
247
|
+
checkpointer: Optional["CheckpointerProtocol"] = None,
|
|
248
|
+
context_engine: Optional[Any] = None,
|
|
249
|
+
collaboration_enabled: bool = False,
|
|
250
|
+
agent_registry: Optional[Dict[str, Any]] = None,
|
|
251
|
+
learning_enabled: bool = False,
|
|
252
|
+
resource_limits: Optional[Any] = None,
|
|
253
|
+
):
|
|
254
|
+
"""
|
|
255
|
+
Initialize Hybrid agent.
|
|
256
|
+
|
|
257
|
+
Args:
|
|
258
|
+
agent_id: Unique agent identifier
|
|
259
|
+
name: Agent name
|
|
260
|
+
llm_client: LLM client for reasoning (BaseLLMClient or any LLMClientProtocol)
|
|
261
|
+
tools: Tools - either list of tool names or dict of tool instances
|
|
262
|
+
config: Agent configuration
|
|
263
|
+
description: Optional description
|
|
264
|
+
version: Agent version
|
|
265
|
+
max_iterations: Maximum ReAct iterations
|
|
266
|
+
config_manager: Optional configuration manager for dynamic config
|
|
267
|
+
checkpointer: Optional checkpointer for state persistence
|
|
268
|
+
context_engine: Optional context engine for persistent storage
|
|
269
|
+
collaboration_enabled: Enable collaboration features
|
|
270
|
+
agent_registry: Registry of other agents for collaboration
|
|
271
|
+
learning_enabled: Enable learning features
|
|
272
|
+
resource_limits: Optional resource limits configuration
|
|
273
|
+
|
|
274
|
+
Example with tool instances:
|
|
275
|
+
```python
|
|
276
|
+
agent = HybridAgent(
|
|
277
|
+
agent_id="agent1",
|
|
278
|
+
name="My Agent",
|
|
279
|
+
llm_client=OpenAIClient(),
|
|
280
|
+
tools={
|
|
281
|
+
"search": SearchTool(api_key="..."),
|
|
282
|
+
"calculator": CalculatorTool()
|
|
283
|
+
},
|
|
284
|
+
config=config
|
|
285
|
+
)
|
|
286
|
+
```
|
|
287
|
+
|
|
288
|
+
Example with tool names (backward compatible):
|
|
289
|
+
```python
|
|
290
|
+
agent = HybridAgent(
|
|
291
|
+
agent_id="agent1",
|
|
292
|
+
name="My Agent",
|
|
293
|
+
llm_client=OpenAIClient(),
|
|
294
|
+
tools=["search", "calculator"],
|
|
295
|
+
config=config
|
|
296
|
+
)
|
|
297
|
+
```
|
|
298
|
+
"""
|
|
299
|
+
super().__init__(
|
|
300
|
+
agent_id=agent_id,
|
|
301
|
+
name=name,
|
|
302
|
+
agent_type=AgentType.DEVELOPER, # Can be adjusted based on use case
|
|
303
|
+
config=config,
|
|
304
|
+
description=description or "Hybrid agent with LLM reasoning and tool execution",
|
|
305
|
+
version=version,
|
|
306
|
+
tools=tools,
|
|
307
|
+
llm_client=llm_client, # type: ignore[arg-type]
|
|
308
|
+
config_manager=config_manager,
|
|
309
|
+
checkpointer=checkpointer,
|
|
310
|
+
context_engine=context_engine,
|
|
311
|
+
collaboration_enabled=collaboration_enabled,
|
|
312
|
+
agent_registry=agent_registry,
|
|
313
|
+
learning_enabled=learning_enabled,
|
|
314
|
+
resource_limits=resource_limits,
|
|
315
|
+
)
|
|
316
|
+
|
|
317
|
+
# Store LLM client reference (from BaseAIAgent or local)
|
|
318
|
+
self.llm_client = self._llm_client if self._llm_client else llm_client
|
|
319
|
+
self._max_iterations = max_iterations
|
|
320
|
+
self._system_prompt: Optional[str] = None
|
|
321
|
+
self._conversation_history: List[LLMMessage] = []
|
|
322
|
+
self._tool_schemas: List[Dict[str, Any]] = []
|
|
323
|
+
self._use_function_calling: bool = False # Will be determined during initialization
|
|
324
|
+
|
|
325
|
+
logger.info(f"HybridAgent initialized: {agent_id} with LLM ({self.llm_client.provider_name}) " f"and {len(tools) if isinstance(tools, (list, dict)) else 0} tools")
|
|
326
|
+
|
|
327
|
+
async def _initialize(self) -> None:
|
|
328
|
+
"""Initialize Hybrid agent - validate LLM client, load tools, and build system prompt."""
|
|
329
|
+
# Validate LLM client using BaseAIAgent helper
|
|
330
|
+
self._validate_llm_client()
|
|
331
|
+
|
|
332
|
+
# Load tools using BaseAIAgent helper
|
|
333
|
+
self._load_tools()
|
|
334
|
+
|
|
335
|
+
# Get tool instances from BaseAIAgent (if provided as instances)
|
|
336
|
+
base_tool_instances = self._get_tool_instances()
|
|
337
|
+
|
|
338
|
+
if base_tool_instances:
|
|
339
|
+
# Tool instances were provided - use them directly
|
|
340
|
+
self._tool_instances = base_tool_instances
|
|
341
|
+
logger.info(f"HybridAgent {self.agent_id} using " f"{len(self._tool_instances)} pre-configured tool instances")
|
|
342
|
+
elif self._available_tools:
|
|
343
|
+
# Tool names were provided - load them
|
|
344
|
+
self._tool_instances = {}
|
|
345
|
+
for tool_name in self._available_tools:
|
|
346
|
+
try:
|
|
347
|
+
self._tool_instances[tool_name] = get_tool(tool_name)
|
|
348
|
+
logger.debug(f"HybridAgent {self.agent_id} loaded tool: {tool_name}")
|
|
349
|
+
except Exception as e:
|
|
350
|
+
logger.warning(f"Failed to load tool {tool_name}: {e}")
|
|
351
|
+
|
|
352
|
+
logger.info(f"HybridAgent {self.agent_id} initialized with {len(self._tool_instances)} tools")
|
|
353
|
+
|
|
354
|
+
# Generate tool schemas for Function Calling
|
|
355
|
+
self._generate_tool_schemas()
|
|
356
|
+
|
|
357
|
+
# Check if LLM client supports Function Calling
|
|
358
|
+
self._use_function_calling = self._check_function_calling_support()
|
|
359
|
+
|
|
360
|
+
# Build system prompt
|
|
361
|
+
self._system_prompt = self._build_system_prompt()
|
|
362
|
+
|
|
363
|
+
async def _shutdown(self) -> None:
|
|
364
|
+
"""Shutdown Hybrid agent."""
|
|
365
|
+
self._conversation_history.clear()
|
|
366
|
+
if self._tool_instances:
|
|
367
|
+
self._tool_instances.clear()
|
|
368
|
+
|
|
369
|
+
if hasattr(self.llm_client, "close"):
|
|
370
|
+
await self.llm_client.close()
|
|
371
|
+
|
|
372
|
+
logger.info(f"HybridAgent {self.agent_id} shut down")
|
|
373
|
+
|
|
374
|
+
def _build_system_prompt(self) -> str:
|
|
375
|
+
"""Build system prompt including tool descriptions.
|
|
376
|
+
|
|
377
|
+
Precedence order for base prompt:
|
|
378
|
+
1. config.system_prompt - Direct custom prompt (highest priority)
|
|
379
|
+
2. Assembled from goal/backstory/domain_knowledge
|
|
380
|
+
3. Default: Empty (ReAct instructions will be added)
|
|
381
|
+
|
|
382
|
+
Note: ReAct instructions and tool info are always appended regardless
|
|
383
|
+
of whether system_prompt is used, as they're essential for agent operation.
|
|
384
|
+
"""
|
|
385
|
+
parts = []
|
|
386
|
+
|
|
387
|
+
# 1. Custom system_prompt takes precedence over goal/backstory
|
|
388
|
+
if self._config.system_prompt:
|
|
389
|
+
parts.append(self._config.system_prompt)
|
|
390
|
+
else:
|
|
391
|
+
# 2. Assemble from individual fields
|
|
392
|
+
if self._config.goal:
|
|
393
|
+
parts.append(f"Goal: {self._config.goal}")
|
|
394
|
+
|
|
395
|
+
if self._config.backstory:
|
|
396
|
+
parts.append(f"Background: {self._config.backstory}")
|
|
397
|
+
|
|
398
|
+
if self._config.domain_knowledge:
|
|
399
|
+
parts.append(f"Domain Knowledge: {self._config.domain_knowledge}")
|
|
400
|
+
|
|
401
|
+
# Add ReAct instructions (always required for HybridAgent)
|
|
402
|
+
parts.append(
|
|
403
|
+
"Within the given identity framework, you are also a highly intelligent, responsive, and accurate reasoning agent. that can use tools to complete tasks. "
|
|
404
|
+
"Follow the ReAct (Reasoning + Acting) pattern to achieve best results:\n"
|
|
405
|
+
"1. THOUGHT: Analyze the task and decide what to do\n"
|
|
406
|
+
"2. ACTION: Use a tool if needed, or provide final answer\n"
|
|
407
|
+
"3. OBSERVATION: Review the tool result and continue reasoning\n\n"
|
|
408
|
+
"RESPONSE FORMAT REQUIREMENTS:\n"
|
|
409
|
+
"- Wrap your thinking process in <THOUGHT>...</THOUGHT> tags\n"
|
|
410
|
+
"- Wrap your insight about tool result in <OBSERVATION>...</OBSERVATION> tags\n"
|
|
411
|
+
"- Tool calls (TOOL:, OPERATION:, PARAMETERS:) MUST be OUTSIDE <THOUGHT> and <OBSERVATION> tags\n"
|
|
412
|
+
"- Final responses (FINAL RESPONSE:) MUST be OUTSIDE <THOUGHT> and <OBSERVATION> tags\n\n"
|
|
413
|
+
"THINKING GUIDANCE:\n"
|
|
414
|
+
"When writing <THOUGHT> sections, consider:\n"
|
|
415
|
+
"- What is the core thing to do?\n"
|
|
416
|
+
"- What information do I already have?\n"
|
|
417
|
+
"- What information do I need to gather?\n"
|
|
418
|
+
"- Which tools would be most helpful?\n"
|
|
419
|
+
"- What action should I take?\n\n"
|
|
420
|
+
"OBSERVATION GUIDANCE:\n"
|
|
421
|
+
"When writing <OBSERVATION> sections, consider:\n"
|
|
422
|
+
"- What did I learn from the tool results?\n"
|
|
423
|
+
"- How does this information inform my next work?\n"
|
|
424
|
+
"- Do I need additional information?\n"
|
|
425
|
+
"- Am I ready to provide a final response?"
|
|
426
|
+
)
|
|
427
|
+
|
|
428
|
+
# Add available tools (always required for HybridAgent)
|
|
429
|
+
if self._available_tools:
|
|
430
|
+
parts.append(f"\nAvailable tools: {', '.join(self._available_tools)}")
|
|
431
|
+
|
|
432
|
+
return "\n\n".join(parts)
|
|
433
|
+
|
|
434
|
+
async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
|
|
435
|
+
"""
|
|
436
|
+
Execute a task using ReAct loop.
|
|
437
|
+
|
|
438
|
+
Args:
|
|
439
|
+
task: Task specification with 'description' or 'prompt'
|
|
440
|
+
context: Execution context
|
|
441
|
+
|
|
442
|
+
Returns:
|
|
443
|
+
Execution result with 'output', 'reasoning_steps', 'tool_calls'
|
|
444
|
+
|
|
445
|
+
Raises:
|
|
446
|
+
TaskExecutionError: If task execution fails
|
|
447
|
+
"""
|
|
448
|
+
start_time = datetime.utcnow()
|
|
449
|
+
|
|
450
|
+
try:
|
|
451
|
+
# Extract task description
|
|
452
|
+
task_description = task.get("description") or task.get("prompt") or task.get("task")
|
|
453
|
+
if not task_description:
|
|
454
|
+
raise TaskExecutionError(
|
|
455
|
+
"Task must contain 'description', 'prompt', or 'task' field",
|
|
456
|
+
agent_id=self.agent_id,
|
|
457
|
+
)
|
|
458
|
+
|
|
459
|
+
# Transition to busy state
|
|
460
|
+
self._transition_state(self.state.__class__.BUSY)
|
|
461
|
+
self._current_task_id = task.get("task_id")
|
|
462
|
+
|
|
463
|
+
# Execute ReAct loop
|
|
464
|
+
result = await self._react_loop(task_description, context)
|
|
465
|
+
|
|
466
|
+
# Calculate execution time
|
|
467
|
+
execution_time = (datetime.utcnow() - start_time).total_seconds()
|
|
468
|
+
|
|
469
|
+
# Update metrics
|
|
470
|
+
self.update_metrics(
|
|
471
|
+
execution_time=execution_time,
|
|
472
|
+
success=True,
|
|
473
|
+
tokens_used=result.get("total_tokens"),
|
|
474
|
+
tool_calls=result.get("tool_calls_count", 0),
|
|
475
|
+
)
|
|
476
|
+
|
|
477
|
+
# Transition back to active
|
|
478
|
+
self._transition_state(self.state.__class__.ACTIVE)
|
|
479
|
+
self._current_task_id = None
|
|
480
|
+
self.last_active_at = datetime.utcnow()
|
|
481
|
+
|
|
482
|
+
return {
|
|
483
|
+
"success": True,
|
|
484
|
+
"output": result.get("final_response"), # Changed from final_answer
|
|
485
|
+
"reasoning_steps": result.get("steps"),
|
|
486
|
+
"tool_calls_count": result.get("tool_calls_count"),
|
|
487
|
+
"iterations": result.get("iterations"),
|
|
488
|
+
"execution_time": execution_time,
|
|
489
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
except Exception as e:
|
|
493
|
+
logger.error(f"Task execution failed for {self.agent_id}: {e}")
|
|
494
|
+
|
|
495
|
+
# Update metrics for failure
|
|
496
|
+
execution_time = (datetime.utcnow() - start_time).total_seconds()
|
|
497
|
+
self.update_metrics(execution_time=execution_time, success=False)
|
|
498
|
+
|
|
499
|
+
# Transition to error state
|
|
500
|
+
self._transition_state(self.state.__class__.ERROR)
|
|
501
|
+
self._current_task_id = None
|
|
502
|
+
|
|
503
|
+
raise TaskExecutionError(
|
|
504
|
+
f"Task execution failed: {str(e)}",
|
|
505
|
+
agent_id=self.agent_id,
|
|
506
|
+
task_id=task.get("task_id"),
|
|
507
|
+
)
|
|
508
|
+
|
|
509
|
+
async def process_message(self, message: str, sender_id: Optional[str] = None) -> Dict[str, Any]:
|
|
510
|
+
"""
|
|
511
|
+
Process an incoming message using ReAct loop.
|
|
512
|
+
|
|
513
|
+
Args:
|
|
514
|
+
message: Message content
|
|
515
|
+
sender_id: Optional sender identifier
|
|
516
|
+
|
|
517
|
+
Returns:
|
|
518
|
+
Response dictionary with 'response', 'reasoning_steps'
|
|
519
|
+
"""
|
|
520
|
+
try:
|
|
521
|
+
# Build task from message
|
|
522
|
+
task = {
|
|
523
|
+
"description": message,
|
|
524
|
+
"task_id": f"msg_{datetime.utcnow().timestamp()}",
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
# Execute as task
|
|
528
|
+
result = await self.execute_task(task, {"sender_id": sender_id})
|
|
529
|
+
|
|
530
|
+
return {
|
|
531
|
+
"response": result.get("output"),
|
|
532
|
+
"reasoning_steps": result.get("reasoning_steps"),
|
|
533
|
+
"timestamp": result.get("timestamp"),
|
|
534
|
+
}
|
|
535
|
+
|
|
536
|
+
except Exception as e:
|
|
537
|
+
logger.error(f"Message processing failed for {self.agent_id}: {e}")
|
|
538
|
+
raise
|
|
539
|
+
|
|
540
|
+
async def execute_task_streaming(self, task: Dict[str, Any], context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
|
|
541
|
+
"""
|
|
542
|
+
Execute a task with streaming tokens and tool calls.
|
|
543
|
+
|
|
544
|
+
Args:
|
|
545
|
+
task: Task specification with 'description' or 'prompt'
|
|
546
|
+
context: Execution context
|
|
547
|
+
|
|
548
|
+
Yields:
|
|
549
|
+
Dict[str, Any]: Event dictionaries with streaming tokens, tool calls, and results
|
|
550
|
+
|
|
551
|
+
Example:
|
|
552
|
+
```python
|
|
553
|
+
async for event in agent.execute_task_streaming(task, context):
|
|
554
|
+
if event['type'] == 'token':
|
|
555
|
+
print(event['content'], end='', flush=True)
|
|
556
|
+
elif event['type'] == 'tool_call':
|
|
557
|
+
print(f"\\nCalling {event['tool_name']}...")
|
|
558
|
+
elif event['type'] == 'tool_result':
|
|
559
|
+
print(f"Result: {event['result']}")
|
|
560
|
+
```
|
|
561
|
+
"""
|
|
562
|
+
start_time = datetime.utcnow()
|
|
563
|
+
|
|
564
|
+
try:
|
|
565
|
+
# Extract task description
|
|
566
|
+
task_description = task.get("description") or task.get("prompt") or task.get("task")
|
|
567
|
+
if not task_description:
|
|
568
|
+
yield {
|
|
569
|
+
"type": "error",
|
|
570
|
+
"error": "Task must contain 'description', 'prompt', or 'task' field",
|
|
571
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
572
|
+
}
|
|
573
|
+
return
|
|
574
|
+
|
|
575
|
+
# Transition to busy state
|
|
576
|
+
self._transition_state(self.state.__class__.BUSY)
|
|
577
|
+
self._current_task_id = task.get("task_id")
|
|
578
|
+
|
|
579
|
+
# Yield status
|
|
580
|
+
yield {
|
|
581
|
+
"type": "status",
|
|
582
|
+
"status": "started",
|
|
583
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
584
|
+
}
|
|
585
|
+
|
|
586
|
+
# Execute streaming ReAct loop
|
|
587
|
+
async for event in self._react_loop_streaming(task_description, context):
|
|
588
|
+
yield event
|
|
589
|
+
|
|
590
|
+
# Get final result from last event
|
|
591
|
+
if event.get("type") == "result":
|
|
592
|
+
result = event
|
|
593
|
+
|
|
594
|
+
# Calculate execution time
|
|
595
|
+
execution_time = (datetime.utcnow() - start_time).total_seconds()
|
|
596
|
+
|
|
597
|
+
# Update metrics
|
|
598
|
+
self.update_metrics(
|
|
599
|
+
execution_time=execution_time,
|
|
600
|
+
success=True,
|
|
601
|
+
tokens_used=result.get("total_tokens"),
|
|
602
|
+
tool_calls=result.get("tool_calls_count", 0),
|
|
603
|
+
)
|
|
604
|
+
|
|
605
|
+
# Transition back to active
|
|
606
|
+
self._transition_state(self.state.__class__.ACTIVE)
|
|
607
|
+
self._current_task_id = None
|
|
608
|
+
self.last_active_at = datetime.utcnow()
|
|
609
|
+
|
|
610
|
+
except Exception as e:
|
|
611
|
+
logger.error(f"Streaming task execution failed for {self.agent_id}: {e}")
|
|
612
|
+
|
|
613
|
+
# Update metrics for failure
|
|
614
|
+
execution_time = (datetime.utcnow() - start_time).total_seconds()
|
|
615
|
+
self.update_metrics(execution_time=execution_time, success=False)
|
|
616
|
+
|
|
617
|
+
# Transition to error state
|
|
618
|
+
self._transition_state(self.state.__class__.ERROR)
|
|
619
|
+
self._current_task_id = None
|
|
620
|
+
|
|
621
|
+
yield {
|
|
622
|
+
"type": "error",
|
|
623
|
+
"error": str(e),
|
|
624
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
async def process_message_streaming(self, message: str, sender_id: Optional[str] = None) -> AsyncIterator[str]:
|
|
628
|
+
"""
|
|
629
|
+
Process a message with streaming response.
|
|
630
|
+
|
|
631
|
+
Args:
|
|
632
|
+
message: Message content
|
|
633
|
+
sender_id: Optional sender identifier
|
|
634
|
+
|
|
635
|
+
Yields:
|
|
636
|
+
str: Response text tokens
|
|
637
|
+
|
|
638
|
+
Example:
|
|
639
|
+
```python
|
|
640
|
+
async for token in agent.process_message_streaming("Hello!"):
|
|
641
|
+
print(token, end='', flush=True)
|
|
642
|
+
```
|
|
643
|
+
"""
|
|
644
|
+
try:
|
|
645
|
+
# Build task from message
|
|
646
|
+
task = {
|
|
647
|
+
"description": message,
|
|
648
|
+
"task_id": f"msg_{datetime.utcnow().timestamp()}",
|
|
649
|
+
}
|
|
650
|
+
|
|
651
|
+
# Stream task execution
|
|
652
|
+
async for event in self.execute_task_streaming(task, {"sender_id": sender_id}):
|
|
653
|
+
if event["type"] == "token":
|
|
654
|
+
yield event["content"]
|
|
655
|
+
|
|
656
|
+
except Exception as e:
|
|
657
|
+
logger.error(f"Streaming message processing failed for {self.agent_id}: {e}")
|
|
658
|
+
raise
|
|
659
|
+
|
|
660
|
+
async def _react_loop_streaming(self, task: str, context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
|
|
661
|
+
"""
|
|
662
|
+
Execute ReAct loop with streaming: Reason → Act → Observe.
|
|
663
|
+
|
|
664
|
+
Args:
|
|
665
|
+
task: Task description
|
|
666
|
+
context: Context dictionary
|
|
667
|
+
|
|
668
|
+
Yields:
|
|
669
|
+
Dict[str, Any]: Event dictionaries with streaming tokens, tool calls, and results
|
|
670
|
+
"""
|
|
671
|
+
steps = []
|
|
672
|
+
tool_calls_count = 0
|
|
673
|
+
total_tokens = 0
|
|
674
|
+
|
|
675
|
+
# Build initial messages
|
|
676
|
+
messages = self._build_initial_messages(task, context)
|
|
677
|
+
|
|
678
|
+
for iteration in range(self._max_iterations):
|
|
679
|
+
logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")
|
|
680
|
+
|
|
681
|
+
# Add iteration info to messages (except first iteration which has task context)
|
|
682
|
+
if iteration > 0:
|
|
683
|
+
iteration_info = (
|
|
684
|
+
f"[Iteration {iteration + 1}/{self._max_iterations}, "
|
|
685
|
+
f"remaining: {self._max_iterations - iteration - 1}]"
|
|
686
|
+
)
|
|
687
|
+
# Only add if the last message is not already an iteration info
|
|
688
|
+
if messages and not messages[-1].content.startswith("[Iteration"):
|
|
689
|
+
messages.append(LLMMessage(role="user", content=iteration_info))
|
|
690
|
+
|
|
691
|
+
# Yield iteration status
|
|
692
|
+
yield {
|
|
693
|
+
"type": "status",
|
|
694
|
+
"status": "thinking",
|
|
695
|
+
"iteration": iteration + 1,
|
|
696
|
+
"max_iterations": self._max_iterations,
|
|
697
|
+
"remaining": self._max_iterations - iteration - 1,
|
|
698
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
699
|
+
}
|
|
700
|
+
|
|
701
|
+
# THINK: Stream LLM reasoning
|
|
702
|
+
thought_tokens = []
|
|
703
|
+
tool_calls_from_stream = None
|
|
704
|
+
|
|
705
|
+
# Use Function Calling if supported, otherwise use ReAct mode
|
|
706
|
+
if self._use_function_calling and self._tool_schemas:
|
|
707
|
+
# Convert schemas to tools format
|
|
708
|
+
tools = [{"type": "function", "function": schema} for schema in self._tool_schemas]
|
|
709
|
+
# Use return_chunks=True to get tool_calls information
|
|
710
|
+
stream_gen = self.llm_client.stream_text( # type: ignore[attr-defined]
|
|
711
|
+
messages=messages,
|
|
712
|
+
model=self._config.llm_model,
|
|
713
|
+
temperature=self._config.temperature,
|
|
714
|
+
max_tokens=self._config.max_tokens,
|
|
715
|
+
tools=tools,
|
|
716
|
+
tool_choice="auto",
|
|
717
|
+
return_chunks=True, # Enable tool_calls accumulation
|
|
718
|
+
)
|
|
719
|
+
else:
|
|
720
|
+
# Fallback to ReAct mode
|
|
721
|
+
stream_gen = self.llm_client.stream_text( # type: ignore[attr-defined]
|
|
722
|
+
messages=messages,
|
|
723
|
+
model=self._config.llm_model,
|
|
724
|
+
temperature=self._config.temperature,
|
|
725
|
+
max_tokens=self._config.max_tokens,
|
|
726
|
+
)
|
|
727
|
+
|
|
728
|
+
# Stream tokens and collect tool calls
|
|
729
|
+
from aiecs.llm.clients.openai_compatible_mixin import StreamChunk
|
|
730
|
+
|
|
731
|
+
async for chunk in stream_gen:
|
|
732
|
+
# Handle StreamChunk objects (Function Calling mode)
|
|
733
|
+
if isinstance(chunk, StreamChunk):
|
|
734
|
+
if chunk.type == "token" and chunk.content:
|
|
735
|
+
thought_tokens.append(chunk.content)
|
|
736
|
+
yield {
|
|
737
|
+
"type": "token",
|
|
738
|
+
"content": chunk.content,
|
|
739
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
740
|
+
}
|
|
741
|
+
elif chunk.type == "tool_call" and chunk.tool_call:
|
|
742
|
+
# Yield tool call update event
|
|
743
|
+
yield {
|
|
744
|
+
"type": "tool_call_update",
|
|
745
|
+
"tool_call": chunk.tool_call,
|
|
746
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
747
|
+
}
|
|
748
|
+
elif chunk.type == "tool_calls" and chunk.tool_calls:
|
|
749
|
+
# Complete tool_calls received
|
|
750
|
+
tool_calls_from_stream = chunk.tool_calls
|
|
751
|
+
yield {
|
|
752
|
+
"type": "tool_calls",
|
|
753
|
+
"tool_calls": chunk.tool_calls,
|
|
754
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
755
|
+
}
|
|
756
|
+
else:
|
|
757
|
+
# Handle plain string tokens (ReAct mode or non-Function Calling)
|
|
758
|
+
thought_tokens.append(chunk)
|
|
759
|
+
yield {
|
|
760
|
+
"type": "token",
|
|
761
|
+
"content": chunk,
|
|
762
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
763
|
+
}
|
|
764
|
+
|
|
765
|
+
thought_raw = "".join(thought_tokens)
|
|
766
|
+
|
|
767
|
+
# Store raw output in steps (no format processing)
|
|
768
|
+
steps.append(
|
|
769
|
+
{
|
|
770
|
+
"type": "thought",
|
|
771
|
+
"content": thought_raw.strip(), # Return raw output without processing
|
|
772
|
+
"iteration": iteration + 1,
|
|
773
|
+
}
|
|
774
|
+
)
|
|
775
|
+
|
|
776
|
+
# Process tool_calls if received from stream
|
|
777
|
+
if tool_calls_from_stream:
|
|
778
|
+
# Process each tool call
|
|
779
|
+
for tool_call in tool_calls_from_stream:
|
|
780
|
+
try:
|
|
781
|
+
func_name = tool_call["function"]["name"]
|
|
782
|
+
func_args = tool_call["function"]["arguments"]
|
|
783
|
+
|
|
784
|
+
# Parse function name to extract tool and operation
|
|
785
|
+
# CRITICAL: Try exact match first, then fall back to underscore parsing
|
|
786
|
+
if self._tool_instances and func_name in self._tool_instances:
|
|
787
|
+
# Exact match found - use full function name as tool name
|
|
788
|
+
tool_name = func_name
|
|
789
|
+
operation = None
|
|
790
|
+
elif self._available_tools and func_name in self._available_tools:
|
|
791
|
+
# Exact match in available tools list
|
|
792
|
+
tool_name = func_name
|
|
793
|
+
operation = None
|
|
794
|
+
else:
|
|
795
|
+
# Fallback: try underscore parsing for legacy compatibility
|
|
796
|
+
parts = func_name.split("_", 1)
|
|
797
|
+
if len(parts) == 2:
|
|
798
|
+
tool_name, operation = parts
|
|
799
|
+
else:
|
|
800
|
+
tool_name = parts[0]
|
|
801
|
+
operation = None
|
|
802
|
+
|
|
803
|
+
# Parse arguments JSON
|
|
804
|
+
import json
|
|
805
|
+
if isinstance(func_args, str):
|
|
806
|
+
parameters = json.loads(func_args)
|
|
807
|
+
else:
|
|
808
|
+
parameters = func_args if func_args else {}
|
|
809
|
+
|
|
810
|
+
# Yield tool call event
|
|
811
|
+
yield {
|
|
812
|
+
"type": "tool_call",
|
|
813
|
+
"tool_name": tool_name,
|
|
814
|
+
"operation": operation,
|
|
815
|
+
"parameters": parameters,
|
|
816
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
817
|
+
}
|
|
818
|
+
|
|
819
|
+
# Execute tool
|
|
820
|
+
tool_result = await self._execute_tool(tool_name, operation, parameters)
|
|
821
|
+
tool_calls_count += 1
|
|
822
|
+
|
|
823
|
+
# Wrap tool call and result in step
|
|
824
|
+
steps.append(
|
|
825
|
+
{
|
|
826
|
+
"type": "action",
|
|
827
|
+
"tool": tool_name,
|
|
828
|
+
"operation": operation,
|
|
829
|
+
"parameters": parameters,
|
|
830
|
+
"result": str(tool_result), # Include result in step
|
|
831
|
+
"iteration": iteration + 1,
|
|
832
|
+
}
|
|
833
|
+
)
|
|
834
|
+
|
|
835
|
+
# Yield tool result event (streaming)
|
|
836
|
+
yield {
|
|
837
|
+
"type": "tool_result",
|
|
838
|
+
"tool_name": tool_name,
|
|
839
|
+
"result": tool_result,
|
|
840
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
# Add tool result to messages (for LLM consumption)
|
|
844
|
+
observation_content = f"Tool '{tool_name}' returned: {tool_result}"
|
|
845
|
+
observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"
|
|
846
|
+
|
|
847
|
+
# Add assistant message with tool call and tool result
|
|
848
|
+
messages.append(
|
|
849
|
+
LLMMessage(
|
|
850
|
+
role="assistant",
|
|
851
|
+
content=None,
|
|
852
|
+
tool_calls=tool_calls_from_stream,
|
|
853
|
+
)
|
|
854
|
+
)
|
|
855
|
+
messages.append(
|
|
856
|
+
LLMMessage(
|
|
857
|
+
role="tool",
|
|
858
|
+
content=str(tool_result),
|
|
859
|
+
tool_call_id=tool_call.get("id", "call_0"),
|
|
860
|
+
)
|
|
861
|
+
)
|
|
862
|
+
|
|
863
|
+
except Exception as e:
|
|
864
|
+
error_content = f"Tool execution failed: {str(e)}"
|
|
865
|
+
error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
|
|
866
|
+
steps.append(
|
|
867
|
+
{
|
|
868
|
+
"type": "observation",
|
|
869
|
+
"content": error_msg,
|
|
870
|
+
"iteration": iteration + 1,
|
|
871
|
+
"has_error": True,
|
|
872
|
+
}
|
|
873
|
+
)
|
|
874
|
+
yield {
|
|
875
|
+
"type": "tool_error",
|
|
876
|
+
"tool_name": tool_name if "tool_name" in locals() else "unknown",
|
|
877
|
+
"error": str(e),
|
|
878
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
879
|
+
}
|
|
880
|
+
messages.append(
|
|
881
|
+
LLMMessage(
|
|
882
|
+
role="tool",
|
|
883
|
+
content=error_msg,
|
|
884
|
+
tool_call_id=tool_call.get("id", "call_0"),
|
|
885
|
+
)
|
|
886
|
+
)
|
|
887
|
+
|
|
888
|
+
# Continue to next iteration
|
|
889
|
+
continue
|
|
890
|
+
|
|
891
|
+
# Check for final response (outside tags only)
|
|
892
|
+
if self._has_final_response(thought_raw):
|
|
893
|
+
final_response = self._extract_final_response(thought_raw)
|
|
894
|
+
yield {
|
|
895
|
+
"type": "result",
|
|
896
|
+
"success": True,
|
|
897
|
+
"output": final_response, # Return raw output without processing
|
|
898
|
+
"reasoning_steps": steps,
|
|
899
|
+
"tool_calls_count": tool_calls_count,
|
|
900
|
+
"iterations": iteration + 1,
|
|
901
|
+
"total_tokens": total_tokens,
|
|
902
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
903
|
+
}
|
|
904
|
+
return
|
|
905
|
+
|
|
906
|
+
# Check if tool call (ReAct mode, outside tags only)
|
|
907
|
+
if self._has_tool_call(thought_raw):
|
|
908
|
+
# ACT: Execute tool
|
|
909
|
+
try:
|
|
910
|
+
tool_info = self._parse_tool_call(thought_raw) # Parse from raw text
|
|
911
|
+
tool_name = tool_info.get("tool", "")
|
|
912
|
+
if not tool_name:
|
|
913
|
+
raise ValueError("Tool name not found in tool call")
|
|
914
|
+
|
|
915
|
+
# Yield tool call event
|
|
916
|
+
yield {
|
|
917
|
+
"type": "tool_call",
|
|
918
|
+
"tool_name": tool_name,
|
|
919
|
+
"operation": tool_info.get("operation"),
|
|
920
|
+
"parameters": tool_info.get("parameters", {}),
|
|
921
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
922
|
+
}
|
|
923
|
+
|
|
924
|
+
tool_result = await self._execute_tool(
|
|
925
|
+
tool_name,
|
|
926
|
+
tool_info.get("operation"),
|
|
927
|
+
tool_info.get("parameters", {}),
|
|
928
|
+
)
|
|
929
|
+
tool_calls_count += 1
|
|
930
|
+
|
|
931
|
+
# Wrap tool call and result in step
|
|
932
|
+
steps.append(
|
|
933
|
+
{
|
|
934
|
+
"type": "action",
|
|
935
|
+
"tool": tool_info["tool"],
|
|
936
|
+
"operation": tool_info.get("operation"),
|
|
937
|
+
"parameters": tool_info.get("parameters"),
|
|
938
|
+
"result": str(tool_result), # Include result in step
|
|
939
|
+
"iteration": iteration + 1,
|
|
940
|
+
}
|
|
941
|
+
)
|
|
942
|
+
|
|
943
|
+
# Yield tool result event (streaming)
|
|
944
|
+
yield {
|
|
945
|
+
"type": "tool_result",
|
|
946
|
+
"tool_name": tool_name,
|
|
947
|
+
"result": tool_result,
|
|
948
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
949
|
+
}
|
|
950
|
+
|
|
951
|
+
# OBSERVE: Add tool result to conversation (for LLM consumption)
|
|
952
|
+
observation_content = f"Tool '{tool_info['tool']}' returned: {tool_result}"
|
|
953
|
+
observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"
|
|
954
|
+
|
|
955
|
+
# Add to messages for next iteration
|
|
956
|
+
messages.append(LLMMessage(role="assistant", content=thought_raw))
|
|
957
|
+
messages.append(LLMMessage(role="user", content=observation))
|
|
958
|
+
|
|
959
|
+
+                except Exception as e:
+                    error_content = f"Tool execution failed: {str(e)}"
+                    error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
+                    steps.append(
+                        {
+                            "type": "action",
+                            "tool": tool_name if "tool_name" in locals() else "unknown",
+                            "error": str(e),
+                            "iteration": iteration + 1,
+                            "has_error": True,
+                        }
+                    )
+
+                    # Yield error event
+                    yield {
+                        "type": "tool_error",
+                        "tool_name": tool_name if "tool_name" in locals() else "unknown",
+                        "error": str(e),
+                        "timestamp": datetime.utcnow().isoformat(),
+                    }
+
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=error_msg))
+
+            else:
+                # Check if there's an incomplete final response (has FINAL RESPONSE but no finish)
+                if self._has_incomplete_final_response(thought_raw):
+                    # Incomplete final response - ask LLM to continue
+                    continue_message = (
+                        f"[Iteration {iteration + 1}/{self._max_iterations}, "
+                        f"remaining: {self._max_iterations - iteration - 1}]\n"
+                        "Your FINAL RESPONSE appears incomplete (missing 'finish' suffix). "
+                        "Please continue your response from where you left off and end with 'finish' "
+                        "to indicate completion. If no 'finish' suffix, the system will continue iteration."
+                    )
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=continue_message))
+                else:
+                    # No tool call or final response detected - ask LLM to continue
+                    continue_message = (
+                        f"[Iteration {iteration + 1}/{self._max_iterations}, "
+                        f"remaining: {self._max_iterations - iteration - 1}]\n"
+                        "Continuing from your previous output. "
+                        "If your generation is incomplete, please continue from where you left off. "
+                        "If you decide to take action, ensure proper format:\n"
+                        "- Tool call: TOOL:, OPERATION:, PARAMETERS: (outside tags)\n"
+                        "- Final response: FINAL RESPONSE: <content> finish (outside tags)"
+                    )
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=continue_message))
+                # Continue to next iteration
+                continue
+
+        # Max iterations reached
+        logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
+        yield {
+            "type": "result",
+            "success": True,
+            "output": "Max iterations reached. Unable to complete task fully.",
+            "reasoning_steps": steps,
+            "tool_calls_count": tool_calls_count,
+            "iterations": self._max_iterations,
+            "total_tokens": total_tokens,
+            "max_iterations_reached": True,
+            "timestamp": datetime.utcnow().isoformat(),
+        }
+
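For orientation, a minimal consumer of the event stream above might look like the sketch below. The public entry-point name `stream_task` is an assumption (only the loop body appears in this diff); the event shapes (`tool_call`, `tool_result`, `tool_error`, `result`) are the dicts yielded above.

```python
import asyncio

async def consume(agent, task: str) -> None:
    # Hypothetical method name; the agent's streaming loop yields plain dicts.
    async for event in agent.stream_task(task, context={}):
        kind = event["type"]
        if kind == "tool_call":
            print(f"calling {event['tool_name']} ...")
        elif kind == "tool_result":
            print(f"{event['tool_name']} -> {event['result']}")
        elif kind == "tool_error":
            print(f"{event['tool_name']} failed: {event['error']}")
        elif kind == "result":
            print(f"done in {event['iterations']} iteration(s): {event['output']}")

# asyncio.run(consume(agent, "Summarize the latest report"))
```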
+    async def _react_loop(self, task: str, context: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Execute ReAct loop: Reason → Act → Observe.
+
+        Args:
+            task: Task description
+            context: Context dictionary
+
+        Returns:
+            Result dictionary with 'final_response', 'steps', 'iterations'
+        """
+        steps = []
+        tool_calls_count = 0
+        total_tokens = 0
+
+        # Build initial messages
+        messages = self._build_initial_messages(task, context)
+
+        for iteration in range(self._max_iterations):
+            logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")
+
+            # Add iteration info to messages (except first iteration, which has task context)
+            if iteration > 0:
+                iteration_info = (
+                    f"[Iteration {iteration + 1}/{self._max_iterations}, "
+                    f"remaining: {self._max_iterations - iteration - 1}]"
+                )
+                # Only add if the last message is not already an iteration info
+                # (guard with `or ""`: assistant tool-call messages may carry content=None)
+                if messages and not (messages[-1].content or "").startswith("[Iteration"):
+                    messages.append(LLMMessage(role="user", content=iteration_info))
+
+            # THINK: LLM reasons about next action
+            # Use Function Calling if supported, otherwise use ReAct mode
+            if self._use_function_calling and self._tool_schemas:
+                # Convert schemas to tools format
+                tools = [{"type": "function", "function": schema} for schema in self._tool_schemas]
+                response = await self.llm_client.generate_text(
+                    messages=messages,
+                    model=self._config.llm_model,
+                    temperature=self._config.temperature,
+                    max_tokens=self._config.max_tokens,
+                    tools=tools,
+                    tool_choice="auto",
+                )
+            else:
+                # Fallback to ReAct mode
+                response = await self.llm_client.generate_text(
+                    messages=messages,
+                    model=self._config.llm_model,
+                    temperature=self._config.temperature,
+                    max_tokens=self._config.max_tokens,
+                )
+
+            thought_raw = response.content or ""
+            total_tokens += getattr(response, "total_tokens", 0)
+
+            # Update prompt cache metrics from LLM response
+            cache_read_tokens = getattr(response, "cache_read_tokens", None)
+            cache_creation_tokens = getattr(response, "cache_creation_tokens", None)
+            cache_hit = getattr(response, "cache_hit", None)
+            if cache_read_tokens is not None or cache_creation_tokens is not None or cache_hit is not None:
+                self.update_cache_metrics(
+                    cache_read_tokens=cache_read_tokens,
+                    cache_creation_tokens=cache_creation_tokens,
+                    cache_hit=cache_hit,
+                )
+
+            # Store raw output in steps (no format processing)
+            steps.append(
+                {
+                    "type": "thought",
+                    "content": thought_raw.strip(),  # Return raw output without processing
+                    "iteration": iteration + 1,
+                }
+            )
+
+            # Check for Function Calling response
+            tool_calls = getattr(response, "tool_calls", None)
+            function_call = getattr(response, "function_call", None)
+
+            if tool_calls or function_call:
+                # Handle Function Calling response
+                tool_calls_to_process = tool_calls or []
+                if function_call:
+                    # Convert legacy function_call to tool_calls format
+                    tool_calls_to_process = [
+                        {
+                            "id": "call_0",
+                            "type": "function",
+                            "function": {
+                                "name": function_call["name"],
+                                "arguments": function_call["arguments"],
+                            },
+                        }
+                    ]
+
+                # Process each tool call
+                for tool_call in tool_calls_to_process:
+                    try:
+                        func_name = tool_call["function"]["name"]
+                        func_args = tool_call["function"]["arguments"]
+
+                        # Parse function name to extract tool and operation
+                        # CRITICAL: Try exact match first, then fall back to underscore parsing
+                        if self._tool_instances and func_name in self._tool_instances:
+                            # Exact match found - use full function name as tool name
+                            tool_name = func_name
+                            operation = None
+                        elif self._available_tools and func_name in self._available_tools:
+                            # Exact match in available tools list
+                            tool_name = func_name
+                            operation = None
+                        else:
+                            # Fallback: try underscore parsing for legacy compatibility
+                            parts = func_name.split("_", 1)
+                            if len(parts) == 2:
+                                tool_name, operation = parts
+                            else:
+                                tool_name = parts[0]
+                                operation = None
+
+                        # Parse arguments JSON
+                        import json
+                        if isinstance(func_args, str):
+                            parameters = json.loads(func_args)
+                        else:
+                            parameters = func_args if func_args else {}
+
+                        # Execute tool
+                        tool_result = await self._execute_tool(tool_name, operation, parameters)
+                        tool_calls_count += 1
+
+                        # Wrap tool call and result in step
+                        steps.append(
+                            {
+                                "type": "action",
+                                "tool": tool_name,
+                                "operation": operation,
+                                "parameters": parameters,
+                                "result": str(tool_result),  # Include result in step
+                                "iteration": iteration + 1,
+                            }
+                        )
+
+                        # Add tool result to messages (for LLM consumption)
+                        observation_content = f"Tool '{tool_name}' returned: {tool_result}"
+                        observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"
+
+                        # Add assistant message with tool call and tool result
+                        messages.append(
+                            LLMMessage(
+                                role="assistant",
+                                content=None,  # Content is None when using tool calls
+                                tool_calls=tool_calls_to_process if tool_calls else None,
+                            )
+                        )
+                        messages.append(
+                            LLMMessage(
+                                role="tool",
+                                content=str(tool_result),
+                                tool_call_id=tool_call.get("id", "call_0"),
+                            )
+                        )
+
+                    except Exception as e:
+                        error_content = f"Tool execution failed: {str(e)}"
+                        error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
+                        steps.append(
+                            {
+                                "type": "observation",
+                                "content": error_msg,
+                                "iteration": iteration + 1,
+                                "has_error": True,
+                            }
+                        )
+                        # Add error to messages
+                        messages.append(
+                            LLMMessage(
+                                role="tool",
+                                content=error_msg,
+                                tool_call_id=tool_call.get("id", "call_0"),
+                            )
+                        )
+
+                # Continue to next iteration
+                continue
+
+            # Check for final response (outside tags only)
+            if self._has_final_response(thought_raw):
+                final_response = self._extract_final_response(thought_raw)
+                return {
+                    "final_response": final_response,  # Return raw output without processing
+                    "steps": steps,
+                    "iterations": iteration + 1,
+                    "tool_calls_count": tool_calls_count,
+                    "total_tokens": total_tokens,
+                }
+
+            # Check if tool call (ReAct mode, outside tags only)
+            if self._has_tool_call(thought_raw):
+                # ACT: Execute tool
+                try:
+                    tool_info = self._parse_tool_call(thought_raw)  # Parse from raw text
+                    tool_name = tool_info.get("tool", "")
+                    if not tool_name:
+                        raise ValueError("Tool name not found in tool call")
+                    tool_result = await self._execute_tool(
+                        tool_name,
+                        tool_info.get("operation"),
+                        tool_info.get("parameters", {}),
+                    )
+                    tool_calls_count += 1
+
+                    # Wrap tool call and result in step
+                    steps.append(
+                        {
+                            "type": "action",
+                            "tool": tool_info["tool"],
+                            "operation": tool_info.get("operation"),
+                            "parameters": tool_info.get("parameters"),
+                            "result": str(tool_result),  # Include result in step
+                            "iteration": iteration + 1,
+                        }
+                    )
+
+                    # OBSERVE: Add tool result to conversation (for LLM consumption)
+                    observation_content = f"Tool '{tool_info['tool']}' returned: {tool_result}"
+                    observation = f"<OBSERVATION>\n{observation_content}\n</OBSERVATION>"
+
+                    # Add to messages for next iteration
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=observation))
+
+                except Exception as e:
+                    error_content = f"Tool execution failed: {str(e)}"
+                    error_msg = f"<OBSERVATION>\n{error_content}\n</OBSERVATION>"
+                    steps.append(
+                        {
+                            "type": "action",
+                            "tool": tool_name if "tool_name" in locals() else "unknown",
+                            "error": str(e),
+                            "iteration": iteration + 1,
+                            "has_error": True,
+                        }
+                    )
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=error_msg))
+
+            else:
+                # Check if there's an incomplete final response (has FINAL RESPONSE but no finish)
+                if self._has_incomplete_final_response(thought_raw):
+                    # Incomplete final response - ask LLM to continue
+                    continue_message = (
+                        f"[Iteration {iteration + 1}/{self._max_iterations}, "
+                        f"remaining: {self._max_iterations - iteration - 1}]\n"
+                        "Your FINAL RESPONSE appears incomplete (missing 'finish' suffix). "
+                        "Please continue your response from where you left off and end with 'finish' "
+                        "to indicate completion. If no 'finish' suffix, the system will continue iteration."
+                    )
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=continue_message))
+                else:
+                    # No tool call or final response detected - ask LLM to continue
+                    continue_message = (
+                        f"[Iteration {iteration + 1}/{self._max_iterations}, "
+                        f"remaining: {self._max_iterations - iteration - 1}]\n"
+                        "Continuing from your previous output. "
+                        "If your generation is incomplete, please continue from where you left off. "
+                        "If you decide to take action, ensure proper format:\n"
+                        "- Tool call: TOOL:, OPERATION:, PARAMETERS: (outside tags)\n"
+                        "- Final response: FINAL RESPONSE: <content> finish (outside tags)"
+                    )
+                    messages.append(LLMMessage(role="assistant", content=thought_raw))
+                    messages.append(LLMMessage(role="user", content=continue_message))
+                # Continue to next iteration
+                continue
+
+        # Max iterations reached
+        logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
+        return {
+            "final_response": "Max iterations reached. Unable to complete task fully.",
+            "steps": steps,
+            "iterations": self._max_iterations,
+            "tool_calls_count": tool_calls_count,
+            "total_tokens": total_tokens,
+            "max_iterations_reached": True,
+        }
+
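A hedged sketch of driving the non-streaming loop directly: `_react_loop` is internal, so in practice a public run/execute method would wrap it, but the result keys below are exactly the ones returned in this diff.

```python
import asyncio

async def run_once(agent) -> None:
    # Calls the internal coroutine shown above; context shape per this diff.
    result = await agent._react_loop(task="What is 2 + 2?", context={"history": []})
    print(result["final_response"])
    print(f"{result['iterations']} iteration(s), "
          f"{result['tool_calls_count']} tool call(s), "
          f"{result['total_tokens']} tokens")

# asyncio.run(run_once(agent))
```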
+    def _build_initial_messages(self, task: str, context: Dict[str, Any]) -> List[LLMMessage]:
+        """Build initial messages for ReAct loop."""
+        messages = []
+
+        # Add system prompt with cache control if caching is enabled
+        if self._system_prompt:
+            cache_control = (
+                CacheControl(type="ephemeral")
+                if self._config.enable_prompt_caching
+                else None
+            )
+            messages.append(
+                LLMMessage(
+                    role="system",
+                    content=self._system_prompt,
+                    cache_control=cache_control,
+                )
+            )
+
+        # Add context if provided
+        if context:
+            # Special handling: if context contains 'history' as a list of messages,
+            # add them as separate user/assistant messages instead of formatting
+            history = context.get("history")
+            if isinstance(history, list) and len(history) > 0:
+                # Check if history contains message-like dictionaries
+                for msg in history:
+                    if isinstance(msg, dict) and "role" in msg and "content" in msg:
+                        # Valid message format - add as separate message
+                        messages.append(
+                            LLMMessage(
+                                role=msg["role"],
+                                content=msg["content"],
+                            )
+                        )
+                    elif isinstance(msg, LLMMessage):
+                        # Already an LLMMessage instance
+                        messages.append(msg)
+
+            # Format remaining context fields (excluding history) as Additional Context
+            context_without_history = {k: v for k, v in context.items() if k != "history"}
+            if context_without_history:
+                context_str = self._format_context(context_without_history)
+                if context_str:
+                    messages.append(
+                        LLMMessage(
+                            role="user",
+                            content=f"Additional Context:\n{context_str}",
+                        )
+                    )
+
+        # Add task with iteration info
+        task_message = (
+            f"Task: {task}\n\n"
+            f"[Iteration 1/{self._max_iterations}, remaining: {self._max_iterations - 1}]"
+        )
+        messages.append(LLMMessage(role="user", content=task_message))
+
+        return messages
+
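For reference, the message ordering this builder produces, shown with plain dicts in place of LLMMessage (illustrative values; cache_control omitted, and `1/10` assumes max_iterations of 10):

```python
expected_order = [
    {"role": "system", "content": "<system prompt>"},           # if configured
    {"role": "user", "content": "Hello"},                       # context["history"][0]
    {"role": "assistant", "content": "Hi! How can I help?"},    # context["history"][1]
    {"role": "user", "content": "Additional Context:\nuser_id: 42"},
    {"role": "user", "content": "Task: ...\n\n[Iteration 1/10, remaining: 9]"},
]
```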
+    def _format_context(self, context: Dict[str, Any]) -> str:
+        """Format context dictionary as string."""
+        relevant_fields = []
+        for key, value in context.items():
+            if not key.startswith("_") and value is not None:
+                relevant_fields.append(f"{key}: {value}")
+        return "\n".join(relevant_fields) if relevant_fields else ""
+
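A standalone, runnable replica of the filtering rules above: keys starting with `_` and None values are dropped, one `key: value` per line.

```python
from typing import Any, Dict

def format_context(context: Dict[str, Any]) -> str:
    # Same rules as _format_context, as a free function for illustration.
    relevant = [f"{k}: {v}" for k, v in context.items()
                if not k.startswith("_") and v is not None]
    return "\n".join(relevant)

assert format_context({"user": "ada", "_secret": "x", "score": None}) == "user: ada"
```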
+    def _extract_thought_content(self, text: str) -> str:
+        """
+        Extract content from <THOUGHT>...</THOUGHT> tags.
+
+        DEPRECATED: This method is kept for backward compatibility but no longer
+        extracts content. Returns original text as-is per new design.
+
+        Args:
+            text: Text that may contain THOUGHT tags
+
+        Returns:
+            Original text (no extraction performed)
+        """
+        # Return original text without processing (new design)
+        return text.strip()
+
+    def _extract_observation_content(self, text: str) -> str:
+        """
+        Extract content from <OBSERVATION>...</OBSERVATION> tags.
+
+        DEPRECATED: This method is kept for backward compatibility but no longer
+        extracts content. Returns original text as-is per new design.
+
+        Args:
+            text: Text that may contain OBSERVATION tags
+
+        Returns:
+            Original text (no extraction performed)
+        """
+        # Return original text without processing (new design)
+        return text.strip()
+
+    def _has_final_response(self, text: str) -> bool:
+        """
+        Check if text contains complete FINAL RESPONSE with 'finish' suffix.
+
+        The FINAL RESPONSE must end with 'finish' to be considered complete.
+        If FINAL RESPONSE is present but without 'finish', it's considered incomplete
+        and the loop will continue to let the LLM complete the response.
+
+        Args:
+            text: Text to check
+
+        Returns:
+            True if complete FINAL RESPONSE (with finish suffix) found outside tags
+        """
+        import re
+
+        # Remove content inside THOUGHT and OBSERVATION tags
+        text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+        text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+        # Check for FINAL RESPONSE marker with 'finish' suffix in remaining text
+        # The 'finish' must appear after FINAL RESPONSE: content
+        if "FINAL RESPONSE:" not in text_without_tags:
+            return False
+
+        # Check if 'finish' appears after FINAL RESPONSE:
+        # Use case-insensitive search for 'finish' at the end
+        text_lower = text_without_tags.lower()
+        final_response_idx = text_lower.find("final response:")
+        if final_response_idx == -1:
+            return False
+
+        # Check if 'finish' appears after the FINAL RESPONSE marker
+        remaining_text = text_without_tags[final_response_idx:]
+        return "finish" in remaining_text.lower()
+
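A runnable standalone demo of the completion check above: FINAL RESPONSE only counts as complete when 'finish' appears after the marker, and anything inside THOUGHT/OBSERVATION tags is ignored (replica of the logic, not the class method).

```python
import re

def has_final_response(text: str) -> bool:
    # Strip tagged regions, then apply the marker + 'finish' suffix check.
    stripped = re.sub(r"<THOUGHT>.*?</THOUGHT>", "", text, flags=re.DOTALL)
    stripped = re.sub(r"<OBSERVATION>.*?</OBSERVATION>", "", stripped, flags=re.DOTALL)
    if "FINAL RESPONSE:" not in stripped:
        return False
    idx = stripped.lower().find("final response:")
    return "finish" in stripped[idx:].lower()

assert has_final_response("FINAL RESPONSE: 42 finish")
assert not has_final_response("FINAL RESPONSE: 42")                           # incomplete
assert not has_final_response("<THOUGHT>FINAL RESPONSE: x finish</THOUGHT>")  # inside tags
```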
+    def _has_incomplete_final_response(self, text: str) -> bool:
+        """
+        Check if text contains FINAL RESPONSE marker but without 'finish' suffix.
+
+        Args:
+            text: Text to check
+
+        Returns:
+            True if FINAL RESPONSE marker found but without finish suffix
+        """
+        import re
+
+        # Remove content inside THOUGHT and OBSERVATION tags
+        text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+        text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+        # Check for FINAL RESPONSE marker without 'finish' suffix
+        if "FINAL RESPONSE:" not in text_without_tags:
+            return False
+
+        # Check if 'finish' is missing
+        text_lower = text_without_tags.lower()
+        final_response_idx = text_lower.find("final response:")
+        remaining_text = text_without_tags[final_response_idx:]
+        return "finish" not in remaining_text.lower()
+
+    def _extract_final_response(self, text: str) -> str:
+        """
+        Extract final response from text, preserving original format.
+        Only extracts from outside THOUGHT/OBSERVATION tags.
+
+        Args:
+            text: Text that may contain FINAL RESPONSE marker
+
+        Returns:
+            Original text if FINAL RESPONSE found, otherwise empty string
+        """
+        import re
+
+        # Remove content inside THOUGHT and OBSERVATION tags
+        text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+        text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+        # Check for FINAL RESPONSE marker
+        if "FINAL RESPONSE:" in text_without_tags:
+            # Return original text without any processing
+            return text.strip()
+
+        return ""
+
+    def _has_tool_call(self, text: str) -> bool:
+        """
+        Check if text contains TOOL call marker outside of THOUGHT/OBSERVATION tags.
+
+        Args:
+            text: Text to check
+
+        Returns:
+            True if TOOL marker found outside tags
+        """
+        import re
+
+        # Remove content inside THOUGHT and OBSERVATION tags
+        text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+        text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+
+        # Check for TOOL marker in remaining text
+        return "TOOL:" in text_without_tags
+
+    def _parse_tool_call(self, text: str) -> Dict[str, Any]:
+        """
+        Parse tool call from LLM output.
+        Only parses from outside THOUGHT/OBSERVATION tags.
+
+        Expected format:
+            TOOL: <tool_name>
+            OPERATION: <operation_name>
+            PARAMETERS: <json_parameters>
+
+        Args:
+            text: LLM output that may contain tool call
+
+        Returns:
+            Dictionary with 'tool', 'operation', 'parameters'
+        """
+        import json
+        import re
+
+        result = {}
+
+        # Remove content inside THOUGHT and OBSERVATION tags
+        text_without_tags = re.sub(r'<THOUGHT>.*?</THOUGHT>', '', text, flags=re.DOTALL)
+        text_without_tags = re.sub(r'<OBSERVATION>.*?</OBSERVATION>', '', text_without_tags, flags=re.DOTALL)
+        lines = text_without_tags.split("\n")
+
+        # Extract tool from text outside tags (guarded: a marker that appears only
+        # mid-line yields no matching line, so do not index [0] unconditionally)
+        tool_lines = [line for line in lines if line.strip().startswith("TOOL:")]
+        if tool_lines:
+            result["tool"] = tool_lines[0].split("TOOL:", 1)[1].strip()
+
+        # Extract operation (optional)
+        op_lines = [line for line in lines if line.strip().startswith("OPERATION:")]
+        if op_lines:
+            result["operation"] = op_lines[0].split("OPERATION:", 1)[1].strip()
+
+        # Extract parameters (optional)
+        param_lines = [line for line in lines if line.strip().startswith("PARAMETERS:")]
+        if param_lines:
+            param_str = param_lines[0].split("PARAMETERS:", 1)[1].strip()
+            try:
+                result["parameters"] = json.loads(param_str)
+            except json.JSONDecodeError:
+                logger.warning(f"Failed to parse parameters: {param_str}")
+                result["parameters"] = {}  # type: ignore[assignment]
+
+        return result
+
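A runnable replica of the TOOL/OPERATION/PARAMETERS contract above, shown on a sample LLM output (standalone function; the JSON-error fallback is omitted for brevity):

```python
import json
import re

SAMPLE = """<THOUGHT>I should search first.</THOUGHT>
TOOL: search
OPERATION: query
PARAMETERS: {"q": "knowledge graphs", "limit": 3}"""

def parse_tool_call(text: str) -> dict:
    # Strip tagged regions, then read one "MARKER: value" line per field.
    stripped = re.sub(r"<THOUGHT>.*?</THOUGHT>", "", text, flags=re.DOTALL)
    stripped = re.sub(r"<OBSERVATION>.*?</OBSERVATION>", "", stripped, flags=re.DOTALL)
    result = {}
    for marker, key in (("TOOL:", "tool"), ("OPERATION:", "operation"), ("PARAMETERS:", "parameters")):
        for line in stripped.split("\n"):
            if line.strip().startswith(marker):
                value = line.split(marker, 1)[1].strip()
                result[key] = json.loads(value) if key == "parameters" else value
                break
    return result

assert parse_tool_call(SAMPLE) == {
    "tool": "search",
    "operation": "query",
    "parameters": {"q": "knowledge graphs", "limit": 3},
}
```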
+    async def _execute_tool(
+        self,
+        tool_name: str,
+        operation: Optional[str],
+        parameters: Dict[str, Any],
+    ) -> Any:
+        """Execute a tool operation."""
+        # Check access
+        if not self._available_tools or tool_name not in self._available_tools:
+            raise ToolAccessDeniedError(self.agent_id, tool_name)
+
+        if not self._tool_instances:
+            raise ValueError(f"Tool instances not available for {tool_name}")
+        tool = self._tool_instances.get(tool_name)
+        if not tool:
+            raise ValueError(f"Tool {tool_name} not loaded")
+
+        # Execute tool
+        if operation:
+            result = await tool.run_async(operation, **parameters)
+        else:
+            if hasattr(tool, "run_async"):
+                result = await tool.run_async(**parameters)
+            else:
+                raise ValueError(f"Tool {tool_name} requires operation to be specified")
+
+        return result
+
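A hedged sketch of the tool contract `_execute_tool` relies on: an object exposing `run_async`, dispatched either as `run_async(operation, **params)` or `run_async(**params)`. The `EchoTool` below is illustrative only, not a package class.

```python
from typing import Any

class EchoTool:
    async def run_async(self, operation: str = "echo", **params: Any) -> Any:
        # A real tool would dispatch on `operation`; here we just echo back.
        return {"operation": operation, "params": params}

# result = await EchoTool().run_async("echo", text="hello")
```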
+    async def _execute_tool_with_observation(
+        self,
+        tool_name: str,
+        operation: Optional[str],
+        parameters: Dict[str, Any],
+    ) -> "ToolObservation":
+        """
+        Execute a tool and return structured observation.
+
+        Wraps tool execution with automatic success/error tracking,
+        execution time measurement, and structured result formatting.
+
+        Args:
+            tool_name: Name of the tool to execute
+            operation: Optional operation name
+            parameters: Tool parameters
+
+        Returns:
+            ToolObservation with execution details
+
+        Example:
+            ```python
+            obs = await agent._execute_tool_with_observation(
+                tool_name="search",
+                operation="query",
+                parameters={"q": "AI"}
+            )
+            print(obs.to_text())
+            ```
+        """
+        start_time = datetime.utcnow()
+
+        try:
+            # Execute tool
+            result = await self._execute_tool(tool_name, operation, parameters)
+
+            # Calculate execution time
+            end_time = datetime.utcnow()
+            execution_time_ms = (end_time - start_time).total_seconds() * 1000
+
+            # Create observation
+            observation = ToolObservation(
+                tool_name=tool_name,
+                parameters=parameters,
+                result=result,
+                success=True,
+                error=None,
+                execution_time_ms=execution_time_ms,
+            )
+
+            logger.info(f"Tool '{tool_name}' executed successfully in {execution_time_ms:.2f}ms")
+
+            return observation
+
+        except Exception as e:
+            # Calculate execution time
+            end_time = datetime.utcnow()
+            execution_time_ms = (end_time - start_time).total_seconds() * 1000
+
+            # Create error observation
+            observation = ToolObservation(
+                tool_name=tool_name,
+                parameters=parameters,
+                result=None,
+                success=False,
+                error=str(e),
+                execution_time_ms=execution_time_ms,
+            )
+
+            logger.error(f"Tool '{tool_name}' failed after {execution_time_ms:.2f}ms: {e}")
+
+            return observation
+
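ToolObservation itself is defined elsewhere in the package; a minimal stand-in consistent with the fields used above might look like this (hedged: the exact field set and `to_text()` formatting are assumptions, not the package's API):

```python
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class ToolObservation:
    tool_name: str
    parameters: Dict[str, Any]
    result: Any
    success: bool
    error: Optional[str]
    execution_time_ms: float

    def to_text(self) -> str:
        # Compact one-line summary for logs or LLM observations.
        status = "ok" if self.success else f"error: {self.error}"
        return f"{self.tool_name}({self.parameters}) -> {status} in {self.execution_time_ms:.1f}ms"
```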
+    def get_available_tools(self) -> List[str]:
+        """Get list of available tools."""
+        return self._available_tools.copy() if self._available_tools else []
+
+    def _generate_tool_schemas(self) -> None:
+        """Generate OpenAI Function Calling schemas for available tools."""
+        if not self._tool_instances:
+            return
+
+        try:
+            # Use ToolSchemaGenerator to generate schemas from tool instances
+            self._tool_schemas = ToolSchemaGenerator.generate_schemas_for_tool_instances(
+                self._tool_instances
+            )
+            logger.info(f"HybridAgent {self.agent_id} generated {len(self._tool_schemas)} tool schemas")
+        except Exception as e:
+            logger.warning(f"Failed to generate tool schemas: {e}. Falling back to ReAct mode.")
+            self._tool_schemas = []
+
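For reference, one generated schema in OpenAI function-calling form (illustrative values; real schemas come from ToolSchemaGenerator). The agent later wraps each schema as `{"type": "function", "function": schema}` before passing it to `generate_text`.

```python
example_schema = {
    "name": "search_query",
    "description": "Run a search query and return matching documents.",
    "parameters": {
        "type": "object",
        "properties": {
            "q": {"type": "string", "description": "Query text"},
            "limit": {"type": "integer", "default": 5},
        },
        "required": ["q"],
    },
}
```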
+    def _check_function_calling_support(self) -> bool:
+        """
+        Check if LLM client supports Function Calling.
+
+        Returns:
+            True if Function Calling is supported, False otherwise
+        """
+        # Check if we have tools and schemas
+        if not self._tool_instances or not self._tool_schemas:
+            return False
+
+        # Check if LLM client supports Function Calling
+        # OpenAI, xAI (OpenAI-compatible), Google Vertex AI, and some other providers support it
+        provider_name = getattr(self.llm_client, "provider_name", "").lower()
+        supported_providers = ["openai", "xai", "anthropic", "vertex"]
+
+        # Note: Google Vertex AI uses FunctionDeclaration format, but it's handled via GoogleFunctionCallingMixin
+        # The mixin converts OpenAI format to Google format internally
+
+        # Also check if generate_text method accepts 'tools' or 'functions' parameter
+        import inspect
+        try:
+            sig = inspect.signature(self.llm_client.generate_text)
+            params = sig.parameters
+            has_tools_param = "tools" in params or "functions" in params
+        except (ValueError, TypeError):
+            # If signature inspection fails, assume not supported
+            has_tools_param = False
+
+        return provider_name in supported_providers or has_tools_param
+
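A standalone, runnable demo of the signature probe used above: detecting whether a callable accepts a `tools` or `functions` keyword.

```python
import inspect

def accepts_tools(fn) -> bool:
    # Same probe as above: inspect the signature, fail closed on errors.
    try:
        params = inspect.signature(fn).parameters
    except (ValueError, TypeError):
        return False
    return "tools" in params or "functions" in params

def generate_text(messages, model, tools=None):  # stand-in client method
    ...

assert accepts_tools(generate_text)
assert not accepts_tools(print)  # no tools/functions parameter
```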
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "HybridAgent":
+        """
+        Deserialize HybridAgent from dictionary.
+
+        Note: LLM client must be provided separately.
+
+        Args:
+            data: Dictionary representation
+
+        Returns:
+            HybridAgent instance
+        """
+        raise NotImplementedError(
+            "HybridAgent.from_dict requires LLM client to be provided separately. "
+            "Use constructor instead."
+        )