aiecs 1.0.1__py3-none-any.whl → 1.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aiecs has been flagged as potentially problematic.
- aiecs/__init__.py +13 -16
- aiecs/__main__.py +7 -7
- aiecs/aiecs_client.py +269 -75
- aiecs/application/executors/operation_executor.py +79 -54
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/data_quality.py +302 -0
- aiecs/application/knowledge_graph/builder/data_reshaping.py +293 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +369 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +490 -0
- aiecs/application/knowledge_graph/builder/import_optimizer.py +396 -0
- aiecs/application/knowledge_graph/builder/schema_inference.py +462 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +563 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +1384 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +317 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +98 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +422 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +347 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +241 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +78 -0
- aiecs/application/knowledge_graph/fusion/ab_testing.py +395 -0
- aiecs/application/knowledge_graph/fusion/abbreviation_expander.py +327 -0
- aiecs/application/knowledge_graph/fusion/alias_index.py +597 -0
- aiecs/application/knowledge_graph/fusion/alias_matcher.py +384 -0
- aiecs/application/knowledge_graph/fusion/cache_coordinator.py +343 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +433 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +511 -0
- aiecs/application/knowledge_graph/fusion/evaluation_dataset.py +240 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +632 -0
- aiecs/application/knowledge_graph/fusion/matching_config.py +489 -0
- aiecs/application/knowledge_graph/fusion/name_normalizer.py +352 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +183 -0
- aiecs/application/knowledge_graph/fusion/semantic_name_matcher.py +464 -0
- aiecs/application/knowledge_graph/fusion/similarity_pipeline.py +534 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +342 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +366 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +195 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +341 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +500 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +163 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +913 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +866 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +475 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +396 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +208 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +170 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +855 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +518 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +27 -0
- aiecs/application/knowledge_graph/retrieval/query_intent_classifier.py +211 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +592 -0
- aiecs/application/knowledge_graph/retrieval/strategy_types.py +23 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +457 -0
- aiecs/application/knowledge_graph/search/reranker.py +293 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +535 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +392 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +305 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +271 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +239 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +313 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +471 -0
- aiecs/config/__init__.py +20 -5
- aiecs/config/config.py +762 -31
- aiecs/config/graph_config.py +131 -0
- aiecs/config/tool_config.py +399 -0
- aiecs/core/__init__.py +29 -13
- aiecs/core/interface/__init__.py +2 -2
- aiecs/core/interface/execution_interface.py +22 -22
- aiecs/core/interface/storage_interface.py +37 -88
- aiecs/core/registry/__init__.py +31 -0
- aiecs/core/registry/service_registry.py +92 -0
- aiecs/domain/__init__.py +270 -1
- aiecs/domain/agent/__init__.py +191 -0
- aiecs/domain/agent/base_agent.py +3870 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/graph_aware_mixin.py +569 -0
- aiecs/domain/agent/hybrid_agent.py +1435 -0
- aiecs/domain/agent/integration/__init__.py +29 -0
- aiecs/domain/agent/integration/context_compressor.py +216 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +587 -0
- aiecs/domain/agent/integration/protocols.py +281 -0
- aiecs/domain/agent/integration/retry_policy.py +218 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +1892 -0
- aiecs/domain/agent/lifecycle.py +291 -0
- aiecs/domain/agent/llm_agent.py +692 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +1124 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +163 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +86 -0
- aiecs/domain/agent/models.py +884 -0
- aiecs/domain/agent/observability.py +479 -0
- aiecs/domain/agent/persistence.py +449 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +159 -0
- aiecs/domain/agent/prompts/formatters.py +187 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +253 -0
- aiecs/domain/agent/tool_agent.py +444 -0
- aiecs/domain/agent/tools/__init__.py +15 -0
- aiecs/domain/agent/tools/schema_generator.py +364 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +469 -0
- aiecs/domain/community/analytics.py +432 -0
- aiecs/domain/community/collaborative_workflow.py +648 -0
- aiecs/domain/community/communication_hub.py +634 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +796 -0
- aiecs/domain/community/community_manager.py +803 -0
- aiecs/domain/community/decision_engine.py +849 -0
- aiecs/domain/community/exceptions.py +231 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +234 -0
- aiecs/domain/community/resource_manager.py +461 -0
- aiecs/domain/community/shared_context_manager.py +589 -0
- aiecs/domain/context/__init__.py +40 -10
- aiecs/domain/context/context_engine.py +1910 -0
- aiecs/domain/context/conversation_models.py +87 -53
- aiecs/domain/context/graph_memory.py +582 -0
- aiecs/domain/execution/model.py +12 -4
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +148 -0
- aiecs/domain/knowledge_graph/models/evidence.py +178 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +184 -0
- aiecs/domain/knowledge_graph/models/path.py +171 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +171 -0
- aiecs/domain/knowledge_graph/models/query.py +261 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +181 -0
- aiecs/domain/knowledge_graph/models/relation.py +202 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +131 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +253 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +143 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +163 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +691 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +209 -0
- aiecs/domain/task/dsl_processor.py +172 -56
- aiecs/domain/task/model.py +20 -8
- aiecs/domain/task/task_context.py +27 -24
- aiecs/infrastructure/__init__.py +0 -2
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +837 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +458 -0
- aiecs/infrastructure/graph_storage/cache.py +424 -0
- aiecs/infrastructure/graph_storage/distributed.py +223 -0
- aiecs/infrastructure/graph_storage/error_handling.py +380 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +294 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +1197 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +446 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +431 -0
- aiecs/infrastructure/graph_storage/metrics.py +344 -0
- aiecs/infrastructure/graph_storage/migration.py +400 -0
- aiecs/infrastructure/graph_storage/pagination.py +483 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +456 -0
- aiecs/infrastructure/graph_storage/postgres.py +1563 -0
- aiecs/infrastructure/graph_storage/property_storage.py +353 -0
- aiecs/infrastructure/graph_storage/protocols.py +76 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +642 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +1373 -0
- aiecs/infrastructure/graph_storage/streaming.py +487 -0
- aiecs/infrastructure/graph_storage/tenant.py +412 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +92 -54
- aiecs/infrastructure/messaging/websocket_manager.py +51 -35
- aiecs/infrastructure/monitoring/__init__.py +22 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +45 -11
- aiecs/infrastructure/monitoring/global_metrics_manager.py +212 -0
- aiecs/infrastructure/monitoring/structured_logger.py +3 -7
- aiecs/infrastructure/monitoring/tracing_manager.py +63 -35
- aiecs/infrastructure/persistence/__init__.py +14 -1
- aiecs/infrastructure/persistence/context_engine_client.py +184 -0
- aiecs/infrastructure/persistence/database_manager.py +67 -43
- aiecs/infrastructure/persistence/file_storage.py +180 -103
- aiecs/infrastructure/persistence/redis_client.py +74 -21
- aiecs/llm/__init__.py +73 -25
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/{custom_callbacks.py → callbacks/custom_callbacks.py} +26 -19
- aiecs/llm/client_factory.py +224 -36
- aiecs/llm/client_resolver.py +155 -0
- aiecs/llm/clients/__init__.py +38 -0
- aiecs/llm/clients/base_client.py +324 -0
- aiecs/llm/clients/google_function_calling_mixin.py +457 -0
- aiecs/llm/clients/googleai_client.py +241 -0
- aiecs/llm/clients/openai_client.py +158 -0
- aiecs/llm/clients/openai_compatible_mixin.py +367 -0
- aiecs/llm/clients/vertex_client.py +897 -0
- aiecs/llm/clients/xai_client.py +201 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +272 -0
- aiecs/llm/config/config_validator.py +206 -0
- aiecs/llm/config/model_config.py +143 -0
- aiecs/llm/protocols.py +149 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +89 -0
- aiecs/main.py +140 -121
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +138 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/module_checker.py +499 -0
- aiecs/scripts/aid/version_manager.py +235 -0
- aiecs/scripts/{DEPENDENCY_SYSTEM_SUMMARY.md → dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md} +1 -0
- aiecs/scripts/{README_DEPENDENCY_CHECKER.md → dependance_check/README_DEPENDENCY_CHECKER.md} +1 -0
- aiecs/scripts/dependance_check/__init__.py +15 -0
- aiecs/scripts/dependance_check/dependency_checker.py +1835 -0
- aiecs/scripts/{dependency_fixer.py → dependance_check/dependency_fixer.py} +192 -90
- aiecs/scripts/{download_nlp_data.py → dependance_check/download_nlp_data.py} +203 -71
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/{fix_weasel_validator.py → dependance_patch/fix_weasel/fix_weasel_validator.py} +21 -14
- aiecs/scripts/{patch_weasel_library.sh → dependance_patch/fix_weasel/patch_weasel_library.sh} +1 -1
- aiecs/scripts/knowledge_graph/__init__.py +3 -0
- aiecs/scripts/knowledge_graph/run_threshold_experiments.py +212 -0
- aiecs/scripts/migrations/multi_tenancy/README.md +142 -0
- aiecs/scripts/tools_develop/README.md +671 -0
- aiecs/scripts/tools_develop/README_CONFIG_CHECKER.md +273 -0
- aiecs/scripts/tools_develop/TOOLS_CONFIG_GUIDE.md +1287 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_all_tools_config.py +548 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +257 -0
- aiecs/scripts/tools_develop/pre-commit-schema-coverage.sh +66 -0
- aiecs/scripts/tools_develop/schema_coverage.py +511 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +475 -0
- aiecs/scripts/tools_develop/verify_executor_config_fix.py +98 -0
- aiecs/scripts/tools_develop/verify_tools.py +352 -0
- aiecs/tasks/__init__.py +0 -1
- aiecs/tasks/worker.py +115 -47
- aiecs/tools/__init__.py +194 -72
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +632 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +417 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +385 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +330 -0
- aiecs/tools/apisource/providers/__init__.py +112 -0
- aiecs/tools/apisource/providers/base.py +671 -0
- aiecs/tools/apisource/providers/census.py +397 -0
- aiecs/tools/apisource/providers/fred.py +535 -0
- aiecs/tools/apisource/providers/newsapi.py +409 -0
- aiecs/tools/apisource/providers/worldbank.py +352 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +363 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +376 -0
- aiecs/tools/apisource/tool.py +832 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +334 -0
- aiecs/tools/base_tool.py +415 -21
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +607 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2350 -0
- aiecs/tools/docs/content_insertion_tool.py +1320 -0
- aiecs/tools/docs/document_creator_tool.py +1323 -0
- aiecs/tools/docs/document_layout_tool.py +1160 -0
- aiecs/tools/docs/document_parser_tool.py +1011 -0
- aiecs/tools/docs/document_writer_tool.py +1829 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +807 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +944 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +524 -0
- aiecs/tools/langchain_adapter.py +300 -138
- aiecs/tools/schema_generator.py +455 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +581 -0
- aiecs/tools/search_tool/cache.py +264 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +224 -0
- aiecs/tools/search_tool/core.py +778 -0
- aiecs/tools/search_tool/deduplicator.py +119 -0
- aiecs/tools/search_tool/error_handler.py +242 -0
- aiecs/tools/search_tool/metrics.py +343 -0
- aiecs/tools/search_tool/rate_limiter.py +172 -0
- aiecs/tools/search_tool/schemas.py +275 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +646 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +508 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +684 -0
- aiecs/tools/statistics/data_loader_tool.py +555 -0
- aiecs/tools/statistics/data_profiler_tool.py +638 -0
- aiecs/tools/statistics/data_transformer_tool.py +580 -0
- aiecs/tools/statistics/data_visualizer_tool.py +498 -0
- aiecs/tools/statistics/model_trainer_tool.py +507 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +472 -0
- aiecs/tools/task_tools/__init__.py +49 -36
- aiecs/tools/task_tools/chart_tool.py +200 -184
- aiecs/tools/task_tools/classfire_tool.py +268 -267
- aiecs/tools/task_tools/image_tool.py +175 -131
- aiecs/tools/task_tools/office_tool.py +226 -146
- aiecs/tools/task_tools/pandas_tool.py +477 -121
- aiecs/tools/task_tools/report_tool.py +390 -142
- aiecs/tools/task_tools/research_tool.py +149 -79
- aiecs/tools/task_tools/scraper_tool.py +339 -145
- aiecs/tools/task_tools/stats_tool.py +448 -209
- aiecs/tools/temp_file_manager.py +26 -24
- aiecs/tools/tool_executor/__init__.py +18 -16
- aiecs/tools/tool_executor/tool_executor.py +364 -52
- aiecs/utils/LLM_output_structor.py +74 -48
- aiecs/utils/__init__.py +14 -3
- aiecs/utils/base_callback.py +0 -3
- aiecs/utils/cache_provider.py +696 -0
- aiecs/utils/execution_utils.py +50 -31
- aiecs/utils/prompt_loader.py +1 -0
- aiecs/utils/token_usage_repository.py +37 -11
- aiecs/ws/socket_server.py +14 -4
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/METADATA +52 -15
- aiecs-1.7.6.dist-info/RECORD +337 -0
- aiecs-1.7.6.dist-info/entry_points.txt +13 -0
- aiecs/config/registry.py +0 -19
- aiecs/domain/context/content_engine.py +0 -982
- aiecs/llm/base_client.py +0 -99
- aiecs/llm/openai_client.py +0 -125
- aiecs/llm/vertex_client.py +0 -186
- aiecs/llm/xai_client.py +0 -184
- aiecs/scripts/dependency_checker.py +0 -857
- aiecs/scripts/quick_dependency_check.py +0 -269
- aiecs/tools/task_tools/search_api.py +0 -7
- aiecs-1.0.1.dist-info/RECORD +0 -90
- aiecs-1.0.1.dist-info/entry_points.txt +0 -7
- /aiecs/scripts/{setup_nlp_data.sh → dependance_check/setup_nlp_data.sh} +0 -0
- /aiecs/scripts/{README_WEASEL_PATCH.md → dependance_patch/fix_weasel/README_WEASEL_PATCH.md} +0 -0
- /aiecs/scripts/{fix_weasel_validator.sh → dependance_patch/fix_weasel/fix_weasel_validator.sh} +0 -0
- /aiecs/scripts/{run_weasel_patch.sh → dependance_patch/fix_weasel/run_weasel_patch.sh} +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/WHEEL +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.0.1.dist-info → aiecs-1.7.6.dist-info}/top_level.txt +0 -0
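
Among the new agent modules added in this release, aiecs/domain/agent/hybrid_agent.py (+1435 lines) is reproduced below; the hunk that follows is the opening portion of that new file.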
@@ -0,0 +1,1435 @@
"""
Hybrid Agent

Agent implementation combining LLM reasoning with tool execution capabilities.
Implements the ReAct (Reasoning + Acting) pattern.
"""

import logging
from typing import Dict, List, Any, Optional, Union, TYPE_CHECKING, AsyncIterator
from datetime import datetime

from aiecs.llm import BaseLLMClient, CacheControl, LLMMessage
from aiecs.tools import get_tool, BaseTool
from aiecs.domain.agent.tools.schema_generator import ToolSchemaGenerator

from .base_agent import BaseAIAgent
from .models import AgentType, AgentConfiguration, ToolObservation
from .exceptions import TaskExecutionError, ToolAccessDeniedError

if TYPE_CHECKING:
    from aiecs.llm.protocols import LLMClientProtocol
    from aiecs.domain.agent.integration.protocols import (
        ConfigManagerProtocol,
        CheckpointerProtocol,
    )

logger = logging.getLogger(__name__)


class HybridAgent(BaseAIAgent):
    """
    Hybrid agent combining LLM reasoning with tool execution.

    Implements ReAct pattern: Reason → Act → Observe loop.

    This agent supports flexible tool and LLM client configurations:

    **Tool Configuration:**
    - Tool names (List[str]): Backward compatible, tools loaded by name
    - Tool instances (Dict[str, BaseTool]): Pre-configured tools with preserved state

    **LLM Client Configuration:**
    - BaseLLMClient: Standard LLM clients (OpenAI, xAI, etc.)
    - Custom clients: Any object implementing LLMClientProtocol (duck typing)

    Examples:
        # Example 1: Basic usage with tool names (backward compatible)
        agent = HybridAgent(
            agent_id="agent1",
            name="My Agent",
            llm_client=OpenAIClient(),
            tools=["search", "calculator"],
            config=config
        )

        # Example 2: Using tool instances with preserved state
        from aiecs.tools import BaseTool

        class StatefulSearchTool(BaseTool):
            def __init__(self, api_key: str, context_engine):
                self.api_key = api_key
                self.context_engine = context_engine
                self.search_history = []  # State preserved across calls

            async def run_async(self, operation: str, query: str):
                self.search_history.append(query)
                # Use context_engine for context-aware search
                return f"Search results for: {query}"

        # Create tool instances with dependencies
        context_engine = ContextEngine()
        await context_engine.initialize()

        search_tool = StatefulSearchTool(
            api_key="...",
            context_engine=context_engine
        )

        agent = HybridAgent(
            agent_id="agent1",
            name="My Agent",
            llm_client=OpenAIClient(),
            tools={
                "search": search_tool,  # Stateful tool instance
                "calculator": CalculatorTool()
            },
            config=config
        )
        # Tool state (search_history) is preserved across agent operations

        # Example 3: Using custom LLM client wrapper
        class CustomLLMWrapper:
            provider_name = "custom_wrapper"

            def __init__(self, base_client):
                self.base_client = base_client
                self.call_count = 0

            async def generate_text(self, messages, **kwargs):
                self.call_count += 1
                # Add custom logging, retry logic, etc.
                return await self.base_client.generate_text(messages, **kwargs)

            async def stream_text(self, messages, **kwargs):
                async for token in self.base_client.stream_text(messages, **kwargs):
                    yield token

            async def close(self):
                await self.base_client.close()

        # Wrap existing client
        base_client = OpenAIClient()
        wrapped_client = CustomLLMWrapper(base_client)

        agent = HybridAgent(
            agent_id="agent1",
            name="My Agent",
            llm_client=wrapped_client,  # Custom wrapper, no inheritance needed
            tools=["search", "calculator"],
            config=config
        )

        # Example 4: Full-featured agent with all options
        from aiecs.domain.context import ContextEngine
        from aiecs.domain.agent.models import ResourceLimits

        context_engine = ContextEngine()
        await context_engine.initialize()

        resource_limits = ResourceLimits(
            max_concurrent_tasks=5,
            max_tokens_per_minute=10000
        )

        agent = HybridAgent(
            agent_id="agent1",
            name="My Agent",
            llm_client=CustomLLMWrapper(OpenAIClient()),
            tools={
                "search": StatefulSearchTool(api_key="...", context_engine=context_engine),
                "calculator": CalculatorTool()
            },
            config=config,
            config_manager=DatabaseConfigManager(),
            checkpointer=RedisCheckpointer(),
            context_engine=context_engine,
            collaboration_enabled=True,
            agent_registry={"agent2": other_agent},
            learning_enabled=True,
            resource_limits=resource_limits
        )

        # Example 5: Streaming with tool instances
        agent = HybridAgent(
            agent_id="agent1",
            name="My Agent",
            llm_client=OpenAIClient(),
            tools={
                "search": StatefulSearchTool(api_key="..."),
                "calculator": CalculatorTool()
            },
            config=config
        )

        # Stream task execution (tokens + tool calls)
        async for event in agent.execute_task_streaming(task, context):
            if event['type'] == 'token':
                print(event['content'], end='', flush=True)
            elif event['type'] == 'tool_call':
                print(f"\\nCalling {event['tool_name']}...")
            elif event['type'] == 'tool_result':
                print(f"Result: {event['result']}")
    """

    def __init__(
        self,
        agent_id: str,
        name: str,
        llm_client: Union[BaseLLMClient, "LLMClientProtocol"],
        tools: Union[List[str], Dict[str, BaseTool]],
        config: AgentConfiguration,
        description: Optional[str] = None,
        version: str = "1.0.0",
        max_iterations: int = 10,
        config_manager: Optional["ConfigManagerProtocol"] = None,
        checkpointer: Optional["CheckpointerProtocol"] = None,
        context_engine: Optional[Any] = None,
        collaboration_enabled: bool = False,
        agent_registry: Optional[Dict[str, Any]] = None,
        learning_enabled: bool = False,
        resource_limits: Optional[Any] = None,
    ):
        """
        Initialize Hybrid agent.

        Args:
            agent_id: Unique agent identifier
            name: Agent name
            llm_client: LLM client for reasoning (BaseLLMClient or any LLMClientProtocol)
            tools: Tools - either list of tool names or dict of tool instances
            config: Agent configuration
            description: Optional description
            version: Agent version
            max_iterations: Maximum ReAct iterations
            config_manager: Optional configuration manager for dynamic config
            checkpointer: Optional checkpointer for state persistence
            context_engine: Optional context engine for persistent storage
            collaboration_enabled: Enable collaboration features
            agent_registry: Registry of other agents for collaboration
            learning_enabled: Enable learning features
            resource_limits: Optional resource limits configuration

        Example with tool instances:
            ```python
            agent = HybridAgent(
                agent_id="agent1",
                name="My Agent",
                llm_client=OpenAIClient(),
                tools={
                    "search": SearchTool(api_key="..."),
                    "calculator": CalculatorTool()
                },
                config=config
            )
            ```

        Example with tool names (backward compatible):
            ```python
            agent = HybridAgent(
                agent_id="agent1",
                name="My Agent",
                llm_client=OpenAIClient(),
                tools=["search", "calculator"],
                config=config
            )
            ```
        """
        super().__init__(
            agent_id=agent_id,
            name=name,
            agent_type=AgentType.DEVELOPER,  # Can be adjusted based on use case
            config=config,
            description=description or "Hybrid agent with LLM reasoning and tool execution",
            version=version,
            tools=tools,
            llm_client=llm_client,  # type: ignore[arg-type]
            config_manager=config_manager,
            checkpointer=checkpointer,
            context_engine=context_engine,
            collaboration_enabled=collaboration_enabled,
            agent_registry=agent_registry,
            learning_enabled=learning_enabled,
            resource_limits=resource_limits,
        )

        # Store LLM client reference (from BaseAIAgent or local)
        self.llm_client = self._llm_client if self._llm_client else llm_client
        self._max_iterations = max_iterations
        self._system_prompt: Optional[str] = None
        self._conversation_history: List[LLMMessage] = []
        self._tool_schemas: List[Dict[str, Any]] = []
        self._use_function_calling: bool = False  # Will be determined during initialization

        logger.info(f"HybridAgent initialized: {agent_id} with LLM ({self.llm_client.provider_name}) " f"and {len(tools) if isinstance(tools, (list, dict)) else 0} tools")

    async def _initialize(self) -> None:
        """Initialize Hybrid agent - validate LLM client, load tools, and build system prompt."""
        # Validate LLM client using BaseAIAgent helper
        self._validate_llm_client()

        # Load tools using BaseAIAgent helper
        self._load_tools()

        # Get tool instances from BaseAIAgent (if provided as instances)
        base_tool_instances = self._get_tool_instances()

        if base_tool_instances:
            # Tool instances were provided - use them directly
            self._tool_instances = base_tool_instances
            logger.info(f"HybridAgent {self.agent_id} using " f"{len(self._tool_instances)} pre-configured tool instances")
        elif self._available_tools:
            # Tool names were provided - load them
            self._tool_instances = {}
            for tool_name in self._available_tools:
                try:
                    self._tool_instances[tool_name] = get_tool(tool_name)
                    logger.debug(f"HybridAgent {self.agent_id} loaded tool: {tool_name}")
                except Exception as e:
                    logger.warning(f"Failed to load tool {tool_name}: {e}")

        logger.info(f"HybridAgent {self.agent_id} initialized with {len(self._tool_instances)} tools")

        # Generate tool schemas for Function Calling
        self._generate_tool_schemas()

        # Check if LLM client supports Function Calling
        self._use_function_calling = self._check_function_calling_support()

        # Build system prompt
        self._system_prompt = self._build_system_prompt()

    async def _shutdown(self) -> None:
        """Shutdown Hybrid agent."""
        self._conversation_history.clear()
        if self._tool_instances:
            self._tool_instances.clear()

        if hasattr(self.llm_client, "close"):
            await self.llm_client.close()

        logger.info(f"HybridAgent {self.agent_id} shut down")

    def _build_system_prompt(self) -> str:
        """Build system prompt including tool descriptions.

        Precedence order for base prompt:
        1. config.system_prompt - Direct custom prompt (highest priority)
        2. Assembled from goal/backstory/domain_knowledge
        3. Default: Empty (ReAct instructions will be added)

        Note: ReAct instructions and tool info are always appended regardless
        of whether system_prompt is used, as they're essential for agent operation.
        """
        parts = []

        # 1. Custom system_prompt takes precedence over goal/backstory
        if self._config.system_prompt:
            parts.append(self._config.system_prompt)
        else:
            # 2. Assemble from individual fields
            if self._config.goal:
                parts.append(f"Goal: {self._config.goal}")

            if self._config.backstory:
                parts.append(f"Background: {self._config.backstory}")

            if self._config.domain_knowledge:
                parts.append(f"Domain Knowledge: {self._config.domain_knowledge}")

        # Add ReAct instructions (always required for HybridAgent)
        parts.append(
            "You are a reasoning agent that can use tools to complete tasks. "
            "Follow the ReAct pattern:\n"
            "1. THOUGHT: Analyze the task and decide what to do\n"
            "2. ACTION: Use a tool if needed, or provide final answer\n"
            "3. OBSERVATION: Review the tool result and continue reasoning\n\n"
            "When you need to use a tool, respond with:\n"
            "TOOL: <tool_name>\n"
            "OPERATION: <operation_name>\n"
            "PARAMETERS: <json_parameters>\n\n"
            "When you have the final answer, respond with:\n"
            "FINAL ANSWER: <your_answer>"
        )

        # Add available tools (always required for HybridAgent)
        if self._available_tools:
            parts.append(f"\nAvailable tools: {', '.join(self._available_tools)}")

        return "\n\n".join(parts)

    async def execute_task(self, task: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a task using ReAct loop.

        Args:
            task: Task specification with 'description' or 'prompt'
            context: Execution context

        Returns:
            Execution result with 'output', 'reasoning_steps', 'tool_calls'

        Raises:
            TaskExecutionError: If task execution fails
        """
        start_time = datetime.utcnow()

        try:
            # Extract task description
            task_description = task.get("description") or task.get("prompt") or task.get("task")
            if not task_description:
                raise TaskExecutionError(
                    "Task must contain 'description', 'prompt', or 'task' field",
                    agent_id=self.agent_id,
                )

            # Transition to busy state
            self._transition_state(self.state.__class__.BUSY)
            self._current_task_id = task.get("task_id")

            # Execute ReAct loop
            result = await self._react_loop(task_description, context)

            # Calculate execution time
            execution_time = (datetime.utcnow() - start_time).total_seconds()

            # Update metrics
            self.update_metrics(
                execution_time=execution_time,
                success=True,
                tokens_used=result.get("total_tokens"),
                tool_calls=result.get("tool_calls_count", 0),
            )

            # Transition back to active
            self._transition_state(self.state.__class__.ACTIVE)
            self._current_task_id = None
            self.last_active_at = datetime.utcnow()

            return {
                "success": True,
                "output": result.get("final_answer"),
                "reasoning_steps": result.get("steps"),
                "tool_calls_count": result.get("tool_calls_count"),
                "iterations": result.get("iterations"),
                "execution_time": execution_time,
                "timestamp": datetime.utcnow().isoformat(),
            }

        except Exception as e:
            logger.error(f"Task execution failed for {self.agent_id}: {e}")

            # Update metrics for failure
            execution_time = (datetime.utcnow() - start_time).total_seconds()
            self.update_metrics(execution_time=execution_time, success=False)

            # Transition to error state
            self._transition_state(self.state.__class__.ERROR)
            self._current_task_id = None

            raise TaskExecutionError(
                f"Task execution failed: {str(e)}",
                agent_id=self.agent_id,
                task_id=task.get("task_id"),
            )

    async def process_message(self, message: str, sender_id: Optional[str] = None) -> Dict[str, Any]:
        """
        Process an incoming message using ReAct loop.

        Args:
            message: Message content
            sender_id: Optional sender identifier

        Returns:
            Response dictionary with 'response', 'reasoning_steps'
        """
        try:
            # Build task from message
            task = {
                "description": message,
                "task_id": f"msg_{datetime.utcnow().timestamp()}",
            }

            # Execute as task
            result = await self.execute_task(task, {"sender_id": sender_id})

            return {
                "response": result.get("output"),
                "reasoning_steps": result.get("reasoning_steps"),
                "timestamp": result.get("timestamp"),
            }

        except Exception as e:
            logger.error(f"Message processing failed for {self.agent_id}: {e}")
            raise

    async def execute_task_streaming(self, task: Dict[str, Any], context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
        """
        Execute a task with streaming tokens and tool calls.

        Args:
            task: Task specification with 'description' or 'prompt'
            context: Execution context

        Yields:
            Dict[str, Any]: Event dictionaries with streaming tokens, tool calls, and results

        Example:
            ```python
            async for event in agent.execute_task_streaming(task, context):
                if event['type'] == 'token':
                    print(event['content'], end='', flush=True)
                elif event['type'] == 'tool_call':
                    print(f"\\nCalling {event['tool_name']}...")
                elif event['type'] == 'tool_result':
                    print(f"Result: {event['result']}")
            ```
        """
        start_time = datetime.utcnow()

        try:
            # Extract task description
            task_description = task.get("description") or task.get("prompt") or task.get("task")
            if not task_description:
                yield {
                    "type": "error",
                    "error": "Task must contain 'description', 'prompt', or 'task' field",
                    "timestamp": datetime.utcnow().isoformat(),
                }
                return

            # Transition to busy state
            self._transition_state(self.state.__class__.BUSY)
            self._current_task_id = task.get("task_id")

            # Yield status
            yield {
                "type": "status",
                "status": "started",
                "timestamp": datetime.utcnow().isoformat(),
            }

            # Execute streaming ReAct loop
            async for event in self._react_loop_streaming(task_description, context):
                yield event

            # Get final result from last event
            if event.get("type") == "result":
                result = event

            # Calculate execution time
            execution_time = (datetime.utcnow() - start_time).total_seconds()

            # Update metrics
            self.update_metrics(
                execution_time=execution_time,
                success=True,
                tokens_used=result.get("total_tokens"),
                tool_calls=result.get("tool_calls_count", 0),
            )

            # Transition back to active
            self._transition_state(self.state.__class__.ACTIVE)
            self._current_task_id = None
            self.last_active_at = datetime.utcnow()

        except Exception as e:
            logger.error(f"Streaming task execution failed for {self.agent_id}: {e}")

            # Update metrics for failure
            execution_time = (datetime.utcnow() - start_time).total_seconds()
            self.update_metrics(execution_time=execution_time, success=False)

            # Transition to error state
            self._transition_state(self.state.__class__.ERROR)
            self._current_task_id = None

            yield {
                "type": "error",
                "error": str(e),
                "timestamp": datetime.utcnow().isoformat(),
            }

    async def process_message_streaming(self, message: str, sender_id: Optional[str] = None) -> AsyncIterator[str]:
        """
        Process a message with streaming response.

        Args:
            message: Message content
            sender_id: Optional sender identifier

        Yields:
            str: Response text tokens

        Example:
            ```python
            async for token in agent.process_message_streaming("Hello!"):
                print(token, end='', flush=True)
            ```
        """
        try:
            # Build task from message
            task = {
                "description": message,
                "task_id": f"msg_{datetime.utcnow().timestamp()}",
            }

            # Stream task execution
            async for event in self.execute_task_streaming(task, {"sender_id": sender_id}):
                if event["type"] == "token":
                    yield event["content"]

        except Exception as e:
            logger.error(f"Streaming message processing failed for {self.agent_id}: {e}")
            raise

    async def _react_loop_streaming(self, task: str, context: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
        """
        Execute ReAct loop with streaming: Reason → Act → Observe.

        Args:
            task: Task description
            context: Context dictionary

        Yields:
            Dict[str, Any]: Event dictionaries with streaming tokens, tool calls, and results
        """
        steps = []
        tool_calls_count = 0
        total_tokens = 0

        # Build initial messages
        messages = self._build_initial_messages(task, context)

        for iteration in range(self._max_iterations):
            logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")

            # Yield iteration status
            yield {
                "type": "status",
                "status": "thinking",
                "iteration": iteration + 1,
                "timestamp": datetime.utcnow().isoformat(),
            }

            # THINK: Stream LLM reasoning
            thought_tokens = []
            tool_calls_from_stream = None

            # Use Function Calling if supported, otherwise use ReAct mode
            if self._use_function_calling and self._tool_schemas:
                # Convert schemas to tools format
                tools = [{"type": "function", "function": schema} for schema in self._tool_schemas]
                # Use return_chunks=True to get tool_calls information
                stream_gen = self.llm_client.stream_text(  # type: ignore[attr-defined]
                    messages=messages,
                    model=self._config.llm_model,
                    temperature=self._config.temperature,
                    max_tokens=self._config.max_tokens,
                    tools=tools,
                    tool_choice="auto",
                    return_chunks=True,  # Enable tool_calls accumulation
                )
            else:
                # Fallback to ReAct mode
                stream_gen = self.llm_client.stream_text(  # type: ignore[attr-defined]
                    messages=messages,
                    model=self._config.llm_model,
                    temperature=self._config.temperature,
                    max_tokens=self._config.max_tokens,
                )

            # Stream tokens and collect tool calls
            from aiecs.llm.clients.openai_compatible_mixin import StreamChunk

            async for chunk in stream_gen:
                # Handle StreamChunk objects (Function Calling mode)
                if isinstance(chunk, StreamChunk):
                    if chunk.type == "token" and chunk.content:
                        thought_tokens.append(chunk.content)
                        yield {
                            "type": "token",
                            "content": chunk.content,
                            "timestamp": datetime.utcnow().isoformat(),
                        }
                    elif chunk.type == "tool_call" and chunk.tool_call:
                        # Yield tool call update event
                        yield {
                            "type": "tool_call_update",
                            "tool_call": chunk.tool_call,
                            "timestamp": datetime.utcnow().isoformat(),
                        }
                    elif chunk.type == "tool_calls" and chunk.tool_calls:
                        # Complete tool_calls received
                        tool_calls_from_stream = chunk.tool_calls
                        yield {
                            "type": "tool_calls",
                            "tool_calls": chunk.tool_calls,
                            "timestamp": datetime.utcnow().isoformat(),
                        }
                else:
                    # Handle plain string tokens (ReAct mode or non-Function Calling)
                    thought_tokens.append(chunk)
                    yield {
                        "type": "token",
                        "content": chunk,
                        "timestamp": datetime.utcnow().isoformat(),
                    }

            thought = "".join(thought_tokens)

            # Process tool_calls if received from stream
            if tool_calls_from_stream:
                # Process each tool call
                for tool_call in tool_calls_from_stream:
                    try:
                        func_name = tool_call["function"]["name"]
                        func_args = tool_call["function"]["arguments"]

                        # Parse function name to extract tool and operation
                        parts = func_name.split("_", 1)
                        if len(parts) == 2:
                            tool_name, operation = parts
                        else:
                            tool_name = parts[0]
                            operation = None

                        # Parse arguments JSON
                        import json
                        if isinstance(func_args, str):
                            parameters = json.loads(func_args)
                        else:
                            parameters = func_args

                        # Yield tool call event
                        yield {
                            "type": "tool_call",
                            "tool_name": tool_name,
                            "operation": operation,
                            "parameters": parameters,
                            "timestamp": datetime.utcnow().isoformat(),
                        }

                        # Execute tool
                        tool_result = await self._execute_tool(tool_name, operation, parameters)
                        tool_calls_count += 1

                        steps.append(
                            {
                                "type": "action",
                                "tool": tool_name,
                                "operation": operation,
                                "parameters": parameters,
                                "iteration": iteration + 1,
                            }
                        )

                        # Yield tool result event
                        yield {
                            "type": "tool_result",
                            "tool_name": tool_name,
                            "result": tool_result,
                            "timestamp": datetime.utcnow().isoformat(),
                        }

                        # Add tool result to messages
                        observation = f"Tool '{tool_name}' returned: {tool_result}"
                        steps.append(
                            {
                                "type": "observation",
                                "content": observation,
                                "iteration": iteration + 1,
                            }
                        )

                        # Add assistant message with tool call and tool result
                        messages.append(
                            LLMMessage(
                                role="assistant",
                                content=None,
                                tool_calls=tool_calls_from_stream,
                            )
                        )
                        messages.append(
                            LLMMessage(
                                role="tool",
                                content=str(tool_result),
                                tool_call_id=tool_call.get("id", "call_0"),
                            )
                        )

                    except Exception as e:
                        error_msg = f"Tool execution failed: {str(e)}"
                        steps.append(
                            {
                                "type": "observation",
                                "content": error_msg,
                                "iteration": iteration + 1,
                                "error": True,
                            }
                        )
                        yield {
                            "type": "tool_error",
                            "tool_name": tool_name if "tool_name" in locals() else "unknown",
                            "error": str(e),
                            "timestamp": datetime.utcnow().isoformat(),
                        }
                        messages.append(
                            LLMMessage(
                                role="tool",
                                content=error_msg,
                                tool_call_id=tool_call.get("id", "call_0"),
                            )
                        )

                # Continue to next iteration
                continue

            steps.append(
                {
                    "type": "thought",
                    "content": thought,
                    "iteration": iteration + 1,
                }
            )

            # Check if final answer
            if "FINAL ANSWER:" in thought:
                final_answer = self._extract_final_answer(thought)
                yield {
                    "type": "result",
                    "success": True,
                    "output": final_answer,
                    "reasoning_steps": steps,
                    "tool_calls_count": tool_calls_count,
                    "iterations": iteration + 1,
                    "total_tokens": total_tokens,
                    "timestamp": datetime.utcnow().isoformat(),
                }
                return

            # Check if tool call
            if "TOOL:" in thought:
                # ACT: Execute tool
                try:
                    tool_info = self._parse_tool_call(thought)
                    tool_name = tool_info.get("tool", "")
                    if not tool_name:
                        raise ValueError("Tool name not found in tool call")

                    # Yield tool call event
                    yield {
                        "type": "tool_call",
                        "tool_name": tool_name,
                        "operation": tool_info.get("operation"),
                        "parameters": tool_info.get("parameters", {}),
                        "timestamp": datetime.utcnow().isoformat(),
                    }

                    tool_result = await self._execute_tool(
                        tool_name,
                        tool_info.get("operation"),
                        tool_info.get("parameters", {}),
                    )
                    tool_calls_count += 1

                    steps.append(
                        {
                            "type": "action",
                            "tool": tool_info["tool"],
                            "operation": tool_info.get("operation"),
                            "parameters": tool_info.get("parameters"),
                            "iteration": iteration + 1,
                        }
                    )

                    # OBSERVE: Add tool result to conversation
                    observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
                    steps.append(
                        {
                            "type": "observation",
                            "content": observation,
                            "iteration": iteration + 1,
                        }
                    )

                    # Yield tool result event
                    yield {
                        "type": "tool_result",
                        "tool_name": tool_name,
                        "result": tool_result,
                        "timestamp": datetime.utcnow().isoformat(),
                    }

                    # Add to messages for next iteration
                    messages.append(LLMMessage(role="assistant", content=thought))
                    messages.append(LLMMessage(role="user", content=observation))

                except Exception as e:
                    error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
                    steps.append(
                        {
                            "type": "observation",
                            "content": error_msg,
                            "iteration": iteration + 1,
                            "error": True,
                        }
                    )

                    # Yield error event
                    yield {
                        "type": "tool_error",
                        "tool_name": tool_name if "tool_name" in locals() else "unknown",
                        "error": str(e),
                        "timestamp": datetime.utcnow().isoformat(),
                    }

                    messages.append(LLMMessage(role="assistant", content=thought))
                    messages.append(LLMMessage(role="user", content=error_msg))

            else:
                # LLM didn't provide clear action - treat as final answer
                yield {
                    "type": "result",
                    "success": True,
                    "output": thought,
                    "reasoning_steps": steps,
                    "tool_calls_count": tool_calls_count,
                    "iterations": iteration + 1,
                    "total_tokens": total_tokens,
                    "timestamp": datetime.utcnow().isoformat(),
                }
                return

        # Max iterations reached
        logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
        yield {
            "type": "result",
            "success": True,
            "output": "Max iterations reached. Unable to complete task fully.",
            "reasoning_steps": steps,
            "tool_calls_count": tool_calls_count,
            "iterations": self._max_iterations,
            "total_tokens": total_tokens,
            "max_iterations_reached": True,
            "timestamp": datetime.utcnow().isoformat(),
        }

    async def _react_loop(self, task: str, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute ReAct loop: Reason → Act → Observe.

        Args:
            task: Task description
            context: Context dictionary

        Returns:
            Result dictionary with 'final_answer', 'steps', 'iterations'
        """
        steps = []
        tool_calls_count = 0
        total_tokens = 0

        # Build initial messages
        messages = self._build_initial_messages(task, context)

        for iteration in range(self._max_iterations):
            logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")

            # THINK: LLM reasons about next action
            # Use Function Calling if supported, otherwise use ReAct mode
            if self._use_function_calling and self._tool_schemas:
                # Convert schemas to tools format
                tools = [{"type": "function", "function": schema} for schema in self._tool_schemas]
                response = await self.llm_client.generate_text(
                    messages=messages,
                    model=self._config.llm_model,
                    temperature=self._config.temperature,
                    max_tokens=self._config.max_tokens,
                    tools=tools,
                    tool_choice="auto",
                )
            else:
                # Fallback to ReAct mode
                response = await self.llm_client.generate_text(
                    messages=messages,
                    model=self._config.llm_model,
                    temperature=self._config.temperature,
                    max_tokens=self._config.max_tokens,
                )

            thought = response.content or ""
            total_tokens += getattr(response, "total_tokens", 0)

            # Check for Function Calling response
            tool_calls = getattr(response, "tool_calls", None)
            function_call = getattr(response, "function_call", None)

            if tool_calls or function_call:
                # Handle Function Calling response
                tool_calls_to_process = tool_calls or []
                if function_call:
                    # Convert legacy function_call to tool_calls format
                    tool_calls_to_process = [
                        {
                            "id": "call_0",
                            "type": "function",
                            "function": {
                                "name": function_call["name"],
                                "arguments": function_call["arguments"],
                            },
                        }
                    ]

                # Process each tool call
                for tool_call in tool_calls_to_process:
                    try:
                        func_name = tool_call["function"]["name"]
                        func_args = tool_call["function"]["arguments"]

                        # Parse function name to extract tool and operation
                        # Format: tool_name_operation or tool_name
                        parts = func_name.split("_", 1)
                        if len(parts) == 2:
                            tool_name, operation = parts
                        else:
                            tool_name = parts[0]
                            operation = None

                        # Parse arguments JSON
                        import json
                        if isinstance(func_args, str):
                            parameters = json.loads(func_args)
                        else:
                            parameters = func_args

                        steps.append(
                            {
                                "type": "thought",
                                "content": f"Calling tool {func_name}",
                                "iteration": iteration + 1,
                            }
                        )

                        # Execute tool
                        tool_result = await self._execute_tool(tool_name, operation, parameters)
                        tool_calls_count += 1

                        steps.append(
                            {
                                "type": "action",
                                "tool": tool_name,
                                "operation": operation,
                                "parameters": parameters,
                                "iteration": iteration + 1,
                            }
                        )

                        # Add tool result to messages
                        observation = f"Tool '{tool_name}' returned: {tool_result}"
                        steps.append(
                            {
                                "type": "observation",
                                "content": observation,
                                "iteration": iteration + 1,
                            }
                        )

                        # Add assistant message with tool call and tool result
                        messages.append(
                            LLMMessage(
                                role="assistant",
                                content=None,  # Content is None when using tool calls
                                tool_calls=tool_calls_to_process if tool_calls else None,
                            )
                        )
                        messages.append(
                            LLMMessage(
                                role="tool",
                                content=str(tool_result),
                                tool_call_id=tool_call.get("id", "call_0"),
                            )
                        )

                    except Exception as e:
                        error_msg = f"Tool execution failed: {str(e)}"
                        steps.append(
                            {
                                "type": "observation",
                                "content": error_msg,
                                "iteration": iteration + 1,
                                "error": True,
                            }
                        )
                        # Add error to messages
                        messages.append(
                            LLMMessage(
                                role="tool",
                                content=error_msg,
                                tool_call_id=tool_call.get("id", "call_0"),
                            )
                        )

                # Continue to next iteration
                continue

            # If using Function Calling and no tool calls, check if we have a final answer
            if self._use_function_calling and thought:
                # LLM provided a text response without tool calls - treat as final answer
# LLM provided a text response without tool calls - treat as final answer
|
|
1080
|
+
return {
|
|
1081
|
+
"final_answer": thought,
|
|
1082
|
+
"steps": steps,
|
|
1083
|
+
"iterations": iteration + 1,
|
|
1084
|
+
"tool_calls_count": tool_calls_count,
|
|
1085
|
+
"total_tokens": total_tokens,
|
|
1086
|
+
}
|
|
1087
|
+
|
|
1088
|
+
steps.append(
|
|
1089
|
+
{
|
|
1090
|
+
"type": "thought",
|
|
1091
|
+
"content": thought,
|
|
1092
|
+
"iteration": iteration + 1,
|
|
1093
|
+
}
|
|
1094
|
+
)
|
|
1095
|
+
|
|
1096
|
+
# Check if final answer (ReAct mode)
|
|
1097
|
+
if "FINAL ANSWER:" in thought:
|
|
1098
|
+
final_answer = self._extract_final_answer(thought)
|
|
1099
|
+
return {
|
|
1100
|
+
"final_answer": final_answer,
|
|
1101
|
+
"steps": steps,
|
|
1102
|
+
"iterations": iteration + 1,
|
|
1103
|
+
"tool_calls_count": tool_calls_count,
|
|
1104
|
+
"total_tokens": total_tokens,
|
|
1105
|
+
}
|
|
1106
|
+
|
|
1107
|
+
# Check if tool call (ReAct mode)
|
|
1108
|
+
if "TOOL:" in thought:
|
|
1109
|
+
# ACT: Execute tool
|
|
1110
|
+
try:
|
|
1111
|
+
tool_info = self._parse_tool_call(thought)
|
|
1112
|
+
tool_name = tool_info.get("tool", "")
|
|
1113
|
+
if not tool_name:
|
|
1114
|
+
raise ValueError("Tool name not found in tool call")
|
|
1115
|
+
tool_result = await self._execute_tool(
|
|
1116
|
+
tool_name,
|
|
1117
|
+
tool_info.get("operation"),
|
|
1118
|
+
tool_info.get("parameters", {}),
|
|
1119
|
+
)
|
|
1120
|
+
tool_calls_count += 1
|
|
1121
|
+
|
|
1122
|
+
steps.append(
|
|
1123
|
+
{
|
|
1124
|
+
"type": "action",
|
|
1125
|
+
"tool": tool_info["tool"],
|
|
1126
|
+
"operation": tool_info.get("operation"),
|
|
1127
|
+
"parameters": tool_info.get("parameters"),
|
|
1128
|
+
"iteration": iteration + 1,
|
|
1129
|
+
}
|
|
1130
|
+
)
|
|
1131
|
+
|
|
1132
|
+
# OBSERVE: Add tool result to conversation
|
|
1133
|
+
observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
|
|
1134
|
+
steps.append(
|
|
1135
|
+
{
|
|
1136
|
+
"type": "observation",
|
|
1137
|
+
"content": observation,
|
|
1138
|
+
"iteration": iteration + 1,
|
|
1139
|
+
}
|
|
1140
|
+
)
|
|
1141
|
+
|
|
1142
|
+
# Add to messages for next iteration
|
|
1143
|
+
messages.append(LLMMessage(role="assistant", content=thought))
|
|
1144
|
+
messages.append(LLMMessage(role="user", content=observation))
|
|
1145
|
+
|
|
1146
|
+
except Exception as e:
|
|
1147
|
+
error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
|
|
1148
|
+
steps.append(
|
|
1149
|
+
{
|
|
1150
|
+
"type": "observation",
|
|
1151
|
+
"content": error_msg,
|
|
1152
|
+
"iteration": iteration + 1,
|
|
1153
|
+
"error": True,
|
|
1154
|
+
}
|
|
1155
|
+
)
|
|
1156
|
+
messages.append(LLMMessage(role="assistant", content=thought))
|
|
1157
|
+
messages.append(LLMMessage(role="user", content=error_msg))
|
|
1158
|
+
|
|
1159
|
+
else:
|
|
1160
|
+
# LLM didn't provide clear action - treat as final answer
|
|
1161
|
+
return {
|
|
1162
|
+
"final_answer": thought,
|
|
1163
|
+
"steps": steps,
|
|
1164
|
+
"iterations": iteration + 1,
|
|
1165
|
+
"tool_calls_count": tool_calls_count,
|
|
1166
|
+
"total_tokens": total_tokens,
|
|
1167
|
+
}
|
|
1168
|
+
|
|
1169
|
+
# Max iterations reached
|
|
1170
|
+
logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
|
|
1171
|
+
return {
|
|
1172
|
+
"final_answer": "Max iterations reached. Unable to complete task fully.",
|
|
1173
|
+
"steps": steps,
|
|
1174
|
+
"iterations": self._max_iterations,
|
|
1175
|
+
"tool_calls_count": tool_calls_count,
|
|
1176
|
+
"total_tokens": total_tokens,
|
|
1177
|
+
"max_iterations_reached": True,
|
|
1178
|
+
}
|
|
1179
|
+
|
|
1180
|
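In plain-text mode, `_react_loop` keys entirely off two markers in the model's reply. A worked example of both reply shapes (the strings are illustrative only):

```python
# The two reply shapes _react_loop recognizes when function calling is off.
tool_reply = (
    "I should look this up first.\n"
    "TOOL: search\n"
    "OPERATION: query\n"
    'PARAMETERS: {"q": "AI"}'
)
final_reply = "Putting it together...\nFINAL ANSWER: 42"

assert "TOOL:" in tool_reply           # dispatched via _parse_tool_call / _execute_tool
assert "FINAL ANSWER:" in final_reply  # loop returns the extracted answer
```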
+    def _build_initial_messages(self, task: str, context: Dict[str, Any]) -> List[LLMMessage]:
+        """Build initial messages for ReAct loop."""
+        messages = []
+
+        # Add system prompt with cache control if caching is enabled
+        if self._system_prompt:
+            cache_control = (
+                CacheControl(type="ephemeral")
+                if self._config.enable_prompt_caching
+                else None
+            )
+            messages.append(
+                LLMMessage(
+                    role="system",
+                    content=self._system_prompt,
+                    cache_control=cache_control,
+                )
+            )
+
+        # Add context if provided
+        if context:
+            context_str = self._format_context(context)
+            if context_str:
+                messages.append(
+                    LLMMessage(
+                        role="system",
+                        content=f"Additional Context:\n{context_str}",
+                    )
+                )
+
+        # Add task
+        messages.append(LLMMessage(role="user", content=f"Task: {task}"))
+
+        return messages
+
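A self-contained stand-in (plain dicts instead of `LLMMessage` objects) showing the message layout `_build_initial_messages` assembles; `enable_caching` here mirrors `self._config.enable_prompt_caching`, and the context filtering matches `_format_context` below.

```python
# Dict-based sketch of the message layout; the real method returns
# LLMMessage objects and only attaches cache_control when prompt
# caching is enabled in the agent config.
def build_messages(system_prompt, context, task, enable_caching=False):
    messages = []
    if system_prompt:
        msg = {"role": "system", "content": system_prompt}
        if enable_caching:
            msg["cache_control"] = {"type": "ephemeral"}
        messages.append(msg)
    ctx = "\n".join(
        f"{k}: {v}" for k, v in context.items()
        if not k.startswith("_") and v is not None
    )
    if ctx:
        messages.append({"role": "system", "content": f"Additional Context:\n{ctx}"})
    messages.append({"role": "user", "content": f"Task: {task}"})
    return messages

print(build_messages("You are helpful.", {"user_id": 42, "_trace": "x"}, "summarize"))
```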
+    def _format_context(self, context: Dict[str, Any]) -> str:
+        """Format context dictionary as string."""
+        relevant_fields = []
+        for key, value in context.items():
+            if not key.startswith("_") and value is not None:
+                relevant_fields.append(f"{key}: {value}")
+        return "\n".join(relevant_fields) if relevant_fields else ""
+
+    def _extract_final_answer(self, thought: str) -> str:
+        """Extract final answer from thought."""
+        if "FINAL ANSWER:" in thought:
+            return thought.split("FINAL ANSWER:", 1)[1].strip()
+        return thought
+
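The extraction is a single split on the marker; for example:

```python
# Everything after the first "FINAL ANSWER:" marker, whitespace-stripped.
thought = "Adding the figures...\nFINAL ANSWER: 42"
assert thought.split("FINAL ANSWER:", 1)[1].strip() == "42"
```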
+    def _parse_tool_call(self, thought: str) -> Dict[str, Any]:
+        """
+        Parse tool call from LLM thought.
+
+        Expected format:
+        TOOL: <tool_name>
+        OPERATION: <operation_name>
+        PARAMETERS: <json_parameters>
+
+        Args:
+            thought: LLM thought containing tool call
+
+        Returns:
+            Dictionary with 'tool', 'operation', 'parameters'
+        """
+        import json
+
+        result = {}
+
+        # Extract tool
+        if "TOOL:" in thought:
+            tool_line = [line for line in thought.split("\n") if line.startswith("TOOL:")][0]
+            result["tool"] = tool_line.split("TOOL:", 1)[1].strip()
+
+        # Extract operation (optional)
+        if "OPERATION:" in thought:
+            op_line = [line for line in thought.split("\n") if line.startswith("OPERATION:")][0]
+            result["operation"] = op_line.split("OPERATION:", 1)[1].strip()
+
+        # Extract parameters (optional)
+        if "PARAMETERS:" in thought:
+            param_line = [line for line in thought.split("\n") if line.startswith("PARAMETERS:")][0]
+            param_str = param_line.split("PARAMETERS:", 1)[1].strip()
+            try:
+                result["parameters"] = json.loads(param_str)
+            except json.JSONDecodeError:
+                logger.warning(f"Failed to parse parameters: {param_str}")
+                result["parameters"] = {}  # type: ignore[assignment]
+
+        return result
+
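A worked example of the format `_parse_tool_call` expects, replicating its line-prefix parsing on a sample thought:

```python
# Standalone replication of the TOOL:/OPERATION:/PARAMETERS: parsing.
import json

thought = """I need to search.
TOOL: search
OPERATION: query
PARAMETERS: {"q": "knowledge graphs", "limit": 5}"""

parsed = {}
for line in thought.split("\n"):
    if line.startswith("TOOL:"):
        parsed["tool"] = line.split("TOOL:", 1)[1].strip()
    elif line.startswith("OPERATION:"):
        parsed["operation"] = line.split("OPERATION:", 1)[1].strip()
    elif line.startswith("PARAMETERS:"):
        parsed["parameters"] = json.loads(line.split("PARAMETERS:", 1)[1].strip())

assert parsed == {
    "tool": "search",
    "operation": "query",
    "parameters": {"q": "knowledge graphs", "limit": 5},
}
```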
+    async def _execute_tool(
+        self,
+        tool_name: str,
+        operation: Optional[str],
+        parameters: Dict[str, Any],
+    ) -> Any:
+        """Execute a tool operation."""
+        # Check access
+        if not self._available_tools or tool_name not in self._available_tools:
+            raise ToolAccessDeniedError(self.agent_id, tool_name)
+
+        if not self._tool_instances:
+            raise ValueError(f"Tool instances not available for {tool_name}")
+        tool = self._tool_instances.get(tool_name)
+        if not tool:
+            raise ValueError(f"Tool {tool_name} not loaded")
+
+        # Execute tool
+        if operation:
+            result = await tool.run_async(operation, **parameters)
+        else:
+            if hasattr(tool, "run_async"):
+                result = await tool.run_async(**parameters)
+            else:
+                raise ValueError(f"Tool {tool_name} requires operation to be specified")
+
+        return result
+
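The dispatch contract is small: a tool exposes `run_async`, receiving the operation positionally and the parameters as keyword arguments. A toy tool satisfying that contract (illustrative, not part of the package):

```python
# Toy tool matching the run_async contract _execute_tool dispatches to.
class EchoTool:
    async def run_async(self, operation, **parameters):
        return {"operation": operation, "parameters": parameters}

# await EchoTool().run_async("query", q="AI")
# -> {'operation': 'query', 'parameters': {'q': 'AI'}}
```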
+    async def _execute_tool_with_observation(
+        self,
+        tool_name: str,
+        operation: Optional[str],
+        parameters: Dict[str, Any],
+    ) -> "ToolObservation":
+        """
+        Execute a tool and return structured observation.
+
+        Wraps tool execution with automatic success/error tracking,
+        execution time measurement, and structured result formatting.
+
+        Args:
+            tool_name: Name of the tool to execute
+            operation: Optional operation name
+            parameters: Tool parameters
+
+        Returns:
+            ToolObservation with execution details
+
+        Example:
+            ```python
+            obs = await agent._execute_tool_with_observation(
+                tool_name="search",
+                operation="query",
+                parameters={"q": "AI"}
+            )
+            print(obs.to_text())
+            ```
+        """
+        start_time = datetime.utcnow()
+
+        try:
+            # Execute tool
+            result = await self._execute_tool(tool_name, operation, parameters)
+
+            # Calculate execution time
+            end_time = datetime.utcnow()
+            execution_time_ms = (end_time - start_time).total_seconds() * 1000
+
+            # Create observation
+            observation = ToolObservation(
+                tool_name=tool_name,
+                parameters=parameters,
+                result=result,
+                success=True,
+                error=None,
+                execution_time_ms=execution_time_ms,
+            )
+
+            logger.info(f"Tool '{tool_name}' executed successfully in {execution_time_ms:.2f}ms")
+
+            return observation
+
+        except Exception as e:
+            # Calculate execution time
+            end_time = datetime.utcnow()
+            execution_time_ms = (end_time - start_time).total_seconds() * 1000
+
+            # Create error observation
+            observation = ToolObservation(
+                tool_name=tool_name,
+                parameters=parameters,
+                result=None,
+                success=False,
+                error=str(e),
+                execution_time_ms=execution_time_ms,
+            )
+
+            logger.error(f"Tool '{tool_name}' failed after {execution_time_ms:.2f}ms: {e}")
+
+            return observation
+
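`ToolObservation` itself is defined elsewhere in the package; a minimal stand-in with just the fields constructed above and an assumed `to_text()` rendering:

```python
# Minimal stand-in; the real ToolObservation lives elsewhere in aiecs
# and may carry more fields. to_text() here is an assumed rendering.
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class ToolObservationSketch:
    tool_name: str
    parameters: Dict[str, Any]
    result: Any
    success: bool
    error: Optional[str]
    execution_time_ms: float

    def to_text(self) -> str:
        if self.success:
            return f"Tool '{self.tool_name}' returned: {self.result}"
        return f"Tool '{self.tool_name}' failed: {self.error}"
```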
+    def get_available_tools(self) -> List[str]:
+        """Get list of available tools."""
+        return self._available_tools.copy() if self._available_tools else []
+
+    def _generate_tool_schemas(self) -> None:
+        """Generate OpenAI Function Calling schemas for available tools."""
+        if not self._tool_instances:
+            return
+
+        try:
+            # Use ToolSchemaGenerator to generate schemas from tool instances
+            self._tool_schemas = ToolSchemaGenerator.generate_schemas_for_tool_instances(
+                self._tool_instances
+            )
+            logger.info(f"HybridAgent {self.agent_id} generated {len(self._tool_schemas)} tool schemas")
+        except Exception as e:
+            logger.warning(f"Failed to generate tool schemas: {e}. Falling back to ReAct mode.")
+            self._tool_schemas = []
+
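After `_generate_tool_schemas` runs, `_react_loop` wraps each schema as `{"type": "function", "function": schema}`. The resulting tool entry follows the OpenAI function-calling shape; the field values below are illustrative, not output of `ToolSchemaGenerator`:

```python
# Illustrative tool entry after wrapping; names and values are examples.
tool = {
    "type": "function",
    "function": {
        "name": "search_query",  # "<tool>_<operation>", per the name parsing above
        "description": "Run a search query.",
        "parameters": {
            "type": "object",
            "properties": {"q": {"type": "string"}},
            "required": ["q"],
        },
    },
}
```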
+    def _check_function_calling_support(self) -> bool:
+        """
+        Check if LLM client supports Function Calling.
+
+        Returns:
+            True if Function Calling is supported, False otherwise
+        """
+        # Check if we have tools and schemas
+        if not self._tool_instances or not self._tool_schemas:
+            return False
+
+        # Check if LLM client supports Function Calling
+        # OpenAI, xAI (OpenAI-compatible), Google Vertex AI, and some other providers support it
+        provider_name = getattr(self.llm_client, "provider_name", "").lower()
+        supported_providers = ["openai", "xai", "anthropic", "vertex"]
+
+        # Note: Google Vertex AI uses FunctionDeclaration format, but it's handled via GoogleFunctionCallingMixin
+        # The mixin converts OpenAI format to Google format internally
+
+        # Also check if generate_text method accepts 'tools' or 'functions' parameter
+        import inspect
+        try:
+            sig = inspect.signature(self.llm_client.generate_text)
+            params = sig.parameters
+            has_tools_param = "tools" in params or "functions" in params
+        except (ValueError, TypeError):
+            # If signature inspection fails, assume not supported
+            has_tools_param = False
+
+        return provider_name in supported_providers or has_tools_param
+
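The signature probe generalizes to any callable; a standalone demonstration of the same check:

```python
# Any client whose generate_text accepts a `tools` (or `functions`)
# keyword passes the capability probe above.
import inspect

async def generate_text(messages, model, tools=None, tool_choice="auto"):
    ...

params = inspect.signature(generate_text).parameters
print("tools" in params or "functions" in params)  # True
```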
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "HybridAgent":
+        """
+        Deserialize HybridAgent from dictionary.
+
+        Note: LLM client must be provided separately.
+
+        Args:
+            data: Dictionary representation
+
+        Returns:
+            HybridAgent instance
+        """
+        raise NotImplementedError(
+            "HybridAgent.from_dict requires LLM client to be provided separately. "
+            "Use constructor instead."
+        )