noesium 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- noesium/core/__init__.py +4 -0
- noesium/core/agent/__init__.py +14 -0
- noesium/core/agent/base.py +227 -0
- noesium/core/consts.py +6 -0
- noesium/core/goalith/conflict/conflict.py +104 -0
- noesium/core/goalith/conflict/detector.py +53 -0
- noesium/core/goalith/decomposer/__init__.py +6 -0
- noesium/core/goalith/decomposer/base.py +46 -0
- noesium/core/goalith/decomposer/callable_decomposer.py +65 -0
- noesium/core/goalith/decomposer/llm_decomposer.py +326 -0
- noesium/core/goalith/decomposer/prompts.py +140 -0
- noesium/core/goalith/decomposer/simple_decomposer.py +61 -0
- noesium/core/goalith/errors.py +22 -0
- noesium/core/goalith/goalgraph/graph.py +526 -0
- noesium/core/goalith/goalgraph/node.py +179 -0
- noesium/core/goalith/replanner/base.py +31 -0
- noesium/core/goalith/replanner/replanner.py +36 -0
- noesium/core/goalith/service.py +26 -0
- noesium/core/llm/__init__.py +154 -0
- noesium/core/llm/base.py +152 -0
- noesium/core/llm/litellm.py +528 -0
- noesium/core/llm/llamacpp.py +487 -0
- noesium/core/llm/message.py +184 -0
- noesium/core/llm/ollama.py +459 -0
- noesium/core/llm/openai.py +520 -0
- noesium/core/llm/openrouter.py +89 -0
- noesium/core/llm/prompt.py +551 -0
- noesium/core/memory/__init__.py +11 -0
- noesium/core/memory/base.py +464 -0
- noesium/core/memory/memu/__init__.py +24 -0
- noesium/core/memory/memu/config/__init__.py +26 -0
- noesium/core/memory/memu/config/activity/config.py +46 -0
- noesium/core/memory/memu/config/event/config.py +46 -0
- noesium/core/memory/memu/config/markdown_config.py +241 -0
- noesium/core/memory/memu/config/profile/config.py +48 -0
- noesium/core/memory/memu/llm_adapter.py +129 -0
- noesium/core/memory/memu/memory/__init__.py +31 -0
- noesium/core/memory/memu/memory/actions/__init__.py +40 -0
- noesium/core/memory/memu/memory/actions/add_activity_memory.py +299 -0
- noesium/core/memory/memu/memory/actions/base_action.py +342 -0
- noesium/core/memory/memu/memory/actions/cluster_memories.py +262 -0
- noesium/core/memory/memu/memory/actions/generate_suggestions.py +198 -0
- noesium/core/memory/memu/memory/actions/get_available_categories.py +66 -0
- noesium/core/memory/memu/memory/actions/link_related_memories.py +515 -0
- noesium/core/memory/memu/memory/actions/run_theory_of_mind.py +254 -0
- noesium/core/memory/memu/memory/actions/update_memory_with_suggestions.py +514 -0
- noesium/core/memory/memu/memory/embeddings.py +130 -0
- noesium/core/memory/memu/memory/file_manager.py +306 -0
- noesium/core/memory/memu/memory/memory_agent.py +578 -0
- noesium/core/memory/memu/memory/recall_agent.py +376 -0
- noesium/core/memory/memu/memory_store.py +628 -0
- noesium/core/memory/models.py +149 -0
- noesium/core/msgbus/__init__.py +12 -0
- noesium/core/msgbus/base.py +395 -0
- noesium/core/orchestrix/__init__.py +0 -0
- noesium/core/py.typed +0 -0
- noesium/core/routing/__init__.py +20 -0
- noesium/core/routing/base.py +66 -0
- noesium/core/routing/router.py +241 -0
- noesium/core/routing/strategies/__init__.py +9 -0
- noesium/core/routing/strategies/dynamic_complexity.py +361 -0
- noesium/core/routing/strategies/self_assessment.py +147 -0
- noesium/core/routing/types.py +38 -0
- noesium/core/toolify/__init__.py +39 -0
- noesium/core/toolify/base.py +360 -0
- noesium/core/toolify/config.py +138 -0
- noesium/core/toolify/mcp_integration.py +275 -0
- noesium/core/toolify/registry.py +214 -0
- noesium/core/toolify/toolkits/__init__.py +1 -0
- noesium/core/tracing/__init__.py +37 -0
- noesium/core/tracing/langgraph_hooks.py +308 -0
- noesium/core/tracing/opik_tracing.py +144 -0
- noesium/core/tracing/token_tracker.py +166 -0
- noesium/core/utils/__init__.py +10 -0
- noesium/core/utils/logging.py +172 -0
- noesium/core/utils/statistics.py +12 -0
- noesium/core/utils/typing.py +17 -0
- noesium/core/vector_store/__init__.py +79 -0
- noesium/core/vector_store/base.py +94 -0
- noesium/core/vector_store/pgvector.py +304 -0
- noesium/core/vector_store/weaviate.py +383 -0
- noesium-0.1.0.dist-info/METADATA +525 -0
- noesium-0.1.0.dist-info/RECORD +86 -0
- noesium-0.1.0.dist-info/WHEEL +5 -0
- noesium-0.1.0.dist-info/licenses/LICENSE +21 -0
- noesium-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"""Self-assessment routing strategy implementation."""
|
|
2
|
+
|
|
3
|
+
from noesium.core.routing.base import BaseRoutingStrategy
|
|
4
|
+
from noesium.core.routing.types import ComplexityScore, ModelTier, RoutingResult
|
|
5
|
+
from noesium.core.utils.logging import get_logger
|
|
6
|
+
|
|
7
|
+
# Module-level logger shared by the strategy implementation below.
logger = get_logger(__name__)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SelfAssessmentStrategy(BaseRoutingStrategy):
    """
    Routing strategy that delegates complexity scoring to the lite model.

    The lite model is prompted to rate the incoming query on a 1-5 scale,
    and that rating is mapped onto a model tier via configurable thresholds.
    """

    def __init__(self, lite_client=None, config=None):
        """
        Initialize the self-assessment strategy.

        Args:
            lite_client: LLM client for the lite model (required).
            config: Optional dict of tuning parameters:
                - temperature: sampling temperature for the lite model (default: 0.1)
                - max_tokens: token budget for the assessment reply (default: 5)
                - lite_threshold: highest score still routed to LITE (default: 1)
                - fast_threshold: highest score still routed to FAST (default: 3)

        Raises:
            ValueError: If no lite_client is supplied.
        """
        super().__init__(lite_client, config)

        if not self.lite_client:
            raise ValueError("SelfAssessmentStrategy requires a lite_client")

        # Pull tunables out of the config dict, falling back to defaults.
        self.temperature = self.config.get("temperature", 0.1)
        self.max_tokens = self.config.get("max_tokens", 5)
        self.lite_threshold = self.config.get("lite_threshold", 1)
        self.fast_threshold = self.config.get("fast_threshold", 3)

    def route(self, query: str) -> RoutingResult:
        """
        Route a query according to the lite model's own complexity rating.

        Args:
            query: Input query to assess.

        Returns:
            RoutingResult with the recommended tier; on any failure the
            strategy degrades to the FAST tier with zero confidence.
        """
        try:
            # Ask the lite model for a 1-5 rating of the query.
            raw_reply = self.lite_client.completion(
                messages=[{"role": "user", "content": self._create_assessment_prompt(query)}],
                temperature=self.temperature,
                max_tokens=self.max_tokens,
            )

            rating, confidence = self._parse_assessment_response(raw_reply)

            return self._create_result(
                tier=self._score_to_tier(rating),
                confidence=confidence,
                complexity_score=ComplexityScore(
                    # Normalize the 1-5 rating into ComplexityScore's 0-1 range.
                    total=rating / 5.0,
                    metadata={"raw_score": rating, "raw_response": raw_reply},
                ),
                metadata={"raw_assessment": raw_reply},
            )

        except Exception as e:
            logger.error(f"Error in self-assessment routing: {e}")
            # Degrade gracefully: middle tier, zero confidence, error recorded.
            return self._create_result(
                tier=ModelTier.FAST,
                confidence=0.0,
                complexity_score=ComplexityScore(total=0.5),
                metadata={"error": str(e), "fallback": True},
            )

    def get_strategy_name(self) -> str:
        """Return the identifier of this routing strategy."""
        return "self_assessment"

    def _create_assessment_prompt(self, query: str) -> str:
        """Build the 1-5 rating prompt sent to the lite model."""
        return f"""You are a request classifier. Rate the complexity of the following request:
- 1 = simple fact or direct instruction (lite model can handle)
- 2-3 = reasoning or multi-sentence but not too complex (fast model recommended)
- 4-5 = deep reasoning, creativity, or novel synthesis (power model required)

Request: "{query}"

Output ONLY a number from 1 to 5:"""

    def _parse_assessment_response(self, response: str) -> tuple[int, float]:
        """
        Extract a 1-5 complexity rating from the lite model's reply.

        Args:
            response: Raw response text from the lite model.

        Returns:
            Tuple of (complexity_score, confidence). Falls back to a medium
            score of 3 with low confidence when nothing parseable is found.
        """
        try:
            cleaned = response.strip()
            # Scan for the first digit that lies in the valid 1-5 range.
            for ch in cleaned:
                if not ch.isdigit():
                    continue
                value = int(ch)
                if not (1 <= value <= 5):
                    continue
                # Confidence is higher when the reply is exactly the bare number.
                exact = cleaned == str(value)
                return value, (0.9 if exact else 0.7)

            logger.warning(f"Could not parse assessment response: {response}")
            return 3, 0.3  # Default to medium complexity with low confidence

        except Exception as e:
            logger.error(f"Error parsing assessment response: {e}")
            return 3, 0.1

    def _score_to_tier(self, score: int) -> ModelTier:
        """
        Map a 1-5 complexity score onto a model tier using the thresholds.

        Args:
            score: Complexity score from 1-5.

        Returns:
            The matching ModelTier (LITE, FAST, or POWER).
        """
        if score <= self.lite_threshold:
            return ModelTier.LITE
        if score <= self.fast_threshold:
            return ModelTier.FAST
        return ModelTier.POWER
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""Type definitions and enums for the routing module."""
|
|
2
|
+
|
|
3
|
+
from enum import Enum
|
|
4
|
+
from typing import Any, Dict, Optional
|
|
5
|
+
|
|
6
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class ModelTier(str, Enum):
    """Model tiers ordered by capability and resource cost.

    Inherits from str so members compare equal to (and serialize as) their
    plain string values.
    """

    LITE = "lite"  # Cheapest tier: quick answers to simple requests
    FAST = "fast"  # Middle tier: moderate reasoning at balanced cost
    POWER = "power"  # Top tier: heavyweight models for complex reasoning
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ComplexityScore(BaseModel):
    """Represents a complexity score with breakdown of different factors.

    All score fields are constrained to the 0.0-1.0 range; the component
    fields are optional because not every routing strategy computes them.
    """

    # Overall (required) score; the components below are optional extras.
    total: float = Field(..., ge=0.0, le=1.0, description="Overall complexity score (0.0-1.0)")
    linguistic: Optional[float] = Field(None, ge=0.0, le=1.0, description="Linguistic complexity component")
    reasoning: Optional[float] = Field(None, ge=0.0, le=1.0, description="Reasoning depth component")
    uncertainty: Optional[float] = Field(None, ge=0.0, le=1.0, description="Knowledge uncertainty component")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")

    # Reject unknown fields; allow arbitrary (non-pydantic) values in metadata.
    model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class RoutingResult(BaseModel):
    """Result of a routing decision.

    Bundles the chosen tier with the confidence, the complexity breakdown
    that produced it, and the name of the strategy that made the call.
    """

    tier: ModelTier = Field(..., description="Recommended model tier")
    confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence in the routing decision (0.0-1.0)")
    complexity_score: ComplexityScore = Field(..., description="Detailed complexity breakdown")
    strategy: str = Field(..., description="Name of the strategy used")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional routing metadata")

    # Reject unknown fields; allow arbitrary (non-pydantic) values in metadata.
    model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True)
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Noesium Tools Module
|
|
3
|
+
|
|
4
|
+
A unified toolkit system for LLM-based agents with support for:
|
|
5
|
+
- LangChain tool integration
|
|
6
|
+
- MCP (Model Context Protocol) support
|
|
7
|
+
- Unified configuration management
|
|
8
|
+
- Built-in logging and LLM integration
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from .base import AsyncBaseToolkit, BaseToolkit
|
|
12
|
+
from .config import ToolkitConfig
|
|
13
|
+
from .registry import ToolkitRegistry, get_toolkit, get_toolkits_map
|
|
14
|
+
|
|
15
|
+
# Import MCP integration if available. The import must actually be attempted
# here: without it, ImportError can never fire, MCP_AVAILABLE is always True,
# and __all__ advertises names (MCPToolkit, create_mcp_toolkit) that were
# never bound in this module.
try:
    from .mcp_integration import MCPToolkit, create_mcp_toolkit

    MCP_AVAILABLE = True
    __all__ = [
        "ToolkitConfig",
        "BaseToolkit",
        "AsyncBaseToolkit",
        "ToolkitRegistry",
        "get_toolkit",
        "get_toolkits_map",
        "MCPToolkit",
        "create_mcp_toolkit",
        "MCP_AVAILABLE",
    ]
except ImportError:
    # Optional MCP dependency missing: expose the core API only.
    MCP_AVAILABLE = False
    __all__ = [
        "ToolkitConfig",
        "BaseToolkit",
        "AsyncBaseToolkit",
        "ToolkitRegistry",
        "get_toolkit",
        "get_toolkits_map",
        "MCP_AVAILABLE",
    ]
|
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base toolkit classes for noesium tools system.
|
|
3
|
+
|
|
4
|
+
Provides abstract base classes for both synchronous and asynchronous toolkits
|
|
5
|
+
with support for LangChain tools, MCP integration, and unified configuration.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import abc
|
|
9
|
+
import asyncio
|
|
10
|
+
from typing import Any, Callable, Dict, List, Optional, Union
|
|
11
|
+
|
|
12
|
+
from langchain_core.tools import BaseTool, tool
|
|
13
|
+
|
|
14
|
+
from noesium.core.llm import BaseLLMClient
|
|
15
|
+
from noesium.core.utils.logging import get_logger
|
|
16
|
+
|
|
17
|
+
from .config import ToolkitConfig
|
|
18
|
+
|
|
19
|
+
# Probe for the optional MCP dependency once at import time. Downstream code
# checks MCP_AVAILABLE before touching mcp_types.
try:
    import mcp.types as mcp_types

    MCP_AVAILABLE = True
except ImportError:
    # MCP features are disabled; mcp_types stays None as a sentinel.
    mcp_types = None
    MCP_AVAILABLE = False
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ToolkitError(Exception):
    """Common base class for all errors raised by the toolkit system."""
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class MCPNotAvailableError(ToolkitError):
    """Raised when MCP functionality is requested but the optional mcp package is missing."""
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class ToolConverter:
    """Static helpers that translate tools between supported formats."""

    @staticmethod
    def langchain_to_mcp(langchain_tool: BaseTool) -> "mcp_types.Tool":
        """
        Convert a LangChain tool to MCP format.

        Args:
            langchain_tool: LangChain BaseTool instance.

        Returns:
            MCP Tool instance with the same name, description, and schema.

        Raises:
            MCPNotAvailableError: If the optional mcp package is missing.
        """
        if not MCP_AVAILABLE:
            raise MCPNotAvailableError("MCP package is not installed")

        # Derive the input schema from the tool's args model, if it has one.
        schema = {}
        if langchain_tool.args_schema:
            schema = langchain_tool.args_schema.model_json_schema()

        return mcp_types.Tool(
            name=langchain_tool.name,
            description=langchain_tool.description,
            inputSchema=schema,
        )

    @staticmethod
    def function_to_langchain(
        func: Callable, name: Optional[str] = None, description: Optional[str] = None
    ) -> BaseTool:
        """
        Wrap a plain callable as a LangChain tool.

        Args:
            func: Function to convert.
            name: Optional tool name (defaults to the function's __name__).
            description: Optional tool description (defaults to the function's
                docstring, with a trailing period appended when missing).

        Returns:
            LangChain BaseTool instance.
        """
        tool_name = func.__name__ if name is None else name

        if description is not None:
            tool_description = description
        else:
            # Fall back to the docstring; normalize non-empty ones to end
            # with a period.
            doc = func.__doc__ or ""
            if doc and not doc.endswith("."):
                tool_description = doc.strip() + "."
            else:
                tool_description = doc

        return tool(tool_name, description=tool_description)(func)
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class BaseToolkit(abc.ABC):
    """
    Abstract base for synchronous toolkits.

    Gives every toolkit implementation a uniform surface: configuration
    handling, a per-class logger, lazy LLM-client access, activation-list
    filtering, and conversion to LangChain/MCP tool formats.
    """

    def __init__(self, config: Optional[Union[ToolkitConfig, Dict[str, Any]]] = None):
        """
        Initialize the toolkit.

        Args:
            config: Toolkit configuration as a ToolkitConfig, a plain dict
                (coerced into a ToolkitConfig), or None for defaults.
        """
        if config is None:
            self.config = ToolkitConfig()
        elif isinstance(config, dict):
            self.config = ToolkitConfig(**config)
        else:
            self.config = config

        self.logger = get_logger(f"{self.__class__.__module__}.{self.__class__.__name__}")
        # Tools map is computed once and memoized here.
        self._tools_cache: Optional[Dict[str, Callable]] = None
        # LLM client is created lazily on first access (see llm_client).
        self._llm_client: Optional[BaseLLMClient] = None

    @property
    def llm_client(self) -> BaseLLMClient:
        """Lazily instantiate and return the configured LLM client."""
        if self._llm_client is None:
            self._llm_client = self.config.get_llm_client()
        return self._llm_client

    @abc.abstractmethod
    def get_tools_map(self) -> Dict[str, Callable]:
        """
        Return a mapping of tool names to their implementation callables.

        Returns:
            Dict mapping tool names to callable functions.
        """

    def get_filtered_tools_map(self) -> Dict[str, Callable]:
        """
        Return the tools map restricted to config.activated_tools.

        Returns:
            The full (cached) map when no activation list is configured,
            otherwise only the activated subset.

        Raises:
            ToolkitError: If an activated tool name has no implementation.
        """
        if self._tools_cache is None:
            self._tools_cache = self.get_tools_map()

        activated = self.config.activated_tools
        if activated is None:
            return self._tools_cache

        # Fail loudly when the configuration names tools we do not provide.
        missing_tools = set(activated) - set(self._tools_cache.keys())
        if missing_tools:
            raise ToolkitError(
                f"Activated tools not found in {self.__class__.__name__}: {missing_tools}. "
                f"Available tools: {list(self._tools_cache.keys())}"
            )

        return {name: self._tools_cache[name] for name in activated}

    def get_langchain_tools(self) -> List[BaseTool]:
        """
        Return the activated tools wrapped as LangChain BaseTool instances.

        Returns:
            List of LangChain BaseTool instances.
        """
        filtered = self.get_filtered_tools_map()
        return [ToolConverter.function_to_langchain(fn, tool_name) for tool_name, fn in filtered.items()]

    def get_mcp_tools(self) -> List["mcp_types.Tool"]:
        """
        Return the activated tools in MCP format.

        Returns:
            List of MCP Tool instances.

        Raises:
            MCPNotAvailableError: If the optional mcp package is missing.
        """
        if not MCP_AVAILABLE:
            raise MCPNotAvailableError("MCP package is not installed")

        return [ToolConverter.langchain_to_mcp(lc_tool) for lc_tool in self.get_langchain_tools()]

    def call_tool(self, name: str, **kwargs) -> Any:
        """
        Execute the named tool with the given keyword arguments.

        Args:
            name: Tool name.
            **kwargs: Arguments forwarded to the tool callable.

        Returns:
            Whatever the tool callable returns.

        Raises:
            ToolkitError: If the tool is not among the activated tools.
        """
        tools_map = self.get_filtered_tools_map()
        if name not in tools_map:
            raise ToolkitError(f"Tool '{name}' not found in {self.__class__.__name__}")

        tool_func = tools_map[name]
        self.logger.debug(f"Calling tool '{name}' with args: {kwargs}")

        try:
            result = tool_func(**kwargs)
            self.logger.debug(f"Tool '{name}' completed successfully")
            return result
        except Exception as e:
            # Log the failure, then let the original exception propagate.
            self.logger.error(f"Tool '{name}' failed: {e}")
            raise
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
class AsyncBaseToolkit(BaseToolkit):
    """
    Base class for asynchronous toolkits.

    Extends BaseToolkit with async variants of the tool accessors, a
    build/cleanup lifecycle, async-context-manager support, and sync
    compatibility wrappers.
    """

    def __init__(self, config: Optional[Union[ToolkitConfig, Dict[str, Any]]] = None):
        """
        Initialize the async toolkit.

        Args:
            config: Toolkit configuration (ToolkitConfig instance or dict)
        """
        super().__init__(config)
        # Tracks whether build() has run; reset by cleanup().
        self._built = False

    async def __aenter__(self):
        """Async context manager entry: build the toolkit."""
        await self.build()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit: release resources."""
        await self.cleanup()

    async def build(self):
        """
        Build/initialize the toolkit (idempotent).

        Override this method to perform async initialization tasks.
        """
        if self._built:
            return
        self.logger.debug(f"Building {self.__class__.__name__}")
        self._built = True

    async def cleanup(self):
        """
        Cleanup toolkit resources.

        Override this method to perform cleanup tasks.
        """
        self.logger.debug(f"Cleaning up {self.__class__.__name__}")
        self._built = False

    @abc.abstractmethod
    async def get_tools_map(self) -> Dict[str, Callable]:
        """
        Get a mapping of tool names to their implementation functions.

        Returns:
            Dict mapping tool names to callable functions
        """

    async def get_filtered_tools_map(self) -> Dict[str, Callable]:
        """
        Get tools map filtered by activated_tools configuration.

        Returns:
            Dict containing only activated tools

        Raises:
            ToolkitError: If an activated tool name has no implementation.
        """
        if self._tools_cache is None:
            self._tools_cache = await self.get_tools_map()

        if self.config.activated_tools is None:
            return self._tools_cache

        # Validate that all activated tools exist
        missing_tools = set(self.config.activated_tools) - set(self._tools_cache.keys())
        if missing_tools:
            raise ToolkitError(
                f"Activated tools not found in {self.__class__.__name__}: {missing_tools}. "
                f"Available tools: {list(self._tools_cache.keys())}"
            )

        return {name: self._tools_cache[name] for name in self.config.activated_tools}

    async def get_langchain_tools(self) -> List[BaseTool]:
        """
        Get tools in LangChain format.

        Returns:
            List of LangChain BaseTool instances
        """
        tools_map = await self.get_filtered_tools_map()
        return [ToolConverter.function_to_langchain(func, name) for name, func in tools_map.items()]

    async def get_mcp_tools(self) -> List["mcp_types.Tool"]:
        """
        Get tools in MCP format.

        Returns:
            List of MCP Tool instances

        Raises:
            MCPNotAvailableError: If MCP is not available
        """
        if not MCP_AVAILABLE:
            raise MCPNotAvailableError("MCP package is not installed")

        langchain_tools = await self.get_langchain_tools()
        return [ToolConverter.langchain_to_mcp(tool) for tool in langchain_tools]

    async def call_tool(self, name: str, **kwargs) -> Any:
        """
        Call a tool by name with the provided arguments.

        Coroutine tool functions are awaited; plain functions are invoked
        directly.

        Args:
            name: Tool name
            **kwargs: Tool arguments

        Returns:
            Tool execution result

        Raises:
            ToolkitError: If tool is not found
        """
        tools_map = await self.get_filtered_tools_map()
        if name not in tools_map:
            raise ToolkitError(f"Tool '{name}' not found in {self.__class__.__name__}")

        tool_func = tools_map[name]
        self.logger.debug(f"Calling tool '{name}' with args: {kwargs}")

        try:
            # Handle both sync and async functions
            if asyncio.iscoroutinefunction(tool_func):
                result = await tool_func(**kwargs)
            else:
                result = tool_func(**kwargs)
            self.logger.debug(f"Tool '{name}' completed successfully")
            return result
        except Exception as e:
            self.logger.error(f"Tool '{name}' failed: {e}")
            raise

    # Sync compatibility methods

    @staticmethod
    def _sync_event_loop() -> "asyncio.AbstractEventLoop":
        """
        Return an event loop usable from synchronous code.

        asyncio.get_event_loop() is deprecated when the current thread has no
        event loop, and raises RuntimeError in that case on newer Python
        versions (and always in non-main threads), so fall back to creating
        and installing a fresh loop.
        """
        try:
            return asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            return loop

    def get_tools_map_sync(self) -> Dict[str, Callable]:
        """Synchronous version of get_tools_map for compatibility."""
        loop = self._sync_event_loop()
        if not self._built:
            loop.run_until_complete(self.build())
        return loop.run_until_complete(self.get_tools_map())

    def get_langchain_tools_sync(self) -> List[BaseTool]:
        """Synchronous version of get_langchain_tools for compatibility."""
        return self._sync_event_loop().run_until_complete(self.get_langchain_tools())
|