rakam-systems-core 0.1.1rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rakam_systems_core/__init__.py +41 -0
- rakam_systems_core/ai_core/__init__.py +68 -0
- rakam_systems_core/ai_core/base.py +142 -0
- rakam_systems_core/ai_core/config.py +12 -0
- rakam_systems_core/ai_core/config_loader.py +580 -0
- rakam_systems_core/ai_core/config_schema.py +395 -0
- rakam_systems_core/ai_core/interfaces/__init__.py +30 -0
- rakam_systems_core/ai_core/interfaces/agent.py +83 -0
- rakam_systems_core/ai_core/interfaces/chat_history.py +122 -0
- rakam_systems_core/ai_core/interfaces/chunker.py +11 -0
- rakam_systems_core/ai_core/interfaces/embedding_model.py +10 -0
- rakam_systems_core/ai_core/interfaces/indexer.py +10 -0
- rakam_systems_core/ai_core/interfaces/llm_gateway.py +139 -0
- rakam_systems_core/ai_core/interfaces/loader.py +86 -0
- rakam_systems_core/ai_core/interfaces/reranker.py +10 -0
- rakam_systems_core/ai_core/interfaces/retriever.py +11 -0
- rakam_systems_core/ai_core/interfaces/tool.py +162 -0
- rakam_systems_core/ai_core/interfaces/tool_invoker.py +260 -0
- rakam_systems_core/ai_core/interfaces/tool_loader.py +374 -0
- rakam_systems_core/ai_core/interfaces/tool_registry.py +287 -0
- rakam_systems_core/ai_core/interfaces/vectorstore.py +37 -0
- rakam_systems_core/ai_core/mcp/README.md +545 -0
- rakam_systems_core/ai_core/mcp/__init__.py +0 -0
- rakam_systems_core/ai_core/mcp/mcp_server.py +334 -0
- rakam_systems_core/ai_core/tracking.py +602 -0
- rakam_systems_core/ai_core/vs_core.py +55 -0
- rakam_systems_core/ai_utils/__init__.py +16 -0
- rakam_systems_core/ai_utils/logging.py +126 -0
- rakam_systems_core/ai_utils/metrics.py +10 -0
- rakam_systems_core/ai_utils/s3.py +480 -0
- rakam_systems_core/ai_utils/tracing.py +5 -0
- rakam_systems_core-0.1.1rc7.dist-info/METADATA +162 -0
- rakam_systems_core-0.1.1rc7.dist-info/RECORD +34 -0
- rakam_systems_core-0.1.1rc7.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, Iterator, Optional, Type, TypeVar

from pydantic import BaseModel, Field

from ..base import BaseComponent
|
|
6
|
+
|
|
7
|
+
# Generic type variable used by generate_structured: any pydantic model class.
T = TypeVar("T", bound=BaseModel)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class LLMRequest(BaseModel):
    """Standardized LLM request structure.

    Attributes:
        system_prompt: Optional system-level instruction for the model.
        user_prompt: The user's prompt text (required).
        temperature: Sampling temperature; None defers to the gateway default.
        max_tokens: Generation cap; None defers to the provider default.
        response_format: Desired output format, "text" or "json".
        json_schema: Optional pydantic model class describing structured output.
        extra_params: Provider-specific parameters passed through untouched.
    """

    system_prompt: Optional[str] = None
    user_prompt: str
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    response_format: Optional[str] = None  # "text" or "json"
    json_schema: Optional[Type[BaseModel]] = None
    # default_factory makes the fresh-dict-per-instance intent explicit; it is
    # the documented pydantic idiom for mutable defaults.
    extra_params: Dict[str, Any] = Field(default_factory=dict)

    class Config:
        # json_schema holds a class object, which is not a pydantic-native type
        arbitrary_types_allowed = True
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class LLMResponse(BaseModel):
    """Standardized LLM response structure.

    Attributes:
        content: Raw text returned by the model.
        parsed_content: Structured result when the request asked for one.
        usage: Provider-reported token usage, if available.
        model: Model identifier that produced the response.
        finish_reason: Provider-reported stop reason, if available.
        metadata: Additional provider-specific response data.
    """

    content: str
    parsed_content: Optional[Any] = None
    usage: Optional[Dict[str, Any]] = None
    model: Optional[str] = None
    finish_reason: Optional[str] = None
    # default_factory makes the fresh-dict-per-instance intent explicit; it is
    # the documented pydantic idiom for mutable defaults.
    metadata: Dict[str, Any] = Field(default_factory=dict)

    class Config:
        # parsed_content may hold arbitrary (non-pydantic) objects
        arbitrary_types_allowed = True
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class LLMGateway(BaseComponent, ABC):
    """Abstract base class for LLM gateway implementations.

    This gateway provides a standardized interface for interacting with various
    LLM providers (OpenAI, Mistral, etc.) with support for:
    - Text generation
    - Structured output generation
    - Streaming responses
    - Token counting
    """

    def __init__(
        self,
        name: str = "llm_gateway",
        config: Optional[Dict[str, Any]] = None,
        provider: Optional[str] = None,
        model: Optional[str] = None,
        default_temperature: float = 0.7,
        api_key: Optional[str] = None,
    ) -> None:
        """Initialize the gateway.

        Args:
            name: Component name forwarded to BaseComponent.
            config: Optional component configuration dictionary.
            provider: Provider identifier (e.g. "openai"); interpretation is
                left to concrete subclasses.
            model: Default model identifier for requests.
            default_temperature: Temperature subclasses may fall back to when
                a request omits one.
            api_key: Provider API key. NOTE(review): stored as a plain
                attribute — avoid logging or serializing instances.
        """
        super().__init__(name, config)
        self.provider = provider
        self.model = model
        self.default_temperature = default_temperature
        self.api_key = api_key

    @abstractmethod
    def generate(
        self,
        request: LLMRequest,
    ) -> LLMResponse:
        """Generate a response from the LLM.

        Args:
            request: Standardized LLM request

        Returns:
            Standardized LLM response
        """
        raise NotImplementedError

    @abstractmethod
    def generate_structured(
        self,
        request: LLMRequest,
        schema: Type[T],
    ) -> T:
        """Generate structured output conforming to a Pydantic schema.

        Args:
            request: Standardized LLM request
            schema: Pydantic model class to parse response into

        Returns:
            Instance of the schema class
        """
        raise NotImplementedError

    def stream(
        self,
        request: LLMRequest,
    ) -> Iterator[str]:
        """Stream token/segment responses.

        Args:
            request: Standardized LLM request

        Yields:
            String chunks from the LLM
        """
        # Default implementation is non-incremental: it blocks on generate()
        # and yields the full response as a single chunk. Subclasses should
        # override this with true provider-side streaming.
        response = self.generate(request)
        yield response.content

    @abstractmethod
    def count_tokens(
        self,
        text: str,
        model: Optional[str] = None,
    ) -> int:
        """Count tokens in text.

        Args:
            text: Text to count tokens for
            model: Model name to determine encoding

        Returns:
            Number of tokens
        """
        raise NotImplementedError

    # Legacy methods for backward compatibility
    def run(self, prompt: str, **kwargs: Any) -> str:
        """Legacy synchronous text completion.

        Args:
            prompt: User prompt text.
            **kwargs: Optional overrides (system_prompt, temperature,
                max_tokens). NOTE(review): kwargs is also forwarded wholesale
                as extra_params, so those keys appear there as duplicates —
                confirm downstream consumers tolerate this.

        Returns:
            The generated text content.
        """
        request = LLMRequest(
            user_prompt=prompt,
            system_prompt=kwargs.get("system_prompt"),
            temperature=kwargs.get("temperature"),
            max_tokens=kwargs.get("max_tokens"),
            extra_params=kwargs,
        )
        response = self.generate(request)
        return response.content
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
|
5
|
+
from ..base import BaseComponent
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
from ..vs_core import Node, VSFile
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Loader(BaseComponent, ABC):
    """Common interface for document loaders.

    A loader turns a document source (path, URL, identifier, ...) into one of
    several representations: raw text, text chunks, Node objects, or a VSFile.
    Concrete subclasses must implement every abstract method below.
    """

    @abstractmethod
    def run(self, source: str) -> List[str]:
        """Load raw documents from *source* (path, URL, id, etc.)."""
        raise NotImplementedError

    @abstractmethod
    def load_as_text(self, source: Union[str, Path]) -> str:
        """Return the entire document at *source* as one text string.

        Args:
            source: Path to the document file.

        Returns:
            The document's full text content.
        """
        raise NotImplementedError

    @abstractmethod
    def load_as_chunks(self, source: Union[str, Path]) -> List[str]:
        """Return the document at *source* split into text chunks.

        Args:
            source: Path to the document file.

        Returns:
            Text chunks extracted from the document.
        """
        raise NotImplementedError

    @abstractmethod
    def load_as_nodes(
        self,
        source: Union[str, Path],
        source_id: Optional[str] = None,
        custom_metadata: Optional[Dict[str, Any]] = None
    ) -> List["Node"]:
        """Return the document as Node objects carrying metadata.

        Args:
            source: Path to the document file.
            source_id: Source identifier; defaults to the file path.
            custom_metadata: Extra metadata to attach to every node.

        Returns:
            Node objects wrapping text chunks plus metadata.
        """
        raise NotImplementedError

    @abstractmethod
    def load_as_vsfile(
        self,
        file_path: Union[str, Path],
        custom_metadata: Optional[Dict[str, Any]] = None
    ) -> "VSFile":
        """Return the document as a VSFile object containing nodes.

        Args:
            file_path: Path to the document file.
            custom_metadata: Extra metadata to attach.

        Returns:
            A VSFile object with nodes.
        """
        raise NotImplementedError
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
from ..base import BaseComponent
|
|
5
|
+
|
|
6
|
+
class Reranker(BaseComponent, ABC):
    """Interface for components that reorder retrieved documents by relevance."""

    @abstractmethod
    def run(self, documents: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Return a new list containing *documents* reordered by relevance."""
        raise NotImplementedError
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
from ..base import BaseComponent
|
|
5
|
+
|
|
6
|
+
class Retriever(BaseComponent, ABC):
    """Interface for components that fetch candidate documents for a query."""

    @abstractmethod
    def run(self, query: str) -> List[Dict[str, Any]]:
        """Return candidate hits with metadata for *query*.

        The output schema is intentionally loose to stay dependency-free.
        """
        raise NotImplementedError
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from typing import Any, Callable, Dict, Optional, Awaitable
|
|
4
|
+
import inspect
|
|
5
|
+
from ..base import BaseComponent
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ToolComponent(BaseComponent, ABC):
    """
    Represents a callable external or internal tool, compatible with Pydantic AI.

    This is the base class for all tools in the system. Tools can be functions
    or callable objects that can be invoked by agents.

    Attributes:
        name: Unique name for the tool
        description: Human-readable description
        function: The callable function (defaults to self.run)
        json_schema: JSON schema for tool parameters
        takes_ctx: Whether the tool takes context as first argument
        is_async: Whether the function is async
    """

    def __init__(
        self,
        name: str,
        config: Optional[Dict[str, Any]] = None,
        description: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        takes_ctx: bool = False,
    ) -> None:
        """
        Initialize a ToolComponent.

        Args:
            name: Unique name for the tool
            config: Optional configuration dictionary
            description: Human-readable description of what the tool does
                (falls back to "Tool: <name>" when omitted)
            json_schema: JSON schema defining the tool's parameters
                (falls back to _generate_default_schema() when omitted)
            takes_ctx: Whether the tool takes context as first argument
        """
        super().__init__(name, config)
        self.description = description or f"Tool: {name}"
        self.json_schema = json_schema or self._generate_default_schema()
        self.takes_ctx = takes_ctx

        # Set function to the run method for Pydantic AI compatibility
        # Check if run method is async
        self.function = self.run
        self.is_async = inspect.iscoroutinefunction(self.run)

    def _generate_default_schema(self) -> Dict[str, Any]:
        """
        Generate a default JSON schema for the tool.
        Subclasses can override this to provide custom schemas.
        """
        # Default shape assumes a single required string parameter "query";
        # tools with other parameters must supply json_schema explicitly.
        return {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "Input query or parameter for the tool"
                }
            },
            "required": ["query"],
            "additionalProperties": False,
        }

    @abstractmethod
    def run(self, *args: Any, **kwargs: Any) -> Any:
        """
        Execute the primary operation for the tool.
        This method should be overridden by subclasses.
        """
        raise NotImplementedError

    @classmethod
    def from_function(
        cls,
        function: Callable[..., Any],
        name: str,
        description: str,
        json_schema: Dict[str, Any],
        takes_ctx: bool = False,
    ) -> "ToolComponent":
        """
        Create a ToolComponent from a function (Pydantic AI compatible).

        This factory method allows creating tool instances from standalone functions.

        Args:
            function: The callable function
            name: Unique name for the tool
            description: Human-readable description
            json_schema: JSON schema for parameters
            takes_ctx: Whether the tool takes context as first argument

        Returns:
            FunctionToolComponent instance wrapping the function
        """
        # Always returns FunctionToolComponent regardless of cls.
        return FunctionToolComponent(
            function=function,
            name=name,
            description=description,
            json_schema=json_schema,
            takes_ctx=takes_ctx,
        )

    async def acall(self, *args: Any, **kwargs: Any) -> Any:
        """
        Async call for the tool.
        Automatically handles both sync and async run methods.
        """
        # is_async reflects inspect.iscoroutinefunction(self.run), computed
        # once in __init__; subclasses replacing run later must update it.
        if self.is_async:
            return await self.run(*args, **kwargs)
        else:
            return self.run(*args, **kwargs)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class FunctionToolComponent(ToolComponent):
    """
    A ToolComponent that wraps a standalone function.

    Used internally by ToolComponent.from_function to expose a plain callable
    through the ToolComponent interface.
    """

    def __init__(
        self,
        function: Callable[..., Any],
        name: str,
        description: str,
        json_schema: Dict[str, Any],
        takes_ctx: bool = False,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        # Keep a reference before the base initializer runs, since the base
        # __init__ inspects self.run, which delegates to this attribute.
        self._wrapped_function = function

        super().__init__(
            name=name,
            config=config,
            description=description,
            json_schema=json_schema,
            takes_ctx=takes_ctx,
        )

        # The base class pointed function/is_async at self.run; repoint them
        # at the wrapped callable so introspection reflects the real target.
        self.function = function
        self.is_async = inspect.iscoroutinefunction(function)

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the wrapped callable synchronously."""
        return self._wrapped_function(*args, **kwargs)

    async def acall(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the wrapped callable, awaiting the result when it is async."""
        outcome = self._wrapped_function(*args, **kwargs)
        return await outcome if self.is_async else outcome
|
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tool Invoker for uniform tool invocation across different modes.
|
|
3
|
+
Supports both direct tool calls and MCP-based tool calls.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
from typing import Any, Dict, Optional, Union
|
|
7
|
+
import asyncio
|
|
8
|
+
from .tool_registry import ToolRegistry, ToolMetadata, ToolMode
|
|
9
|
+
from .tool import ToolComponent
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ToolInvocationError(Exception):
    """Base exception for tool invocation errors."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ToolNotFoundError(ToolInvocationError):
    """Raised when a tool is not found in the registry."""
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class MCPServerNotFoundError(ToolInvocationError):
    """Raised when an MCP server is not available."""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ToolInvoker:
    """
    Uniform interface for invoking tools regardless of their execution mode.

    Supports:
    - Direct tool invocation (synchronous and asynchronous)
    - MCP-based tool invocation via registered servers
    - Automatic mode selection based on tool registration
    - Error handling and validation

    Example:
        >>> registry = ToolRegistry()
        >>> invoker = ToolInvoker(registry)
        >>>
        >>> # Invoke a direct tool
        >>> result = await invoker.ainvoke("calculate", x=10, y=20)
        >>>
        >>> # Invoke an MCP tool (with MCP server registered)
        >>> invoker.register_mcp_server("search_server", mcp_server_instance)
        >>> result = await invoker.ainvoke("web_search", query="Python")
    """

    def __init__(self, registry: ToolRegistry):
        """
        Initialize the ToolInvoker.

        Args:
            registry: ToolRegistry instance containing registered tools
        """
        self.registry = registry
        # Maps server name -> MCP server instance, used for ToolMode.MCP tools.
        self._mcp_servers: Dict[str, Any] = {}

    def register_mcp_server(self, server_name: str, server_instance: Any) -> None:
        """
        Register an MCP server for tool invocation.

        Args:
            server_name: Name of the MCP server
            server_instance: The MCP server instance
        """
        self._mcp_servers[server_name] = server_instance

    def unregister_mcp_server(self, server_name: str) -> bool:
        """
        Unregister an MCP server.

        Args:
            server_name: Name of the MCP server

        Returns:
            True if server was unregistered, False if not found
        """
        if server_name in self._mcp_servers:
            del self._mcp_servers[server_name]
            return True
        return False

    def _lookup(self, tool_name: str) -> ToolMetadata:
        """Fetch tool metadata from the registry or raise ToolNotFoundError."""
        metadata = self.registry.get_tool(tool_name)
        if metadata is None:
            raise ToolNotFoundError(f"Tool '{tool_name}' not found in registry")
        return metadata

    def invoke(self, tool_name: str, **kwargs: Any) -> Any:
        """
        Synchronously invoke a tool by name.

        Args:
            tool_name: Name of the tool to invoke
            **kwargs: Arguments to pass to the tool

        Returns:
            Result from the tool execution

        Raises:
            ToolNotFoundError: If tool is not registered
            ToolInvocationError: If tool invocation fails
        """
        metadata = self._lookup(tool_name)

        if metadata.mode == ToolMode.DIRECT:
            return self._invoke_direct(metadata, **kwargs)
        elif metadata.mode == ToolMode.MCP:
            return self._invoke_mcp(metadata, **kwargs)
        else:
            raise ToolInvocationError(f"Unknown tool mode: {metadata.mode}")

    async def ainvoke(self, tool_name: str, **kwargs: Any) -> Any:
        """
        Asynchronously invoke a tool by name.

        Args:
            tool_name: Name of the tool to invoke
            **kwargs: Arguments to pass to the tool

        Returns:
            Result from the tool execution

        Raises:
            ToolNotFoundError: If tool is not registered
            ToolInvocationError: If tool invocation fails
        """
        metadata = self._lookup(tool_name)

        if metadata.mode == ToolMode.DIRECT:
            return await self._ainvoke_direct(metadata, **kwargs)
        elif metadata.mode == ToolMode.MCP:
            return await self._ainvoke_mcp(metadata, **kwargs)
        else:
            raise ToolInvocationError(f"Unknown tool mode: {metadata.mode}")

    def _invoke_direct(self, metadata: ToolMetadata, **kwargs: Any) -> Any:
        """Invoke a direct tool synchronously."""
        tool_instance = metadata.tool_instance

        if isinstance(tool_instance, ToolComponent):
            # ToolComponent - call the function directly (it's now compatible with kwargs)
            return tool_instance.function(**kwargs)
        else:
            raise ToolInvocationError(
                f"Invalid tool instance type for '{metadata.name}': {type(tool_instance)}"
            )

    async def _ainvoke_direct(self, metadata: ToolMetadata, **kwargs: Any) -> Any:
        """Invoke a direct tool asynchronously."""
        tool_instance = metadata.tool_instance

        if isinstance(tool_instance, ToolComponent):
            # ToolComponent - use acall method for async support
            return await tool_instance.acall(**kwargs)
        else:
            raise ToolInvocationError(
                f"Invalid tool instance type for '{metadata.name}': {type(tool_instance)}"
            )

    def _resolve_mcp_target(self, metadata: ToolMetadata) -> tuple:
        """Return (server, tool_name) for an MCP tool or raise MCPServerNotFoundError."""
        if metadata.mcp_server not in self._mcp_servers:
            raise MCPServerNotFoundError(
                f"MCP server '{metadata.mcp_server}' not registered"
            )
        server = self._mcp_servers[metadata.mcp_server]
        tool_name = metadata.mcp_tool_name or metadata.name
        return server, tool_name

    @staticmethod
    def _build_message(tool_name: str, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Build the MCP 'invoke_tool' message payload."""
        return {
            'action': 'invoke_tool',
            'tool_name': tool_name,
            'arguments': kwargs,
        }

    def _invoke_mcp(self, metadata: ToolMetadata, **kwargs: Any) -> Any:
        """Invoke a tool via MCP server synchronously."""
        server, tool_name = self._resolve_mcp_target(metadata)
        message = self._build_message(tool_name, kwargs)

        try:
            # Send message to the tool component (receiver is the tool name)
            result = server.send_message(
                sender='tool_invoker',
                receiver=tool_name,
                message=message
            )
            return result
        except Exception as e:
            raise ToolInvocationError(
                f"Failed to invoke MCP tool '{metadata.name}': {str(e)}"
            ) from e

    async def _ainvoke_mcp(self, metadata: ToolMetadata, **kwargs: Any) -> Any:
        """Invoke a tool via MCP server asynchronously."""
        server, tool_name = self._resolve_mcp_target(metadata)
        message = self._build_message(tool_name, kwargs)

        try:
            # Check if server has async send_message
            if hasattr(server, 'asend_message'):
                result = await server.asend_message(
                    sender='tool_invoker',
                    receiver=tool_name,
                    message=message
                )
            else:
                # Fall back to sync in thread pool. get_running_loop() replaces
                # the deprecated get_event_loop() — we are always inside a
                # running loop here.
                result = await asyncio.get_running_loop().run_in_executor(
                    None,
                    server.send_message,
                    'tool_invoker',
                    tool_name,
                    message
                )
            return result
        except Exception as e:
            raise ToolInvocationError(
                f"Failed to invoke MCP tool '{metadata.name}': {str(e)}"
            ) from e

    def get_tool_info(self, tool_name: str) -> Optional[Dict[str, Any]]:
        """
        Get information about a registered tool.

        Args:
            tool_name: Name of the tool

        Returns:
            Dictionary with tool information or None if not found
        """
        metadata = self.registry.get_tool(tool_name)
        if metadata is None:
            return None

        return metadata.to_dict()

    def list_available_tools(self) -> Dict[str, Dict[str, Any]]:
        """
        List all available tools with their information.

        Returns:
            Dictionary mapping tool names to their metadata
        """
        return {
            metadata.name: metadata.to_dict()
            for metadata in self.registry.get_all_tools()
        }

    def __repr__(self) -> str:
        return (
            f"ToolInvoker(tools={len(self.registry)}, "
            f"mcp_servers={len(self._mcp_servers)})"
        )
|
|
260
|
+
|