kailash 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/access_control.py +740 -0
- kailash/api/__main__.py +6 -0
- kailash/api/auth.py +668 -0
- kailash/api/custom_nodes.py +285 -0
- kailash/api/custom_nodes_secure.py +377 -0
- kailash/api/database.py +620 -0
- kailash/api/studio.py +915 -0
- kailash/api/studio_secure.py +893 -0
- kailash/mcp/__init__.py +53 -0
- kailash/mcp/__main__.py +13 -0
- kailash/mcp/ai_registry_server.py +712 -0
- kailash/mcp/client.py +447 -0
- kailash/mcp/client_new.py +334 -0
- kailash/mcp/server.py +293 -0
- kailash/mcp/server_new.py +336 -0
- kailash/mcp/servers/__init__.py +12 -0
- kailash/mcp/servers/ai_registry.py +289 -0
- kailash/nodes/__init__.py +4 -2
- kailash/nodes/ai/__init__.py +38 -0
- kailash/nodes/ai/a2a.py +1790 -0
- kailash/nodes/ai/agents.py +116 -2
- kailash/nodes/ai/ai_providers.py +206 -8
- kailash/nodes/ai/intelligent_agent_orchestrator.py +2108 -0
- kailash/nodes/ai/iterative_llm_agent.py +1280 -0
- kailash/nodes/ai/llm_agent.py +324 -1
- kailash/nodes/ai/self_organizing.py +1623 -0
- kailash/nodes/api/http.py +106 -25
- kailash/nodes/api/rest.py +116 -21
- kailash/nodes/base.py +15 -2
- kailash/nodes/base_async.py +45 -0
- kailash/nodes/base_cycle_aware.py +374 -0
- kailash/nodes/base_with_acl.py +338 -0
- kailash/nodes/code/python.py +135 -27
- kailash/nodes/data/readers.py +116 -53
- kailash/nodes/data/writers.py +16 -6
- kailash/nodes/logic/__init__.py +8 -0
- kailash/nodes/logic/async_operations.py +48 -9
- kailash/nodes/logic/convergence.py +642 -0
- kailash/nodes/logic/loop.py +153 -0
- kailash/nodes/logic/operations.py +212 -27
- kailash/nodes/logic/workflow.py +26 -18
- kailash/nodes/mixins/__init__.py +11 -0
- kailash/nodes/mixins/mcp.py +228 -0
- kailash/nodes/mixins.py +387 -0
- kailash/nodes/transform/__init__.py +8 -1
- kailash/nodes/transform/processors.py +119 -4
- kailash/runtime/__init__.py +2 -1
- kailash/runtime/access_controlled.py +458 -0
- kailash/runtime/local.py +106 -33
- kailash/runtime/parallel_cyclic.py +529 -0
- kailash/sdk_exceptions.py +90 -5
- kailash/security.py +845 -0
- kailash/tracking/manager.py +38 -15
- kailash/tracking/models.py +1 -1
- kailash/tracking/storage/filesystem.py +30 -2
- kailash/utils/__init__.py +8 -0
- kailash/workflow/__init__.py +18 -0
- kailash/workflow/convergence.py +270 -0
- kailash/workflow/cycle_analyzer.py +768 -0
- kailash/workflow/cycle_builder.py +573 -0
- kailash/workflow/cycle_config.py +709 -0
- kailash/workflow/cycle_debugger.py +760 -0
- kailash/workflow/cycle_exceptions.py +601 -0
- kailash/workflow/cycle_profiler.py +671 -0
- kailash/workflow/cycle_state.py +338 -0
- kailash/workflow/cyclic_runner.py +985 -0
- kailash/workflow/graph.py +500 -39
- kailash/workflow/migration.py +768 -0
- kailash/workflow/safety.py +365 -0
- kailash/workflow/templates.py +744 -0
- kailash/workflow/validation.py +693 -0
- {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/METADATA +446 -13
- kailash-0.2.0.dist-info/RECORD +125 -0
- kailash/nodes/mcp/__init__.py +0 -11
- kailash/nodes/mcp/client.py +0 -554
- kailash/nodes/mcp/resource.py +0 -682
- kailash/nodes/mcp/server.py +0 -577
- kailash-0.1.4.dist-info/RECORD +0 -85
- {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/WHEEL +0 -0
- {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/top_level.txt +0 -0
kailash/nodes/mixins/mcp.py
ADDED
@@ -0,0 +1,228 @@
"""MCP Capability Mixin for Nodes.

This mixin provides MCP (Model Context Protocol) capabilities to any node,
allowing them to discover and use MCP tools without being an LLM agent.
"""

import asyncio
from typing import Any, Dict, List, Union

from kailash.mcp import MCPClient


class MCPCapabilityMixin:
    """Mixin to add MCP capabilities to any node.

    This mixin allows non-LLM nodes to interact with MCP servers,
    discover tools, retrieve resources, and execute tool calls.

    Examples:
        >>> from kailash.nodes.base import BaseNode
        >>> from kailash.nodes.mixins.mcp import MCPCapabilityMixin
        >>>
        >>> class DataProcessorWithMCP(BaseNode, MCPCapabilityMixin):
        ...     def run(self, context, **kwargs):
        ...         # Discover available tools
        ...         tools = self.discover_mcp_tools_sync(
        ...             ["http://localhost:8080"]
        ...         )
        ...
        ...         # Call a specific tool
        ...         result = self.call_mcp_tool_sync(
        ...             "http://localhost:8080",
        ...             "process_data",
        ...             {"data": kwargs.get("data")}
        ...         )
        ...
        ...         return {"processed": result}
    """

    def __init__(self, *args, **kwargs):
        """Initialize the mixin."""
        super().__init__(*args, **kwargs)
        self._mcp_client = None

    @property
    def mcp_client(self) -> MCPClient:
        """Get or create MCP client instance."""
        if self._mcp_client is None:
            self._mcp_client = MCPClient()
        return self._mcp_client

    async def discover_mcp_tools(
        self, mcp_servers: List[Union[str, Dict[str, Any]]]
    ) -> List[Dict[str, Any]]:
        """Discover tools from MCP servers asynchronously.

        Args:
            mcp_servers: List of MCP server configurations

        Returns:
            List of discovered tools in OpenAI function format
        """
        all_tools = []

        for server in mcp_servers:
            try:
                tools = await self.mcp_client.discover_tools(server)
                all_tools.extend(tools)
            except Exception as e:
                # Log error but continue with other servers
                if hasattr(self, "logger"):
                    self.logger.warning(f"Failed to discover tools from {server}: {e}")

        return all_tools

    async def call_mcp_tool(
        self,
        server_config: Union[str, Dict[str, Any]],
        tool_name: str,
        arguments: Dict[str, Any],
    ) -> Any:
        """Call an MCP tool asynchronously.

        Args:
            server_config: MCP server configuration
            tool_name: Name of the tool to call
            arguments: Tool arguments

        Returns:
            Tool execution result
        """
        return await self.mcp_client.call_tool(server_config, tool_name, arguments)

    async def list_mcp_resources(
        self, server_config: Union[str, Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """List available resources from an MCP server.

        Args:
            server_config: MCP server configuration

        Returns:
            List of available resources
        """
        return await self.mcp_client.list_resources(server_config)

    async def read_mcp_resource(
        self, server_config: Union[str, Dict[str, Any]], uri: str
    ) -> Any:
        """Read a resource from an MCP server.

        Args:
            server_config: MCP server configuration
            uri: Resource URI

        Returns:
            Resource content
        """
        return await self.mcp_client.read_resource(server_config, uri)

    # Synchronous wrappers for non-async nodes

    def discover_mcp_tools_sync(
        self, mcp_servers: List[Union[str, Dict[str, Any]]]
    ) -> List[Dict[str, Any]]:
        """Synchronous wrapper for discovering MCP tools.

        Args:
            mcp_servers: List of MCP server configurations

        Returns:
            List of discovered tools
        """
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(self.discover_mcp_tools(mcp_servers))
        finally:
            loop.close()

    def call_mcp_tool_sync(
        self,
        server_config: Union[str, Dict[str, Any]],
        tool_name: str,
        arguments: Dict[str, Any],
    ) -> Any:
        """Synchronous wrapper for calling MCP tools.

        Args:
            server_config: MCP server configuration
            tool_name: Name of the tool to call
            arguments: Tool arguments

        Returns:
            Tool execution result
        """
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(
                self.call_mcp_tool(server_config, tool_name, arguments)
            )
        finally:
            loop.close()

    def list_mcp_resources_sync(
        self, server_config: Union[str, Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Synchronous wrapper for listing MCP resources.

        Args:
            server_config: MCP server configuration

        Returns:
            List of available resources
        """
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(self.list_mcp_resources(server_config))
        finally:
            loop.close()

    def read_mcp_resource_sync(
        self, server_config: Union[str, Dict[str, Any]], uri: str
    ) -> Any:
        """Synchronous wrapper for reading MCP resources.

        Args:
            server_config: MCP server configuration
            uri: Resource URI

        Returns:
            Resource content
        """
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(self.read_mcp_resource(server_config, uri))
        finally:
            loop.close()

    # Helper methods for common patterns

    def get_mcp_parameter_defaults(self) -> Dict[str, Any]:
        """Get default MCP-related parameters for nodes.

        Returns:
            Dictionary of MCP parameter defaults
        """
        return {"mcp_servers": [], "mcp_context": [], "auto_discover_tools": False}

    def format_mcp_tools_for_display(self, tools: List[Dict[str, Any]]) -> str:
        """Format MCP tools for human-readable display.

        Args:
            tools: List of tools in OpenAI format

        Returns:
            Formatted string representation
        """
        if not tools:
            return "No tools available"

        lines = ["Available MCP Tools:"]
        for tool in tools:
            func = tool.get("function", {})
            name = func.get("name", "unknown")
            desc = func.get("description", "No description")
            lines.append(f" - {name}: {desc}")

        return "\n".join(lines)
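For context, the sketch below shows how a non-LLM node might use the synchronous wrappers added in this file. It is illustrative only: it reuses the BaseNode base class and run(context, **kwargs) signature from the docstring example above, and the http://localhost:8080 server URL and process_data tool name are placeholders rather than anything shipped with the SDK.

from kailash.nodes.base import BaseNode  # base class as used in the docstring example above
from kailash.nodes.mixins.mcp import MCPCapabilityMixin


class ReportNodeWithMCP(BaseNode, MCPCapabilityMixin):
    """Hypothetical node that enriches its output via MCP tools."""

    def run(self, context, **kwargs):
        servers = kwargs.get("mcp_servers") or ["http://localhost:8080"]  # placeholder server

        # Discover tools; each *_sync wrapper spins up a fresh event loop internally,
        # so these calls belong in synchronous code, not inside an already running loop.
        tools = self.discover_mcp_tools_sync(servers)
        summary = self.format_mcp_tools_for_display(tools)

        # Call a placeholder tool only if one of the servers advertises it.
        result = None
        if any(t.get("function", {}).get("name") == "process_data" for t in tools):
            result = self.call_mcp_tool_sync(
                servers[0], "process_data", {"data": kwargs.get("data")}
            )

        return {"tool_summary": summary, "processed": result}

Because the wrappers call asyncio.new_event_loop() and close it afterwards, async nodes should prefer awaiting discover_mcp_tools / call_mcp_tool directly.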
kailash/nodes/mixins.py
ADDED
@@ -0,0 +1,387 @@
"""
Node mixins for the Kailash SDK.

This module provides mixins that add common functionality to nodes,
including security features, validation, and utility methods.

Design Philosophy:
- Composition over inheritance for optional features
- Security by default
- Minimal performance overhead
- Easy to integrate with existing nodes
"""

import logging
from typing import Any, Dict, Optional

from kailash.security import (
    SecurityConfig,
    SecurityError,
    get_security_config,
    sanitize_input,
    validate_node_parameters,
)

logger = logging.getLogger(__name__)


class SecurityMixin:
    """
    Mixin that adds security features to nodes.

    This mixin provides:
    - Input parameter validation and sanitization
    - Security policy enforcement
    - Audit logging for security events
    - Protection against common attack vectors

    Usage:
        class MySecureNode(SecurityMixin, Node):
            def run(self, **kwargs):
                # Input is automatically sanitized
                safe_params = self.validate_and_sanitize_inputs(kwargs)
                return self.process_safely(safe_params)
    """

    def __init__(
        self, *args, security_config: Optional[SecurityConfig] = None, **kwargs
    ):
        """
        Initialize security mixin.

        Args:
            security_config: Security configuration to use
            *args: Arguments passed to parent class
            **kwargs: Keyword arguments passed to parent class
        """
        super().__init__(*args, **kwargs)
        self.security_config = security_config or get_security_config()

        if self.security_config.enable_audit_logging:
            logger.info(f"Security mixin initialized for {self.__class__.__name__}")

    def validate_and_sanitize_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate and sanitize input parameters.

        Args:
            inputs: Dictionary of input parameters

        Returns:
            Dictionary of validated and sanitized parameters

        Raises:
            SecurityError: If validation fails
        """
        try:
            # First validate using the security framework
            validated_inputs = validate_node_parameters(inputs, self.security_config)

            if self.security_config.enable_audit_logging:
                logger.debug(
                    f"Inputs validated for {self.__class__.__name__}: {list(validated_inputs.keys())}"
                )

            return validated_inputs

        except SecurityError as e:
            if self.security_config.enable_audit_logging:
                logger.error(
                    f"Security validation failed for {self.__class__.__name__}: {e}"
                )
            raise
        except Exception as e:
            if self.security_config.enable_audit_logging:
                logger.error(
                    f"Unexpected validation error for {self.__class__.__name__}: {e}"
                )
            raise SecurityError(f"Input validation failed: {e}")

    def sanitize_single_input(self, value: Any, max_length: int = 10000) -> Any:
        """
        Sanitize a single input value.

        Args:
            value: Value to sanitize
            max_length: Maximum string length

        Returns:
            Sanitized value
        """
        return sanitize_input(value, max_length, config=self.security_config)

    def is_security_enabled(self) -> bool:
        """Check if security features are enabled."""
        return (
            self.security_config.enable_path_validation
            or self.security_config.enable_command_validation
            or hasattr(self, "security_config")
        )

    def log_security_event(self, event: str, level: str = "INFO") -> None:
        """
        Log a security-related event.

        Args:
            event: Description of the security event
            level: Log level (INFO, WARNING, ERROR)
        """
        if not self.security_config.enable_audit_logging:
            return

        log_msg = f"Security event in {self.__class__.__name__}: {event}"

        if level.upper() == "ERROR":
            logger.error(log_msg)
        elif level.upper() == "WARNING":
            logger.warning(log_msg)
        else:
            logger.info(log_msg)


class ValidationMixin:
    """
    Mixin that adds enhanced input validation to nodes.

    This mixin provides:
    - Type checking and conversion
    - Range and constraint validation
    - Custom validation rules
    - Detailed error reporting
    """

    def validate_required_params(
        self, inputs: Dict[str, Any], required_params: list
    ) -> None:
        """
        Validate that all required parameters are present.

        Args:
            inputs: Input parameters
            required_params: List of required parameter names

        Raises:
            ValueError: If required parameters are missing
        """
        missing_params = [param for param in required_params if param not in inputs]
        if missing_params:
            raise ValueError(f"Missing required parameters: {missing_params}")

    def validate_param_types(
        self, inputs: Dict[str, Any], type_mapping: Dict[str, type]
    ) -> Dict[str, Any]:
        """
        Validate and convert parameter types.

        Args:
            inputs: Input parameters
            type_mapping: Dictionary mapping parameter names to expected types

        Returns:
            Dictionary with converted types

        Raises:
            TypeError: If type conversion fails
        """
        converted = {}

        for param_name, value in inputs.items():
            if param_name in type_mapping:
                expected_type = type_mapping[param_name]
                try:
                    if isinstance(value, expected_type):
                        converted[param_name] = value
                    else:
                        converted[param_name] = expected_type(value)
                except (ValueError, TypeError) as e:
                    raise TypeError(
                        f"Cannot convert {param_name} to {expected_type.__name__}: {e}"
                    )
            else:
                converted[param_name] = value

        return converted

    def validate_param_ranges(
        self, inputs: Dict[str, Any], range_mapping: Dict[str, tuple]
    ) -> None:
        """
        Validate that numeric parameters are within acceptable ranges.

        Args:
            inputs: Input parameters
            range_mapping: Dictionary mapping parameter names to (min, max) tuples

        Raises:
            ValueError: If parameters are out of range
        """
        for param_name, (min_val, max_val) in range_mapping.items():
            if param_name in inputs:
                value = inputs[param_name]
                if isinstance(value, (int, float)):
                    if value < min_val or value > max_val:
                        raise ValueError(
                            f"{param_name} must be between {min_val} and {max_val}, got {value}"
                        )


class PerformanceMixin:
    """
    Mixin that adds performance monitoring to nodes.

    This mixin provides:
    - Execution time tracking
    - Memory usage monitoring
    - Performance metrics collection
    - Optimization hints
    """

    def __init__(self, *args, **kwargs):
        """Initialize performance mixin."""
        super().__init__(*args, **kwargs)
        self.execution_times = []
        self.memory_usage = []
        self.performance_enabled = True

    def track_performance(self, func):
        """
        Decorator to track performance of node methods.

        Args:
            func: Function to wrap

        Returns:
            Wrapped function with performance tracking
        """
        import time
        import tracemalloc
        from functools import wraps

        @wraps(func)
        def wrapper(*args, **kwargs):
            if not self.performance_enabled:
                return func(*args, **kwargs)

            # Start tracking
            start_time = time.time()
            tracemalloc.start()

            try:
                result = func(*args, **kwargs)
                return result
            finally:
                # Record metrics
                execution_time = time.time() - start_time
                current, peak = tracemalloc.get_traced_memory()
                tracemalloc.stop()

                self.execution_times.append(execution_time)
                self.memory_usage.append(peak)

                if len(self.execution_times) > 100:  # Keep last 100 measurements
                    self.execution_times = self.execution_times[-100:]
                    self.memory_usage = self.memory_usage[-100:]

        return wrapper

    def get_performance_stats(self) -> Dict[str, Any]:
        """
        Get performance statistics for this node.

        Returns:
            Dictionary containing performance metrics
        """
        if not self.execution_times:
            return {"status": "No performance data available"}

        import statistics

        return {
            "executions": len(self.execution_times),
            "avg_execution_time": statistics.mean(self.execution_times),
            "min_execution_time": min(self.execution_times),
            "max_execution_time": max(self.execution_times),
            "avg_memory_usage": (
                statistics.mean(self.memory_usage) if self.memory_usage else 0
            ),
            "peak_memory_usage": max(self.memory_usage) if self.memory_usage else 0,
        }

    def reset_performance_stats(self) -> None:
        """Reset performance statistics."""
        self.execution_times.clear()
        self.memory_usage.clear()


class LoggingMixin:
    """
    Mixin that adds enhanced logging capabilities to nodes.

    This mixin provides:
    - Structured logging with context
    - Log level management
    - Performance logging
    - Debug information
    """

    def __init__(self, *args, log_level: str = "INFO", **kwargs):
        """
        Initialize logging mixin.

        Args:
            log_level: Default log level for this node
            *args: Arguments passed to parent class
            **kwargs: Keyword arguments passed to parent class
        """
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger(
            f"{self.__class__.__module__}.{self.__class__.__name__}"
        )
        self.logger.setLevel(getattr(logging, log_level.upper()))
        self.log_context = {"node_class": self.__class__.__name__}

    def log_with_context(self, level: str, message: str, **context) -> None:
        """
        Log a message with additional context.

        Args:
            level: Log level
            message: Log message
            **context: Additional context to include
        """
        full_context = {**self.log_context, **context}
        context_str = " | ".join(f"{k}={v}" for k, v in full_context.items())
        full_message = f"{message} | {context_str}"

        log_func = getattr(self.logger, level.lower())
        log_func(full_message)

    def log_node_execution(self, operation: str, **context) -> None:
        """
        Log node execution information.

        Args:
            operation: Type of operation being performed
            **context: Additional context
        """
        self.log_with_context("INFO", f"Node operation: {operation}", **context)

    def log_error_with_traceback(
        self, error: Exception, operation: str = "unknown"
    ) -> None:
        """
        Log an error with full traceback information.

        Args:
            error: Exception that occurred
            operation: Operation that failed
        """
        import traceback

        self.log_with_context(
            "ERROR",
            f"Operation failed: {operation}",
            error_type=type(error).__name__,
            error_message=str(error),
            traceback=traceback.format_exc(),
        )
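The four mixins above are meant to be composed with a node class through multiple inheritance, with the mixins listed before the base class so each cooperative __init__ in the chain runs (the SecurityMixin docstring shows the same ordering). The sketch below is a hypothetical composition, not SDK code: the BaseNode import follows the docstring example in mcp.py, and the path/limit parameters and _load_rows helper are invented for illustration.

from kailash.nodes.base import BaseNode  # assumed base class, per the docstring examples above
from kailash.nodes.mixins import (
    LoggingMixin,
    PerformanceMixin,
    SecurityMixin,
    ValidationMixin,
)


class AuditedReaderNode(SecurityMixin, ValidationMixin, PerformanceMixin, LoggingMixin, BaseNode):
    """Hypothetical node combining the security, validation, performance and logging mixins."""

    def _load_rows(self, path: str, limit: int) -> list:
        # Placeholder for the node's real work.
        return []

    def run(self, **kwargs):
        # SecurityMixin: sanitize everything before touching it.
        params = self.validate_and_sanitize_inputs(kwargs)

        # ValidationMixin: structural checks on the sanitized parameters.
        self.validate_required_params(params, ["path", "limit"])
        params = self.validate_param_types(params, {"limit": int})
        self.validate_param_ranges(params, {"limit": (1, 10_000)})

        # LoggingMixin: structured log line carrying node context.
        self.log_node_execution("load_rows", path=params["path"], limit=params["limit"])

        # PerformanceMixin: wrap the heavy helper so timing and peak memory are recorded.
        timed_load = self.track_performance(self._load_rows)
        rows = timed_load(params["path"], params["limit"])

        return {"rows": rows, "performance": self.get_performance_stats()}

Listing the mixins ahead of BaseNode keeps their __init__ methods earlier in the MRO, so self.security_config, the performance buffers, and self.logger all exist before run() is called.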
kailash/nodes/transform/__init__.py
@@ -6,10 +6,17 @@ from kailash.nodes.transform.formatters import (
     ContextFormatterNode,
     QueryTextWrapperNode,
 )
-from kailash.nodes.transform.processors import
+from kailash.nodes.transform.processors import (
+    DataTransformer,
+    Filter,
+    FilterNode,
+    Map,
+    Sort,
+)

 __all__ = [
     "Filter",
+    "FilterNode",
     "Map",
     "Sort",
     "DataTransformer",