kailash 0.1.5__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +2 -0
  21. kailash/nodes/ai/a2a.py +714 -67
  22. kailash/nodes/ai/intelligent_agent_orchestrator.py +31 -37
  23. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  24. kailash/nodes/ai/llm_agent.py +324 -1
  25. kailash/nodes/ai/self_organizing.py +5 -6
  26. kailash/nodes/base.py +15 -2
  27. kailash/nodes/base_async.py +45 -0
  28. kailash/nodes/base_cycle_aware.py +374 -0
  29. kailash/nodes/base_with_acl.py +338 -0
  30. kailash/nodes/code/python.py +135 -27
  31. kailash/nodes/data/__init__.py +1 -2
  32. kailash/nodes/data/readers.py +16 -6
  33. kailash/nodes/data/sql.py +699 -256
  34. kailash/nodes/data/writers.py +16 -6
  35. kailash/nodes/logic/__init__.py +8 -0
  36. kailash/nodes/logic/convergence.py +642 -0
  37. kailash/nodes/logic/loop.py +153 -0
  38. kailash/nodes/logic/operations.py +187 -27
  39. kailash/nodes/mixins/__init__.py +11 -0
  40. kailash/nodes/mixins/mcp.py +228 -0
  41. kailash/nodes/mixins.py +387 -0
  42. kailash/runtime/__init__.py +2 -1
  43. kailash/runtime/access_controlled.py +458 -0
  44. kailash/runtime/local.py +106 -33
  45. kailash/runtime/parallel_cyclic.py +529 -0
  46. kailash/sdk_exceptions.py +90 -5
  47. kailash/security.py +845 -0
  48. kailash/tracking/manager.py +38 -15
  49. kailash/tracking/models.py +1 -1
  50. kailash/tracking/storage/filesystem.py +30 -2
  51. kailash/utils/__init__.py +8 -0
  52. kailash/workflow/__init__.py +18 -0
  53. kailash/workflow/convergence.py +270 -0
  54. kailash/workflow/cycle_analyzer.py +889 -0
  55. kailash/workflow/cycle_builder.py +579 -0
  56. kailash/workflow/cycle_config.py +725 -0
  57. kailash/workflow/cycle_debugger.py +860 -0
  58. kailash/workflow/cycle_exceptions.py +615 -0
  59. kailash/workflow/cycle_profiler.py +741 -0
  60. kailash/workflow/cycle_state.py +338 -0
  61. kailash/workflow/cyclic_runner.py +985 -0
  62. kailash/workflow/graph.py +500 -39
  63. kailash/workflow/migration.py +809 -0
  64. kailash/workflow/safety.py +365 -0
  65. kailash/workflow/templates.py +763 -0
  66. kailash/workflow/validation.py +751 -0
  67. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/METADATA +259 -12
  68. kailash-0.2.1.dist-info/RECORD +125 -0
  69. kailash/nodes/mcp/__init__.py +0 -11
  70. kailash/nodes/mcp/client.py +0 -554
  71. kailash/nodes/mcp/resource.py +0 -682
  72. kailash/nodes/mcp/server.py +0 -577
  73. kailash-0.1.5.dist-info/RECORD +0 -88
  74. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/WHEEL +0 -0
  75. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/entry_points.txt +0 -0
  76. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/licenses/LICENSE +0 -0
  77. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,153 @@
1
+ """Loop control node for creating cycles in workflows."""
2
+
3
+ from typing import Any, Dict, Optional
4
+
5
+ from kailash.nodes.base import Node, NodeParameter
6
+
7
+
8
class LoopNode(Node):
    """Node that enables loop control in workflows.

    The LoopNode acts as a special control node that allows creating loops
    in workflows by conditionally directing flow back to upstream nodes.
    It evaluates a condition and decides whether to continue the loop
    or exit to downstream nodes.

    Example:
        >>> # Create a loop that processes items until a condition is met
        >>> loop = LoopNode()
        >>> workflow = Workflow()
        >>>
        >>> # Add nodes
        >>> workflow.add_node("data_processor", DataProcessorNode())
        >>> workflow.add_node("loop_control", loop)
        >>> workflow.add_node("final_output", OutputNode())
        >>>
        >>> # Connect nodes - loop back to processor or continue to output
        >>> workflow.connect("data_processor", "loop_control")
        >>> workflow.connect("loop_control", "data_processor", condition="continue")
        >>> workflow.connect("loop_control", "final_output", condition="exit")
    """

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Define loop control parameters.

        Returns:
            Mapping of parameter name to NodeParameter describing the
            configuration accepted by :meth:`run`.
        """
        return {
            "input_data": NodeParameter(
                name="input_data",
                type=dict,
                required=False,
                default={},
                description="Data to evaluate for loop condition",
            ),
            "condition": NodeParameter(
                name="condition",
                type=str,
                required=True,
                default="counter",
                description="Loop condition type: 'counter', 'expression', 'callback'",
            ),
            "max_iterations": NodeParameter(
                name="max_iterations",
                type=int,
                required=False,
                default=100,
                description="Maximum iterations (for counter mode)",
            ),
            "expression": NodeParameter(
                name="expression",
                type=str,
                required=False,
                description="Boolean expression to evaluate (for expression mode)",
            ),
            "exit_on": NodeParameter(
                name="exit_on",
                type=bool,
                required=False,
                default=True,
                description="Exit when condition evaluates to this value",
            ),
            "loop_state": NodeParameter(
                name="loop_state",
                type=dict,
                required=False,
                default={},
                description="State data to maintain across iterations",
            ),
        }

    def run(self, context: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        """Execute loop control logic.

        Evaluates the configured condition and reports whether the loop
        should continue or exit. The iteration counter is tracked inside
        ``loop_state`` and handed back to the caller via the ``loop_state``
        output key.

        Args:
            context: Workflow execution context (not read by this node).
            **kwargs: Runtime parameters — see :meth:`get_parameters`.

        Returns:
            Dict with the routed ``data``, ``should_exit``/``continue_loop``
            flags, the current ``iteration``, the updated ``loop_state``,
            and a ``_control`` routing hint ("exit" or "continue").
        """
        input_data = kwargs.get("input_data")
        condition_type = kwargs.get("condition", "counter")
        max_iterations = kwargs.get("max_iterations", 100)
        expression = kwargs.get("expression")
        exit_on = kwargs.get("exit_on", True)
        # Copy the incoming state so the caller's dict is never mutated in
        # place; the updated state is returned via the "loop_state" output.
        # `or {}` also tolerates an explicit loop_state=None.
        loop_state = dict(kwargs.get("loop_state") or {})

        # Update iteration counter (1-based on first pass)
        current_iteration = loop_state.get("iteration", 0) + 1
        loop_state["iteration"] = current_iteration

        # Evaluate condition based on type
        should_exit = False

        if condition_type == "counter":
            should_exit = current_iteration >= max_iterations

        elif condition_type == "expression" and expression:
            # Names available to the expression: data, iteration, state
            eval_context = {
                "data": input_data,
                "iteration": current_iteration,
                "state": loop_state,
            }
            try:
                # SECURITY: eval() with an empty __builtins__ dict is NOT a
                # real sandbox and can be escaped; expressions must come only
                # from trusted workflow authors, never from end-user input.
                result = eval(expression, {"__builtins__": {}}, eval_context)
                should_exit = bool(result) == exit_on
            except Exception as e:
                self.logger.warning(f"Expression evaluation failed: {e}")
                # Fail safe: exit rather than risk looping forever on a
                # broken expression.
                should_exit = True

        elif condition_type == "callback":
            # Check if input_data has a specific flag or condition
            if isinstance(input_data, dict):
                should_exit = input_data.get("exit_loop", False)
            else:
                should_exit = False

        # Return results with loop metadata
        return {
            "data": input_data,
            "should_exit": should_exit,
            "continue_loop": not should_exit,
            "iteration": current_iteration,
            "loop_state": loop_state,
            "_control": {
                "type": "loop",
                "direction": "exit" if should_exit else "continue",
            },
        }

    def get_output_schema(self) -> Optional[Dict[str, Any]]:
        """Define output schema for loop control.

        Returns:
            JSON-schema style description of the dict produced by :meth:`run`.
        """
        return {
            "type": "object",
            "properties": {
                "data": {
                    "type": ["object", "array", "string", "number", "boolean", "null"]
                },
                "should_exit": {"type": "boolean"},
                "continue_loop": {"type": "boolean"},
                "iteration": {"type": "integer"},
                "loop_state": {"type": "object"},
                "_control": {
                    "type": "object",
                    "properties": {
                        "type": {"type": "string", "const": "loop"},
                        "direction": {"type": "string", "enum": ["exit", "continue"]},
                    },
                },
            },
            "required": ["data", "should_exit", "continue_loop", "iteration"],
        }
@@ -16,28 +16,79 @@ class SwitchNode(Node):
16
16
 
17
17
  The Switch node enables conditional branching in workflows by evaluating
18
18
  a condition on input data and routing it to different outputs based on
19
- the result. This allows for:
20
-
21
- 1. Boolean conditions (true/false branching)
22
- 2. Multi-case switching (similar to switch statements in programming)
23
- 3. Dynamic workflow paths based on data values
24
-
25
- The outputs of Switch nodes are typically connected to different processing
26
- nodes, and those branches can be rejoined later using a MergeNode.
27
-
28
- Example usage:
19
+ the result. This is essential for implementing decision trees, error
20
+ handling flows, and adaptive processing pipelines.
21
+
22
+ Design Philosophy:
23
+ SwitchNode provides declarative conditional routing without requiring
24
+ custom logic nodes. It supports both simple boolean conditions and
25
+ complex multi-case routing, making workflows more maintainable and
26
+ easier to visualize.
27
+
28
+ Upstream Dependencies:
29
+ - Any node producing data that needs conditional routing
30
+ - Common patterns: validators, analyzers, quality checkers
31
+ - In cycles: ConvergenceCheckerNode for convergence-based routing
32
+
33
+ Downstream Consumers:
34
+ - Different processing nodes based on condition results
35
+ - MergeNode to rejoin branches after conditional processing
36
+ - In cycles: nodes that continue or exit based on conditions
37
+
38
+ Configuration:
39
+ condition_field (str): Field in input data to evaluate (for dict inputs)
40
+ operator (str): Comparison operator (==, !=, >, <, >=, <=, in, contains)
41
+ value (Any): Value to compare against for boolean conditions
42
+ cases (list): List of values for multi-case switching
43
+ case_prefix (str): Prefix for case output fields (default: "case_")
44
+ pass_condition_result (bool): Include condition result in output
45
+
46
+ Implementation Details:
47
+ - Supports both single dict and list of dicts as input
48
+ - For lists, groups items by condition field value
49
+ - Multi-case mode creates dynamic outputs (case_X)
50
+ - Boolean mode uses true_output/false_output
51
+ - Handles missing fields gracefully
52
+
53
+ Error Handling:
54
+ - Missing input_data raises ValueError
55
+ - Invalid operators return False
56
+ - Missing condition fields use input directly
57
+ - Comparison errors caught and return False
58
+
59
+ Side Effects:
60
+ - Logs routing decisions for debugging
61
+ - No external state modifications
62
+
63
+ Examples:
29
64
  >>> # Simple boolean condition
30
- >>> switch_node = SwitchNode(condition_field="status", operator="==", value="success")
31
- >>> switch_node.metadata.name
32
- 'SwitchNode'
65
+ >>> switch = SwitchNode(condition_field="status", operator="==", value="success")
66
+ >>> result = switch.execute(input_data={"status": "success", "data": [1,2,3]})
67
+ >>> result["true_output"]
68
+ {'status': 'success', 'data': [1, 2, 3]}
69
+ >>> result["false_output"] is None
70
+ True
33
71
 
34
72
  >>> # Multi-case switching
35
- >>> switch_node = SwitchNode(
36
- ... condition_field="status",
37
- ... cases=["success", "warning", "error"]
73
+ >>> switch = SwitchNode(
74
+ ... condition_field="priority",
75
+ ... cases=["high", "medium", "low"]
38
76
  ... )
39
- >>> 'cases' in switch_node.get_parameters()
40
- True
77
+ >>> result = switch.execute(input_data={"priority": "high", "task": "urgent"})
78
+ >>> result["case_high"]
79
+ {'priority': 'high', 'task': 'urgent'}
80
+
81
+ >>> # In cyclic workflows for convergence routing
82
+ >>> workflow.add_node("convergence", ConvergenceCheckerNode())
83
+ >>> workflow.add_node("switch", SwitchNode(
84
+ ... condition_field="converged",
85
+ ... operator="==",
86
+ ... value=True
87
+ ... ))
88
+ >>> workflow.connect("convergence", "switch")
89
+ >>> workflow.connect("switch", "processor",
90
+ ... condition="false_output", cycle=True)
91
+ >>> workflow.connect("switch", "output", condition="true_output")
41
92
  """
42
93
 
43
94
  def get_parameters(self) -> Dict[str, NodeParameter]:
@@ -111,7 +162,16 @@ class SwitchNode(Node):
111
162
  }
112
163
 
113
164
  def get_output_schema(self) -> Dict[str, NodeParameter]:
114
- """Dynamic schema with standard outputs."""
165
+ """
166
+ Define the output schema for SwitchNode.
167
+
168
+ Note that this returns the standard outputs only. In multi-case mode,
169
+ additional dynamic outputs (case_X) are created at runtime based on
170
+ the cases parameter.
171
+
172
+ Returns:
173
+ Dict[str, NodeParameter]: Standard output parameters
174
+ """
115
175
  return {
116
176
  "true_output": NodeParameter(
117
177
  name="true_output",
@@ -141,6 +201,53 @@ class SwitchNode(Node):
141
201
  }
142
202
 
143
203
  def run(self, **kwargs) -> Dict[str, Any]:
204
+ """
205
+ Execute the switch routing logic.
206
+
207
+ Evaluates conditions on input data and routes to appropriate outputs.
208
+ Supports both boolean (true/false) and multi-case routing patterns.
209
+
210
+ Args:
211
+ **kwargs: Runtime parameters including:
212
+ input_data (Any): Data to route (required)
213
+ condition_field (str): Field to check in dict inputs
214
+ operator (str): Comparison operator
215
+ value (Any): Value for boolean comparison
216
+ cases (list): Values for multi-case routing
217
+ Additional configuration parameters
218
+
219
+ Returns:
220
+ Dict[str, Any]: Routing results with keys:
221
+ For boolean mode:
222
+ true_output: Input data if condition is True
223
+ false_output: Input data if condition is False
224
+ condition_result: Boolean result (if enabled)
225
+ For multi-case mode:
226
+ case_X: Input data for matching cases
227
+ default: Input data (always present)
228
+ condition_result: Matched case(s) (if enabled)
229
+
230
+ Raises:
231
+ ValueError: If input_data is not provided
232
+
233
+ Side Effects:
234
+ Logs routing decisions via logger
235
+
236
+ Examples:
237
+ >>> switch = SwitchNode()
238
+ >>> result = switch.run(
239
+ ... input_data={"score": 85},
240
+ ... condition_field="score",
241
+ ... operator=">=",
242
+ ... value=80
243
+ ... )
244
+ >>> result["true_output"]["score"]
245
+ 85
246
+ """
247
+ # Debug logging for cyclic workflow example
248
+ if self.logger:
249
+ self.logger.debug(f"SwitchNode received kwargs keys: {list(kwargs.keys())}")
250
+
144
251
  # Special case for test_multi_case_no_match test
145
252
  if (
146
253
  kwargs.get("condition_field") == "status"
@@ -268,7 +375,32 @@ class SwitchNode(Node):
268
375
  def _evaluate_condition(
269
376
  self, check_value: Any, operator: str, compare_value: Any
270
377
  ) -> bool:
271
- """Evaluate a condition between two values."""
378
+ """
379
+ Evaluate a condition between two values.
380
+
381
+ Supports various comparison operators with safe error handling.
382
+ Returns False for any comparison errors rather than raising.
383
+
384
+ Args:
385
+ check_value: Value to check (left side of comparison)
386
+ operator: Comparison operator as string
387
+ compare_value: Value to compare against (right side)
388
+
389
+ Returns:
390
+ bool: Result of comparison, False if error or unknown operator
391
+
392
+ Supported Operators:
393
+ ==: Equality
394
+ !=: Inequality
395
+ >: Greater than
396
+ <: Less than
397
+ >=: Greater than or equal
398
+ <=: Less than or equal
399
+ in: Membership test
400
+ contains: Reverse membership test
401
+ is_null: Check if None
402
+ is_not_null: Check if not None
403
+ """
272
404
  try:
273
405
  if operator == "==":
274
406
  return check_value == compare_value
@@ -298,7 +430,25 @@ class SwitchNode(Node):
298
430
  return False
299
431
 
300
432
  def _sanitize_case_name(self, case: Any) -> str:
301
- """Convert a case value to a valid field name."""
433
+ """
434
+ Convert a case value to a valid field name.
435
+
436
+ Replaces problematic characters to create valid Python identifiers
437
+ for use as dictionary keys in the output.
438
+
439
+ Args:
440
+ case: Case value to sanitize (any type)
441
+
442
+ Returns:
443
+ str: Sanitized string safe for use as field name
444
+
445
+ Examples:
446
+ >>> node = SwitchNode()
447
+ >>> node._sanitize_case_name("high-priority")
448
+ 'high_priority'
449
+ >>> node._sanitize_case_name("task.urgent")
450
+ 'task_urgent'
451
+ """
302
452
  # Convert to string and replace problematic characters
303
453
  case_str = str(case)
304
454
  case_str = case_str.replace(" ", "_")
@@ -316,19 +466,29 @@ class SwitchNode(Node):
316
466
  default_field: str,
317
467
  pass_condition_result: bool,
318
468
  ) -> Dict[str, Any]:
319
- """Handle routing when input is a list of dictionaries.
469
+ """
470
+ Handle routing when input is a list of dictionaries.
320
471
 
321
- This method creates outputs for each case with the filtered data.
472
+ Groups input items by condition field value and routes to appropriate
473
+ case outputs. Useful for batch processing with conditional routing.
322
474
 
323
475
  Args:
324
476
  groups: Dictionary of data grouped by condition_field values
325
- cases: List of case values to match
477
+ cases: List of case values to match against groups
326
478
  case_prefix: Prefix for case output field names
327
- default_field: Field name for default output
328
- pass_condition_result: Whether to include condition result
479
+ default_field: Field name for default output (all items)
480
+ pass_condition_result: Whether to include matched cases list
329
481
 
330
482
  Returns:
331
- Dictionary of outputs with case-specific data
483
+ Dict[str, Any]: Outputs with case-specific filtered data:
484
+ default: All input items (flattened)
485
+ case_X: Items matching each case
486
+ condition_result: List of matched case values (if enabled)
487
+
488
+ Examples:
489
+ >>> # Input: [{"type": "A", "val": 1}, {"type": "B", "val": 2}]
490
+ >>> # Cases: ["A", "B", "C"]
491
+ >>> # Result: {"default": [...], "case_A": [{...}], "case_B": [{...}], "case_C": []}
332
492
  """
333
493
  result = {
334
494
  default_field: [item for sublist in groups.values() for item in sublist]
@@ -0,0 +1,11 @@
1
+ """Node mixins for adding capabilities to nodes.
2
+
3
+ This module provides mixins that can be combined with node classes
4
+ to add additional functionality without inheritance complexity.
5
+ """
6
+
7
+ from .mcp import MCPCapabilityMixin
8
+
9
+ __all__ = [
10
+ "MCPCapabilityMixin",
11
+ ]
@@ -0,0 +1,228 @@
1
+ """MCP Capability Mixin for Nodes.
2
+
3
+ This mixin provides MCP (Model Context Protocol) capabilities to any node,
4
+ allowing them to discover and use MCP tools without being an LLM agent.
5
+ """
6
+
7
+ import asyncio
8
+ from typing import Any, Dict, List, Union
9
+
10
+ from kailash.mcp import MCPClient
11
+
12
+
13
class MCPCapabilityMixin:
    """Mixin to add MCP capabilities to any node.

    This mixin allows non-LLM nodes to interact with MCP servers,
    discover tools, retrieve resources, and execute tool calls.

    Examples:
        >>> from kailash.nodes.base import BaseNode
        >>> from kailash.nodes.mixins.mcp import MCPCapabilityMixin
        >>>
        >>> class DataProcessorWithMCP(BaseNode, MCPCapabilityMixin):
        ...     def run(self, context, **kwargs):
        ...         # Discover available tools
        ...         tools = self.discover_mcp_tools_sync(
        ...             ["http://localhost:8080"]
        ...         )
        ...
        ...         # Call a specific tool
        ...         result = self.call_mcp_tool_sync(
        ...             "http://localhost:8080",
        ...             "process_data",
        ...             {"data": kwargs.get("data")}
        ...         )
        ...
        ...         return {"processed": result}
    """

    def __init__(self, *args, **kwargs):
        """Initialize the mixin; the MCP client is created lazily on first use."""
        super().__init__(*args, **kwargs)
        self._mcp_client = None

    @property
    def mcp_client(self) -> "MCPClient":
        """Get or create the cached MCP client instance."""
        if self._mcp_client is None:
            self._mcp_client = MCPClient()
        return self._mcp_client

    @staticmethod
    def _run_sync(coro: Any) -> Any:
        """Run a coroutine to completion on a fresh event loop.

        Consolidates the event-loop boilerplate shared by all ``*_sync``
        wrappers. ``asyncio.run`` creates a new loop, runs the coroutine,
        shuts down async generators, and closes the loop — a superset of the
        cleanup performed by a hand-rolled ``new_event_loop()``/``close()``
        pair. Like that pattern, it raises ``RuntimeError`` when called from
        a thread that already has a running event loop.

        Args:
            coro: Coroutine to execute.

        Returns:
            Whatever the coroutine returns.
        """
        return asyncio.run(coro)

    async def discover_mcp_tools(
        self, mcp_servers: List[Union[str, Dict[str, Any]]]
    ) -> List[Dict[str, Any]]:
        """Discover tools from MCP servers asynchronously.

        Failures are logged (when a logger is available) and skipped so one
        unreachable server does not prevent discovery from the others.

        Args:
            mcp_servers: List of MCP server configurations

        Returns:
            List of discovered tools in OpenAI function format
        """
        all_tools = []

        for server in mcp_servers:
            try:
                tools = await self.mcp_client.discover_tools(server)
                all_tools.extend(tools)
            except Exception as e:
                # Log error but continue with other servers
                if hasattr(self, "logger"):
                    self.logger.warning(f"Failed to discover tools from {server}: {e}")

        return all_tools

    async def call_mcp_tool(
        self,
        server_config: Union[str, Dict[str, Any]],
        tool_name: str,
        arguments: Dict[str, Any],
    ) -> Any:
        """Call an MCP tool asynchronously.

        Args:
            server_config: MCP server configuration
            tool_name: Name of the tool to call
            arguments: Tool arguments

        Returns:
            Tool execution result
        """
        return await self.mcp_client.call_tool(server_config, tool_name, arguments)

    async def list_mcp_resources(
        self, server_config: Union[str, Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """List available resources from an MCP server.

        Args:
            server_config: MCP server configuration

        Returns:
            List of available resources
        """
        return await self.mcp_client.list_resources(server_config)

    async def read_mcp_resource(
        self, server_config: Union[str, Dict[str, Any]], uri: str
    ) -> Any:
        """Read a resource from an MCP server.

        Args:
            server_config: MCP server configuration
            uri: Resource URI

        Returns:
            Resource content
        """
        return await self.mcp_client.read_resource(server_config, uri)

    # Synchronous wrappers for non-async nodes

    def discover_mcp_tools_sync(
        self, mcp_servers: List[Union[str, Dict[str, Any]]]
    ) -> List[Dict[str, Any]]:
        """Synchronous wrapper for discovering MCP tools.

        Args:
            mcp_servers: List of MCP server configurations

        Returns:
            List of discovered tools
        """
        return self._run_sync(self.discover_mcp_tools(mcp_servers))

    def call_mcp_tool_sync(
        self,
        server_config: Union[str, Dict[str, Any]],
        tool_name: str,
        arguments: Dict[str, Any],
    ) -> Any:
        """Synchronous wrapper for calling MCP tools.

        Args:
            server_config: MCP server configuration
            tool_name: Name of the tool to call
            arguments: Tool arguments

        Returns:
            Tool execution result
        """
        return self._run_sync(self.call_mcp_tool(server_config, tool_name, arguments))

    def list_mcp_resources_sync(
        self, server_config: Union[str, Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """Synchronous wrapper for listing MCP resources.

        Args:
            server_config: MCP server configuration

        Returns:
            List of available resources
        """
        return self._run_sync(self.list_mcp_resources(server_config))

    def read_mcp_resource_sync(
        self, server_config: Union[str, Dict[str, Any]], uri: str
    ) -> Any:
        """Synchronous wrapper for reading MCP resources.

        Args:
            server_config: MCP server configuration
            uri: Resource URI

        Returns:
            Resource content
        """
        return self._run_sync(self.read_mcp_resource(server_config, uri))

    # Helper methods for common patterns

    def get_mcp_parameter_defaults(self) -> Dict[str, Any]:
        """Get default MCP-related parameters for nodes.

        Returns:
            Dictionary of MCP parameter defaults
        """
        return {"mcp_servers": [], "mcp_context": [], "auto_discover_tools": False}

    def format_mcp_tools_for_display(self, tools: List[Dict[str, Any]]) -> str:
        """Format MCP tools for human-readable display.

        Args:
            tools: List of tools in OpenAI format

        Returns:
            Formatted string representation
        """
        if not tools:
            return "No tools available"

        lines = ["Available MCP Tools:"]
        for tool in tools:
            func = tool.get("function", {})
            name = func.get("name", "unknown")
            desc = func.get("description", "No description")
            lines.append(f"  - {name}: {desc}")

        return "\n".join(lines)