griptape-nodes 0.56.0__py3-none-any.whl → 0.57.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- griptape_nodes/app/app.py +10 -15
- griptape_nodes/app/watch.py +35 -67
- griptape_nodes/bootstrap/utils/__init__.py +1 -0
- griptape_nodes/bootstrap/utils/python_subprocess_executor.py +122 -0
- griptape_nodes/bootstrap/workflow_executors/local_session_workflow_executor.py +418 -0
- griptape_nodes/bootstrap/workflow_executors/local_workflow_executor.py +37 -8
- griptape_nodes/bootstrap/workflow_executors/subprocess_workflow_executor.py +326 -0
- griptape_nodes/bootstrap/workflow_executors/utils/__init__.py +1 -0
- griptape_nodes/bootstrap/workflow_executors/utils/subprocess_script.py +51 -0
- griptape_nodes/bootstrap/workflow_publishers/__init__.py +1 -0
- griptape_nodes/bootstrap/workflow_publishers/local_workflow_publisher.py +43 -0
- griptape_nodes/bootstrap/workflow_publishers/subprocess_workflow_publisher.py +84 -0
- griptape_nodes/bootstrap/workflow_publishers/utils/__init__.py +1 -0
- griptape_nodes/bootstrap/workflow_publishers/utils/subprocess_script.py +54 -0
- griptape_nodes/cli/commands/engine.py +4 -15
- griptape_nodes/cli/main.py +6 -1
- griptape_nodes/exe_types/core_types.py +26 -0
- griptape_nodes/exe_types/node_types.py +116 -1
- griptape_nodes/retained_mode/events/agent_events.py +2 -0
- griptape_nodes/retained_mode/events/base_events.py +18 -17
- griptape_nodes/retained_mode/events/execution_events.py +3 -1
- griptape_nodes/retained_mode/events/flow_events.py +5 -7
- griptape_nodes/retained_mode/events/mcp_events.py +363 -0
- griptape_nodes/retained_mode/events/node_events.py +3 -4
- griptape_nodes/retained_mode/griptape_nodes.py +8 -0
- griptape_nodes/retained_mode/managers/agent_manager.py +67 -4
- griptape_nodes/retained_mode/managers/event_manager.py +31 -13
- griptape_nodes/retained_mode/managers/flow_manager.py +76 -44
- griptape_nodes/retained_mode/managers/library_manager.py +7 -9
- griptape_nodes/retained_mode/managers/mcp_manager.py +364 -0
- griptape_nodes/retained_mode/managers/node_manager.py +12 -1
- griptape_nodes/retained_mode/managers/settings.py +40 -0
- griptape_nodes/retained_mode/managers/workflow_manager.py +94 -8
- griptape_nodes/traits/multi_options.py +5 -1
- griptape_nodes/traits/options.py +10 -2
- {griptape_nodes-0.56.0.dist-info → griptape_nodes-0.57.0.dist-info}/METADATA +2 -2
- {griptape_nodes-0.56.0.dist-info → griptape_nodes-0.57.0.dist-info}/RECORD +39 -26
- {griptape_nodes-0.56.0.dist-info → griptape_nodes-0.57.0.dist-info}/WHEEL +0 -0
- {griptape_nodes-0.56.0.dist-info → griptape_nodes-0.57.0.dist-info}/entry_points.txt +0 -0
|
@@ -15,7 +15,15 @@ from griptape_nodes.exe_types.core_types import (
|
|
|
15
15
|
ParameterTypeBuiltin,
|
|
16
16
|
)
|
|
17
17
|
from griptape_nodes.exe_types.flow import ControlFlow
|
|
18
|
-
from griptape_nodes.exe_types.node_types import
|
|
18
|
+
from griptape_nodes.exe_types.node_types import (
|
|
19
|
+
LOCAL_EXECUTION,
|
|
20
|
+
BaseNode,
|
|
21
|
+
ErrorProxyNode,
|
|
22
|
+
NodeDependencies,
|
|
23
|
+
NodeResolutionState,
|
|
24
|
+
StartLoopNode,
|
|
25
|
+
StartNode,
|
|
26
|
+
)
|
|
19
27
|
from griptape_nodes.machines.control_flow import CompleteState, ControlFlowMachine
|
|
20
28
|
from griptape_nodes.machines.dag_builder import DagBuilder
|
|
21
29
|
from griptape_nodes.machines.parallel_resolution import ParallelResolutionMachine
|
|
@@ -1090,15 +1098,20 @@ class FlowManager:
|
|
|
1090
1098
|
# Step 4: Serialize the package node
|
|
1091
1099
|
unique_parameter_uuid_to_values = {}
|
|
1092
1100
|
serialized_parameter_value_tracker = SerializedParameterValueTracker()
|
|
1101
|
+
package_node = package_node_info.package_node
|
|
1102
|
+
# Set to LOCAL_EXECUTION before packaging to prevent recursive loop.
|
|
1103
|
+
previous_value = package_node.get_parameter_value("execution_environment")
|
|
1104
|
+
package_node.set_parameter_value("execution_environment", LOCAL_EXECUTION)
|
|
1093
1105
|
serialized_package_result = self._serialize_package_node(
|
|
1094
1106
|
node_name=package_node_info.package_node.name,
|
|
1095
1107
|
package_node=package_node_info.package_node,
|
|
1096
1108
|
unique_parameter_uuid_to_values=unique_parameter_uuid_to_values,
|
|
1097
1109
|
serialized_parameter_value_tracker=serialized_parameter_value_tracker,
|
|
1098
1110
|
)
|
|
1111
|
+
# Now that we've serialized the value as LOCAL_EXECUTION, we need to restore it to whatever it was before
|
|
1112
|
+
package_node.set_parameter_value("execution_environment", previous_value)
|
|
1099
1113
|
if isinstance(serialized_package_result, PackageNodeAsSerializedFlowResultFailure):
|
|
1100
1114
|
return serialized_package_result
|
|
1101
|
-
|
|
1102
1115
|
# Step 5: Create start node commands and data connections
|
|
1103
1116
|
start_node_result = self._create_start_node_commands(
|
|
1104
1117
|
request=request,
|
|
@@ -1152,7 +1165,6 @@ class FlowManager:
|
|
|
1152
1165
|
workflow_shape = GriptapeNodes.WorkflowManager().build_workflow_shape_from_parameter_info(
|
|
1153
1166
|
input_node_params=start_node_result.input_shape_data, output_node_params=end_node_result.output_shape_data
|
|
1154
1167
|
)
|
|
1155
|
-
|
|
1156
1168
|
# Return success result
|
|
1157
1169
|
return PackageNodeAsSerializedFlowResultSuccess(
|
|
1158
1170
|
result_details=f'Successfully packaged node "{package_node_info.package_node.name}" from flow "{package_node_info.package_flow_name}" as serialized flow with start node type "{request.start_node_type}" and end node type "{request.end_node_type}" from library "{request.start_end_specific_library_name}".',
|
|
@@ -1428,10 +1440,13 @@ class FlowManager:
|
|
|
1428
1440
|
start_to_package_data_connections.append(start_to_package_connection)
|
|
1429
1441
|
|
|
1430
1442
|
# Build complete SerializedNodeCommands for start node
|
|
1443
|
+
start_node_dependencies = NodeDependencies()
|
|
1444
|
+
start_node_dependencies.libraries.add(start_node_library_details)
|
|
1445
|
+
|
|
1431
1446
|
start_node_commands = SerializedNodeCommands(
|
|
1432
1447
|
create_node_command=start_create_node_command,
|
|
1433
1448
|
element_modification_commands=start_node_parameter_commands,
|
|
1434
|
-
|
|
1449
|
+
node_dependencies=start_node_dependencies,
|
|
1435
1450
|
node_uuid=start_node_uuid,
|
|
1436
1451
|
)
|
|
1437
1452
|
|
|
@@ -1519,10 +1534,13 @@ class FlowManager:
|
|
|
1519
1534
|
package_to_end_data_connections.append(package_to_end_connection)
|
|
1520
1535
|
|
|
1521
1536
|
# Build complete SerializedNodeCommands for end node
|
|
1537
|
+
end_node_dependencies = NodeDependencies()
|
|
1538
|
+
end_node_dependencies.libraries.add(end_node_library_details)
|
|
1539
|
+
|
|
1522
1540
|
end_node_commands = SerializedNodeCommands(
|
|
1523
1541
|
create_node_command=end_create_node_command,
|
|
1524
1542
|
element_modification_commands=end_node_parameter_commands,
|
|
1525
|
-
|
|
1543
|
+
node_dependencies=end_node_dependencies,
|
|
1526
1544
|
node_uuid=end_node_uuid,
|
|
1527
1545
|
)
|
|
1528
1546
|
|
|
@@ -1601,18 +1619,6 @@ class FlowManager:
|
|
|
1601
1619
|
serialized_package_result.serialized_node_commands.lock_node_command
|
|
1602
1620
|
)
|
|
1603
1621
|
|
|
1604
|
-
# Collect all libraries used
|
|
1605
|
-
end_node_library_details = LibraryNameAndVersion(
|
|
1606
|
-
library_name=request.start_end_specific_library_name,
|
|
1607
|
-
library_version=library_version,
|
|
1608
|
-
)
|
|
1609
|
-
|
|
1610
|
-
node_libraries_used = {
|
|
1611
|
-
serialized_package_result.serialized_node_commands.node_library_details,
|
|
1612
|
-
start_node_result.start_node_commands.node_library_details,
|
|
1613
|
-
end_node_library_details,
|
|
1614
|
-
}
|
|
1615
|
-
|
|
1616
1622
|
# Include all three nodes in the flow
|
|
1617
1623
|
all_serialized_nodes = [
|
|
1618
1624
|
start_node_result.start_node_commands,
|
|
@@ -1634,9 +1640,18 @@ class FlowManager:
|
|
|
1634
1640
|
metadata=packaged_flow_metadata,
|
|
1635
1641
|
)
|
|
1636
1642
|
|
|
1643
|
+
# Aggregate dependencies from the packaged nodes
|
|
1644
|
+
packaged_dependencies = self._aggregate_flow_dependencies(all_serialized_nodes, [])
|
|
1645
|
+
|
|
1646
|
+
# Add the start/end specific library dependency
|
|
1647
|
+
start_end_library_dependency = LibraryNameAndVersion(
|
|
1648
|
+
library_name=request.start_end_specific_library_name,
|
|
1649
|
+
library_version=library_version,
|
|
1650
|
+
)
|
|
1651
|
+
packaged_dependencies.libraries.add(start_end_library_dependency)
|
|
1652
|
+
|
|
1637
1653
|
# Build the complete SerializedFlowCommands
|
|
1638
1654
|
return SerializedFlowCommands(
|
|
1639
|
-
node_libraries_used=node_libraries_used,
|
|
1640
1655
|
flow_initialization_command=create_packaged_flow_request,
|
|
1641
1656
|
serialized_node_commands=all_serialized_nodes,
|
|
1642
1657
|
serialized_connections=all_connections,
|
|
@@ -1647,7 +1662,7 @@ class FlowManager:
|
|
|
1647
1662
|
},
|
|
1648
1663
|
set_lock_commands_per_node=set_lock_commands_per_node,
|
|
1649
1664
|
sub_flows_commands=[],
|
|
1650
|
-
|
|
1665
|
+
node_dependencies=packaged_dependencies,
|
|
1651
1666
|
)
|
|
1652
1667
|
|
|
1653
1668
|
async def on_start_flow_request(self, request: StartFlowRequest) -> ResultPayload: # noqa: C901, PLR0911, PLR0912
|
|
@@ -1901,6 +1916,31 @@ class FlowManager:
|
|
|
1901
1916
|
|
|
1902
1917
|
return ListFlowsInCurrentContextResultSuccess(flow_names=ret_list, result_details=details)
|
|
1903
1918
|
|
|
1919
|
+
def _aggregate_flow_dependencies(
|
|
1920
|
+
self, serialized_node_commands: list[SerializedNodeCommands], sub_flows_commands: list[SerializedFlowCommands]
|
|
1921
|
+
) -> NodeDependencies:
|
|
1922
|
+
"""Aggregate dependencies from nodes and sub-flows into a single NodeDependencies object.
|
|
1923
|
+
|
|
1924
|
+
Args:
|
|
1925
|
+
serialized_node_commands: List of serialized node commands to aggregate from
|
|
1926
|
+
sub_flows_commands: List of sub-flow commands to aggregate from
|
|
1927
|
+
|
|
1928
|
+
Returns:
|
|
1929
|
+
NodeDependencies object with all dependencies merged
|
|
1930
|
+
"""
|
|
1931
|
+
# Start with empty dependencies and aggregate into it
|
|
1932
|
+
aggregated_deps = NodeDependencies()
|
|
1933
|
+
|
|
1934
|
+
# Aggregate dependencies from all nodes
|
|
1935
|
+
for node_cmd in serialized_node_commands:
|
|
1936
|
+
aggregated_deps.aggregate_from(node_cmd.node_dependencies)
|
|
1937
|
+
|
|
1938
|
+
# Aggregate dependencies from all sub-flows
|
|
1939
|
+
for sub_flow_cmd in sub_flows_commands:
|
|
1940
|
+
aggregated_deps.aggregate_from(sub_flow_cmd.node_dependencies)
|
|
1941
|
+
|
|
1942
|
+
return aggregated_deps
|
|
1943
|
+
|
|
1904
1944
|
# TODO: https://github.com/griptape-ai/griptape-nodes/issues/861
|
|
1905
1945
|
# similar manager refactors: https://github.com/griptape-ai/griptape-nodes/issues/806
|
|
1906
1946
|
def on_serialize_flow_to_commands(self, request: SerializeFlowToCommandsRequest) -> ResultPayload: # noqa: C901, PLR0911, PLR0912, PLR0915
|
|
@@ -1922,12 +1962,6 @@ class FlowManager:
|
|
|
1922
1962
|
)
|
|
1923
1963
|
return SerializeFlowToCommandsResultFailure(result_details=details)
|
|
1924
1964
|
|
|
1925
|
-
# Track all node libraries that were in use by these Nodes
|
|
1926
|
-
node_libraries_in_use = set()
|
|
1927
|
-
|
|
1928
|
-
# Track all referenced workflows used by this flow and its sub-flows
|
|
1929
|
-
referenced_workflows_in_use = set()
|
|
1930
|
-
|
|
1931
1965
|
# Track all parameter values that were in use by these Nodes (maps UUID to Parameter value)
|
|
1932
1966
|
unique_parameter_uuid_to_values = {}
|
|
1933
1967
|
# And track how values map into that map.
|
|
@@ -1943,7 +1977,6 @@ class FlowManager:
|
|
|
1943
1977
|
workflow_name=referenced_workflow_name, # type: ignore[arg-type] # is_referenced_workflow() guarantees this is not None
|
|
1944
1978
|
imported_flow_metadata=flow.metadata,
|
|
1945
1979
|
)
|
|
1946
|
-
referenced_workflows_in_use.add(referenced_workflow_name) # type: ignore[arg-type] # is_referenced_workflow() guarantees this is not None
|
|
1947
1980
|
else:
|
|
1948
1981
|
# Always set set_as_new_context=False during serialization - let the workflow manager
|
|
1949
1982
|
# that loads this serialized flow decide whether to push it to context or not
|
|
@@ -1991,7 +2024,6 @@ class FlowManager:
|
|
|
1991
2024
|
node_name_to_uuid[node_name] = serialized_node.node_uuid
|
|
1992
2025
|
|
|
1993
2026
|
serialized_node_commands.append(serialized_node)
|
|
1994
|
-
node_libraries_in_use.add(serialized_node.node_library_details)
|
|
1995
2027
|
# Get the list of set value commands for THIS node.
|
|
1996
2028
|
set_value_commands_list = serialize_node_result.set_parameter_value_commands
|
|
1997
2029
|
if serialize_node_result.serialized_node_commands.lock_node_command is not None:
|
|
@@ -2040,20 +2072,22 @@ class FlowManager:
|
|
|
2040
2072
|
imported_flow_metadata=flow.metadata,
|
|
2041
2073
|
)
|
|
2042
2074
|
|
|
2075
|
+
# Create NodeDependencies with just the referenced workflow
|
|
2076
|
+
sub_flow_dependencies = NodeDependencies(
|
|
2077
|
+
referenced_workflows={referenced_workflow_name} # type: ignore[arg-type] # is_referenced_workflow() guarantees this is not None
|
|
2078
|
+
)
|
|
2079
|
+
|
|
2043
2080
|
serialized_flow = SerializedFlowCommands(
|
|
2044
|
-
node_libraries_used=set(),
|
|
2045
2081
|
flow_initialization_command=import_command,
|
|
2046
2082
|
serialized_node_commands=[],
|
|
2047
2083
|
serialized_connections=[],
|
|
2048
2084
|
unique_parameter_uuid_to_values={},
|
|
2049
2085
|
set_parameter_value_commands={},
|
|
2086
|
+
set_lock_commands_per_node={},
|
|
2050
2087
|
sub_flows_commands=[],
|
|
2051
|
-
|
|
2088
|
+
node_dependencies=sub_flow_dependencies,
|
|
2052
2089
|
)
|
|
2053
2090
|
sub_flow_commands.append(serialized_flow)
|
|
2054
|
-
|
|
2055
|
-
# Add this referenced workflow to our accumulation
|
|
2056
|
-
referenced_workflows_in_use.add(referenced_workflow_name) # type: ignore[arg-type] # is_referenced_workflow() guarantees this is not None
|
|
2057
2091
|
else:
|
|
2058
2092
|
# For standalone sub-flows, use the existing recursive serialization
|
|
2059
2093
|
with GriptapeNodes.ContextManager().flow(flow=flow):
|
|
@@ -2065,11 +2099,8 @@ class FlowManager:
|
|
|
2065
2099
|
serialized_flow = child_flow_result.serialized_flow_commands
|
|
2066
2100
|
sub_flow_commands.append(serialized_flow)
|
|
2067
2101
|
|
|
2068
|
-
|
|
2069
|
-
|
|
2070
|
-
|
|
2071
|
-
# Merge in all child flow referenced workflows.
|
|
2072
|
-
referenced_workflows_in_use.union(serialized_flow.referenced_workflows)
|
|
2102
|
+
# Aggregate all dependencies from nodes and sub-flows
|
|
2103
|
+
aggregated_dependencies = self._aggregate_flow_dependencies(serialized_node_commands, sub_flow_commands)
|
|
2073
2104
|
|
|
2074
2105
|
serialized_flow = SerializedFlowCommands(
|
|
2075
2106
|
flow_initialization_command=create_flow_request,
|
|
@@ -2079,8 +2110,7 @@ class FlowManager:
|
|
|
2079
2110
|
set_parameter_value_commands=set_parameter_value_commands_per_node,
|
|
2080
2111
|
set_lock_commands_per_node=set_lock_commands_per_node,
|
|
2081
2112
|
sub_flows_commands=sub_flow_commands,
|
|
2082
|
-
|
|
2083
|
-
referenced_workflows=referenced_workflows_in_use,
|
|
2113
|
+
node_dependencies=aggregated_dependencies,
|
|
2084
2114
|
)
|
|
2085
2115
|
details = f"Successfully serialized Flow '{flow_name}' into commands."
|
|
2086
2116
|
result = SerializeFlowToCommandsResultSuccess(serialized_flow_commands=serialized_flow, result_details=details)
|
|
@@ -2280,6 +2310,9 @@ class FlowManager:
|
|
|
2280
2310
|
self._global_dag_builder.clear()
|
|
2281
2311
|
logger.debug("Cancelling flow run")
|
|
2282
2312
|
|
|
2313
|
+
GriptapeNodes.EventManager().put_event(
|
|
2314
|
+
ExecutionGriptapeNodeEvent(wrapped_event=ExecutionEvent(payload=InvolvedNodesEvent(involved_nodes=[])))
|
|
2315
|
+
)
|
|
2283
2316
|
GriptapeNodes.EventManager().put_event(
|
|
2284
2317
|
ExecutionGriptapeNodeEvent(wrapped_event=ExecutionEvent(payload=ControlFlowCancelledEvent()))
|
|
2285
2318
|
)
|
|
@@ -2394,17 +2427,16 @@ class FlowManager:
|
|
|
2394
2427
|
try:
|
|
2395
2428
|
await resolution_machine.resolve_node(node)
|
|
2396
2429
|
except Exception as e:
|
|
2430
|
+
logger.exception("Exception during single node resolution")
|
|
2397
2431
|
if self.check_for_existing_running_flow():
|
|
2398
2432
|
self.cancel_flow_run()
|
|
2399
2433
|
raise RuntimeError(e) from e
|
|
2400
2434
|
if resolution_machine.is_complete():
|
|
2401
2435
|
self._global_single_node_resolution = False
|
|
2402
2436
|
self._global_control_flow_machine.context.current_nodes = []
|
|
2403
|
-
|
|
2404
|
-
|
|
2405
|
-
|
|
2406
|
-
)
|
|
2407
|
-
)
|
|
2437
|
+
GriptapeNodes.EventManager().put_event(
|
|
2438
|
+
ExecutionGriptapeNodeEvent(wrapped_event=ExecutionEvent(payload=InvolvedNodesEvent(involved_nodes=[])))
|
|
2439
|
+
)
|
|
2408
2440
|
|
|
2409
2441
|
async def single_execution_step(self, flow: ControlFlow, change_debug_mode: bool) -> None: # noqa: FBT001
|
|
2410
2442
|
# do a granular step
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
import asyncio
|
|
3
4
|
import importlib.util
|
|
4
5
|
import json
|
|
5
6
|
import logging
|
|
@@ -895,7 +896,8 @@ class LibraryManager:
|
|
|
895
896
|
continue # SKIP IT
|
|
896
897
|
|
|
897
898
|
# Attempt to load nodes from the library.
|
|
898
|
-
library_load_results =
|
|
899
|
+
library_load_results = await asyncio.to_thread(
|
|
900
|
+
self._attempt_load_nodes_from_library,
|
|
899
901
|
library_data=library_data,
|
|
900
902
|
library=library,
|
|
901
903
|
base_dir=base_dir,
|
|
@@ -1521,7 +1523,7 @@ class LibraryManager:
|
|
|
1521
1523
|
for library_result in metadata_result.successful_libraries:
|
|
1522
1524
|
if library_result.library_schema.name == LibraryManager.SANDBOX_LIBRARY_NAME:
|
|
1523
1525
|
# Handle sandbox library - use the schema we already have
|
|
1524
|
-
self._attempt_generate_sandbox_library_from_schema(
|
|
1526
|
+
await self._attempt_generate_sandbox_library_from_schema(
|
|
1525
1527
|
library_schema=library_result.library_schema, sandbox_directory=library_result.file_path
|
|
1526
1528
|
)
|
|
1527
1529
|
else:
|
|
@@ -1853,7 +1855,7 @@ class LibraryManager:
|
|
|
1853
1855
|
problems=problems,
|
|
1854
1856
|
)
|
|
1855
1857
|
|
|
1856
|
-
def _attempt_generate_sandbox_library_from_schema(
|
|
1858
|
+
async def _attempt_generate_sandbox_library_from_schema(
|
|
1857
1859
|
self, library_schema: LibrarySchema, sandbox_directory: str
|
|
1858
1860
|
) -> None:
|
|
1859
1861
|
"""Generate sandbox library using an existing schema, loading actual node classes."""
|
|
@@ -1945,7 +1947,8 @@ class LibraryManager:
|
|
|
1945
1947
|
return
|
|
1946
1948
|
|
|
1947
1949
|
# Load nodes into the library
|
|
1948
|
-
library_load_results =
|
|
1950
|
+
library_load_results = await asyncio.to_thread(
|
|
1951
|
+
self._attempt_load_nodes_from_library,
|
|
1949
1952
|
library_data=library_data,
|
|
1950
1953
|
library=library,
|
|
1951
1954
|
base_dir=sandbox_library_dir,
|
|
@@ -2058,9 +2061,4 @@ class LibraryManager:
|
|
|
2058
2061
|
if library_path.exists():
|
|
2059
2062
|
process_path(library_path)
|
|
2060
2063
|
|
|
2061
|
-
# Add from workspace - recursive discovery of library JSON files
|
|
2062
|
-
workspace_path = config_mgr.workspace_path
|
|
2063
|
-
if workspace_path.exists():
|
|
2064
|
-
process_path(workspace_path)
|
|
2065
|
-
|
|
2066
2064
|
return list(discovered_libraries)
|
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
"""MCP (Model Context Protocol) server management.
|
|
2
|
+
|
|
3
|
+
Handles MCP server configurations, enabling/disabling servers, and provides
|
|
4
|
+
event-based interface for frontend and backend interactions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from griptape_nodes.retained_mode.events.mcp_events import (
|
|
11
|
+
CreateMCPServerRequest,
|
|
12
|
+
CreateMCPServerResultFailure,
|
|
13
|
+
CreateMCPServerResultSuccess,
|
|
14
|
+
DeleteMCPServerRequest,
|
|
15
|
+
DeleteMCPServerResultFailure,
|
|
16
|
+
DeleteMCPServerResultSuccess,
|
|
17
|
+
DisableMCPServerRequest,
|
|
18
|
+
DisableMCPServerResultFailure,
|
|
19
|
+
DisableMCPServerResultSuccess,
|
|
20
|
+
EnableMCPServerRequest,
|
|
21
|
+
EnableMCPServerResultFailure,
|
|
22
|
+
EnableMCPServerResultSuccess,
|
|
23
|
+
GetEnabledMCPServersRequest,
|
|
24
|
+
GetEnabledMCPServersResultFailure,
|
|
25
|
+
GetEnabledMCPServersResultSuccess,
|
|
26
|
+
GetMCPServerRequest,
|
|
27
|
+
GetMCPServerResultFailure,
|
|
28
|
+
GetMCPServerResultSuccess,
|
|
29
|
+
ListMCPServersRequest,
|
|
30
|
+
ListMCPServersResultFailure,
|
|
31
|
+
ListMCPServersResultSuccess,
|
|
32
|
+
UpdateMCPServerRequest,
|
|
33
|
+
UpdateMCPServerResultFailure,
|
|
34
|
+
UpdateMCPServerResultSuccess,
|
|
35
|
+
)
|
|
36
|
+
from griptape_nodes.retained_mode.managers.config_manager import ConfigManager
|
|
37
|
+
from griptape_nodes.retained_mode.managers.event_manager import EventManager
|
|
38
|
+
from griptape_nodes.retained_mode.managers.settings import MCPServerConfig
|
|
39
|
+
|
|
40
|
+
logger = logging.getLogger("griptape_nodes")
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class MCPManager:
|
|
44
|
+
"""Manager for MCP server configurations and operations."""
|
|
45
|
+
|
|
46
|
+
def __init__(self, event_manager: EventManager | None = None, config_manager: ConfigManager | None = None) -> None:
|
|
47
|
+
"""Initialize the MCPManager.
|
|
48
|
+
|
|
49
|
+
Args:
|
|
50
|
+
event_manager: The EventManager instance to use for event handling.
|
|
51
|
+
config_manager: The ConfigManager instance to use for configuration management.
|
|
52
|
+
"""
|
|
53
|
+
self.config_manager = config_manager
|
|
54
|
+
if event_manager is not None:
|
|
55
|
+
# Register event handlers
|
|
56
|
+
event_manager.assign_manager_to_request_type(ListMCPServersRequest, self.on_list_mcp_servers_request)
|
|
57
|
+
event_manager.assign_manager_to_request_type(GetMCPServerRequest, self.on_get_mcp_server_request)
|
|
58
|
+
event_manager.assign_manager_to_request_type(CreateMCPServerRequest, self.on_create_mcp_server_request)
|
|
59
|
+
event_manager.assign_manager_to_request_type(UpdateMCPServerRequest, self.on_update_mcp_server_request)
|
|
60
|
+
event_manager.assign_manager_to_request_type(DeleteMCPServerRequest, self.on_delete_mcp_server_request)
|
|
61
|
+
event_manager.assign_manager_to_request_type(EnableMCPServerRequest, self.on_enable_mcp_server_request)
|
|
62
|
+
event_manager.assign_manager_to_request_type(DisableMCPServerRequest, self.on_disable_mcp_server_request)
|
|
63
|
+
event_manager.assign_manager_to_request_type(
|
|
64
|
+
GetEnabledMCPServersRequest, self.on_get_enabled_mcp_servers_request
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
def _get_mcp_servers(self, filter_by: dict[str, Any] | None = None) -> list[MCPServerConfig]:
|
|
68
|
+
"""Get the current MCP servers configuration from the config manager.
|
|
69
|
+
|
|
70
|
+
Args:
|
|
71
|
+
filter_by: Optional dict of field=value pairs to filter by.
|
|
72
|
+
Keys should match server config field names, values are the expected values.
|
|
73
|
+
"""
|
|
74
|
+
if self.config_manager is None:
|
|
75
|
+
return []
|
|
76
|
+
|
|
77
|
+
mcp_config_data = self.config_manager.get_config_value("mcp_servers", default=[])
|
|
78
|
+
if not mcp_config_data:
|
|
79
|
+
return []
|
|
80
|
+
|
|
81
|
+
try:
|
|
82
|
+
servers = [MCPServerConfig.model_validate(server) for server in mcp_config_data]
|
|
83
|
+
if filter_by:
|
|
84
|
+
filtered_servers = []
|
|
85
|
+
for server in servers:
|
|
86
|
+
match = True
|
|
87
|
+
for field, value in filter_by.items():
|
|
88
|
+
if getattr(server, field, None) != value:
|
|
89
|
+
match = False
|
|
90
|
+
break
|
|
91
|
+
if match:
|
|
92
|
+
filtered_servers.append(server)
|
|
93
|
+
return filtered_servers
|
|
94
|
+
return servers # noqa: TRY300
|
|
95
|
+
except Exception as e:
|
|
96
|
+
logger.error("Failed to parse MCP servers configuration: %s", e)
|
|
97
|
+
return []
|
|
98
|
+
|
|
99
|
+
def _save_mcp_servers(self, servers: list[MCPServerConfig]) -> None:
|
|
100
|
+
"""Save the MCP servers configuration to the config manager."""
|
|
101
|
+
if self.config_manager is None:
|
|
102
|
+
logger.warning("No config manager available, cannot save MCP configuration")
|
|
103
|
+
return
|
|
104
|
+
|
|
105
|
+
try:
|
|
106
|
+
self.config_manager.set_config_value("mcp_servers", [server.model_dump() for server in servers])
|
|
107
|
+
except Exception as e:
|
|
108
|
+
logger.error("Failed to save MCP servers configuration: %s", e)
|
|
109
|
+
|
|
110
|
+
def _update_server_fields(self, server_config: MCPServerConfig, request: UpdateMCPServerRequest) -> None:
|
|
111
|
+
"""Update server configuration fields from request."""
|
|
112
|
+
# Map request fields to server config attributes
|
|
113
|
+
field_mapping = {
|
|
114
|
+
"new_name": "name",
|
|
115
|
+
"transport": "transport",
|
|
116
|
+
"enabled": "enabled",
|
|
117
|
+
"command": "command",
|
|
118
|
+
"args": "args",
|
|
119
|
+
"env": "env",
|
|
120
|
+
"cwd": "cwd",
|
|
121
|
+
"encoding": "encoding",
|
|
122
|
+
"encoding_error_handler": "encoding_error_handler",
|
|
123
|
+
"url": "url",
|
|
124
|
+
"headers": "headers",
|
|
125
|
+
"timeout": "timeout",
|
|
126
|
+
"sse_read_timeout": "sse_read_timeout",
|
|
127
|
+
"terminate_on_close": "terminate_on_close",
|
|
128
|
+
"description": "description",
|
|
129
|
+
"capabilities": "capabilities",
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
# Update fields that are not None
|
|
133
|
+
for request_field, config_field in field_mapping.items():
|
|
134
|
+
value = getattr(request, request_field, None)
|
|
135
|
+
if value is not None:
|
|
136
|
+
setattr(server_config, config_field, value)
|
|
137
|
+
|
|
138
|
+
def on_list_mcp_servers_request(
|
|
139
|
+
self, request: ListMCPServersRequest
|
|
140
|
+
) -> ListMCPServersResultSuccess | ListMCPServersResultFailure:
|
|
141
|
+
"""Handle list MCP servers request."""
|
|
142
|
+
try:
|
|
143
|
+
servers = self._get_mcp_servers()
|
|
144
|
+
except Exception as e:
|
|
145
|
+
logger.error("Failed to list MCP servers: %s", e)
|
|
146
|
+
return ListMCPServersResultFailure(result_details=f"Failed to list MCP servers: {e}")
|
|
147
|
+
|
|
148
|
+
if request.include_disabled:
|
|
149
|
+
servers_dict = {server.name: server.model_dump() for server in servers}
|
|
150
|
+
else:
|
|
151
|
+
enabled_servers = [server for server in servers if server.enabled]
|
|
152
|
+
servers_dict = {server.name: server.model_dump() for server in enabled_servers}
|
|
153
|
+
|
|
154
|
+
# Success path after exception handling
|
|
155
|
+
return ListMCPServersResultSuccess(
|
|
156
|
+
# Pydantic model.model_dump() returns dict[str, Any] which matches MCPServerConfig TypedDict structure
|
|
157
|
+
servers=servers_dict, # type: ignore[arg-type]
|
|
158
|
+
result_details=f"Successfully listed {len(servers_dict)} MCP servers",
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
def on_get_mcp_server_request(
|
|
162
|
+
self, request: GetMCPServerRequest
|
|
163
|
+
) -> GetMCPServerResultSuccess | GetMCPServerResultFailure:
|
|
164
|
+
"""Handle get MCP server request."""
|
|
165
|
+
servers = self._get_mcp_servers(filter_by={"name": request.name})
|
|
166
|
+
server_config = servers[0] if servers else None
|
|
167
|
+
|
|
168
|
+
if server_config is None:
|
|
169
|
+
return GetMCPServerResultFailure(result_details=f"Failed to get MCP server '{request.name}' - not found")
|
|
170
|
+
|
|
171
|
+
# Success path after exception handling
|
|
172
|
+
return GetMCPServerResultSuccess(
|
|
173
|
+
# Pydantic model.model_dump() returns dict[str, Any] which matches MCPServerConfig TypedDict structure
|
|
174
|
+
server_config=server_config.model_dump(), # type: ignore[arg-type]
|
|
175
|
+
result_details=f"Successfully retrieved MCP server '{request.name}'",
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
def on_create_mcp_server_request(
|
|
179
|
+
self, request: CreateMCPServerRequest
|
|
180
|
+
) -> CreateMCPServerResultSuccess | CreateMCPServerResultFailure:
|
|
181
|
+
"""Handle create MCP server request."""
|
|
182
|
+
try:
|
|
183
|
+
servers = self._get_mcp_servers()
|
|
184
|
+
except Exception as e:
|
|
185
|
+
logger.error("Failed to create MCP server '%s': %s", request.name, e)
|
|
186
|
+
return CreateMCPServerResultFailure(result_details=f"Failed to create MCP server '{request.name}': {e}")
|
|
187
|
+
|
|
188
|
+
# Check if server already exists
|
|
189
|
+
for server in servers:
|
|
190
|
+
if server.name == request.name:
|
|
191
|
+
return CreateMCPServerResultFailure(
|
|
192
|
+
result_details=f"Failed to create MCP server '{request.name}' - already exists"
|
|
193
|
+
)
|
|
194
|
+
|
|
195
|
+
# Create new server configuration
|
|
196
|
+
server_config = MCPServerConfig(
|
|
197
|
+
name=request.name,
|
|
198
|
+
enabled=request.enabled,
|
|
199
|
+
transport=request.transport,
|
|
200
|
+
# StdioConnection fields
|
|
201
|
+
command=request.command,
|
|
202
|
+
args=request.args or [],
|
|
203
|
+
env=request.env or {},
|
|
204
|
+
cwd=request.cwd,
|
|
205
|
+
encoding=request.encoding,
|
|
206
|
+
encoding_error_handler=request.encoding_error_handler,
|
|
207
|
+
# HTTP-based connection fields
|
|
208
|
+
url=request.url,
|
|
209
|
+
headers=request.headers,
|
|
210
|
+
timeout=request.timeout,
|
|
211
|
+
sse_read_timeout=request.sse_read_timeout,
|
|
212
|
+
terminate_on_close=request.terminate_on_close,
|
|
213
|
+
# Common fields
|
|
214
|
+
description=request.description,
|
|
215
|
+
capabilities=request.capabilities or [],
|
|
216
|
+
)
|
|
217
|
+
|
|
218
|
+
servers.append(server_config)
|
|
219
|
+
|
|
220
|
+
try:
|
|
221
|
+
self._save_mcp_servers(servers)
|
|
222
|
+
except Exception as e:
|
|
223
|
+
logger.error("Failed to save MCP server '%s': %s", request.name, e)
|
|
224
|
+
return CreateMCPServerResultFailure(result_details=f"Failed to save MCP server '{request.name}': {e}")
|
|
225
|
+
|
|
226
|
+
# Success path after exception handling
|
|
227
|
+
return CreateMCPServerResultSuccess(
|
|
228
|
+
name=request.name, result_details=f"Successfully created MCP server '{request.name}'"
|
|
229
|
+
)
|
|
230
|
+
|
|
231
|
+
def on_update_mcp_server_request(
|
|
232
|
+
self, request: UpdateMCPServerRequest
|
|
233
|
+
) -> UpdateMCPServerResultSuccess | UpdateMCPServerResultFailure:
|
|
234
|
+
"""Handle update MCP server request."""
|
|
235
|
+
servers = self._get_mcp_servers(filter_by={"name": request.name})
|
|
236
|
+
server_config = servers[0] if servers else None
|
|
237
|
+
|
|
238
|
+
if server_config is None:
|
|
239
|
+
return UpdateMCPServerResultFailure(
|
|
240
|
+
result_details=f"Failed to update MCP server '{request.name}' - not found"
|
|
241
|
+
)
|
|
242
|
+
|
|
243
|
+
# Update only provided fields
|
|
244
|
+
self._update_server_fields(server_config, request)
|
|
245
|
+
|
|
246
|
+
try:
|
|
247
|
+
self._save_mcp_servers(servers)
|
|
248
|
+
except Exception as e:
|
|
249
|
+
logger.error("Failed to save MCP server '%s': %s", request.name, e)
|
|
250
|
+
return UpdateMCPServerResultFailure(result_details=f"Failed to save MCP server '{request.name}': {e}")
|
|
251
|
+
|
|
252
|
+
# Success path after exception handling
|
|
253
|
+
return UpdateMCPServerResultSuccess(
|
|
254
|
+
name=request.name, result_details=f"Successfully updated MCP server '{request.name}'"
|
|
255
|
+
)
|
|
256
|
+
|
|
257
|
+
def on_delete_mcp_server_request(
    self, request: DeleteMCPServerRequest
) -> DeleteMCPServerResultSuccess | DeleteMCPServerResultFailure:
    """Handle delete MCP server request.

    Removes the first configured server whose name matches *request.name*
    and persists the remaining entries.

    Returns:
        A success result when the server was removed and saved, otherwise a
        failure result (unknown server name, load error, or save error).
    """
    try:
        servers = self._get_mcp_servers()
    except Exception as e:
        logger.error("Failed to delete MCP server '%s': %s", request.name, e)
        return DeleteMCPServerResultFailure(result_details=f"Failed to delete MCP server '{request.name}': {e}")

    # Locate the first entry with a matching name (names are expected unique).
    index = next((i for i, server in enumerate(servers) if server.name == request.name), None)
    if index is None:
        # Fix: previously the unchanged list was still saved before reporting
        # not-found; now we skip the pointless config rewrite entirely.
        return DeleteMCPServerResultFailure(
            result_details=f"Failed to delete MCP server '{request.name}' - not found"
        )

    servers.pop(index)

    try:
        self._save_mcp_servers(servers)
    except Exception as e:
        logger.error("Failed to delete MCP server '%s': %s", request.name, e)
        return DeleteMCPServerResultFailure(result_details=f"Failed to delete MCP server '{request.name}': {e}")

    return DeleteMCPServerResultSuccess(
        name=request.name, result_details=f"Successfully deleted MCP server '{request.name}'"
    )
def on_enable_mcp_server_request(
    self, request: EnableMCPServerRequest
) -> EnableMCPServerResultSuccess | EnableMCPServerResultFailure:
    """Mark the named MCP server as enabled and persist the full server list.

    Returns:
        A success result when the flag was set and saved, otherwise a
        failure result (unknown server name, or persistence error).
    """
    servers = self._get_mcp_servers()
    target = next((entry for entry in servers if entry.name == request.name), None)

    if target is None:
        return EnableMCPServerResultFailure(
            result_details=f"Failed to enable MCP server '{request.name}' - not found"
        )

    target.enabled = True

    try:
        self._save_mcp_servers(servers)
    except Exception as e:
        logger.error("Failed to save MCP server '%s': %s", request.name, e)
        return EnableMCPServerResultFailure(result_details=f"Failed to save MCP server '{request.name}': {e}")

    return EnableMCPServerResultSuccess(
        name=request.name, result_details=f"Successfully enabled MCP server '{request.name}'"
    )
def on_disable_mcp_server_request(
    self, request: DisableMCPServerRequest
) -> DisableMCPServerResultSuccess | DisableMCPServerResultFailure:
    """Mark the named MCP server as disabled and persist the full server list.

    Returns:
        A success result when the flag was cleared and saved, otherwise a
        failure result (unknown server name, or persistence error).
    """
    servers = self._get_mcp_servers()
    for entry in servers:
        if entry.name == request.name:
            target = entry
            break
    else:
        # Loop completed without a match: no server carries that name.
        return DisableMCPServerResultFailure(
            result_details=f"Failed to disable MCP server '{request.name}' - not found"
        )

    target.enabled = False

    try:
        self._save_mcp_servers(servers)
    except Exception as e:
        logger.error("Failed to save MCP server '%s': %s", request.name, e)
        return DisableMCPServerResultFailure(result_details=f"Failed to save MCP server '{request.name}': {e}")

    return DisableMCPServerResultSuccess(
        name=request.name, result_details=f"Successfully disabled MCP server '{request.name}'"
    )
def on_get_enabled_mcp_servers_request(
    self,
    request: GetEnabledMCPServersRequest,  # noqa: ARG002
) -> GetEnabledMCPServersResultSuccess | GetEnabledMCPServersResultFailure:
    """Return every configured MCP server whose `enabled` flag is set.

    Returns:
        A success result mapping server name to its serialized config,
        or a failure result when the configuration cannot be read.
    """
    try:
        enabled = self._get_mcp_servers(filter_by={"enabled": True})
        servers_by_name = {entry.name: entry.model_dump() for entry in enabled}
    except Exception as e:
        logger.error("Failed to get enabled MCP servers: %s", e)
        return GetEnabledMCPServersResultFailure(result_details=f"Failed to get enabled MCP servers: {e}")

    return GetEnabledMCPServersResultSuccess(
        # Pydantic model.model_dump() returns dict[str, Any] which matches MCPServerConfig TypedDict structure
        servers=servers_by_name,  # type: ignore[arg-type]
        result_details=f"Successfully retrieved {len(servers_by_name)} enabled MCP servers",
    )