uipath-langchain 0.0.133__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath_langchain/_cli/cli_init.py +130 -191
- uipath_langchain/_cli/cli_new.py +2 -3
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/_tracing/__init__.py +3 -2
- uipath_langchain/_tracing/_instrument_traceable.py +11 -12
- uipath_langchain/_utils/_request_mixin.py +327 -51
- uipath_langchain/_utils/_settings.py +2 -2
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +24 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +42 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +499 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +58 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +173 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +283 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +117 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +248 -35
- uipath_langchain/chat/openai.py +133 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/chat/vertex.py +255 -0
- uipath_langchain/embeddings/embeddings.py +131 -34
- uipath_langchain/middlewares.py +0 -6
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +386 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/METADATA +44 -23
- uipath_langchain-0.1.28.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.28.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_conversation.py +0 -298
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -139
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -379
- uipath_langchain/_cli/_utils/_graph.py +0 -199
- uipath_langchain/_cli/cli_dev.py +0 -44
- uipath_langchain/_cli/cli_eval.py +0 -78
- uipath_langchain/_cli/cli_run.py +0 -82
- uipath_langchain/_tracing/_oteladapter.py +0 -222
- uipath_langchain/_tracing/_utils.py +0 -28
- uipath_langchain/builder/agent_config.py +0 -191
- uipath_langchain/tools/preconfigured.py +0 -191
- uipath_langchain-0.0.133.dist-info/RECORD +0 -41
- uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
- /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/guardrails/guardrail_nodes.py

@@ -0,0 +1,173 @@
+import json
+import logging
+import re
+from typing import Any, Callable
+
+from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, SystemMessage
+from langgraph.types import Command
+from uipath.platform import UiPath
+from uipath.platform.guardrails import (
+    BaseGuardrail,
+    GuardrailScope,
+)
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+from .types import AgentGuardrailsGraphState
+
+logger = logging.getLogger(__name__)
+
+
+def _message_text(msg: AnyMessage) -> str:
+    if isinstance(msg, (HumanMessage, SystemMessage)):
+        return msg.content if isinstance(msg.content, str) else str(msg.content)
+    return str(getattr(msg, "content", "")) if hasattr(msg, "content") else ""
+
+
+def _create_guardrail_node(
+    guardrail: BaseGuardrail,
+    scope: GuardrailScope,
+    execution_stage: ExecutionStage,
+    payload_generator: Callable[[AgentGuardrailsGraphState], str],
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    """Private factory for guardrail evaluation nodes.
+
+    Returns a node that evaluates the guardrail and routes via Command:
+    - goto success_node on validation pass
+    - goto failure_node on validation fail
+    """
+    raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}"
+    node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
+
+    async def node(
+        state: AgentGuardrailsGraphState,
+    ):
+        text = payload_generator(state)
+        try:
+            uipath = UiPath()
+            result = uipath.guardrails.evaluate_guardrail(text, guardrail)
+        except Exception as exc:
+            logger.error("Failed to evaluate guardrail: %s", exc)
+            raise
+
+        if not result.validation_passed:
+            return Command(
+                goto=failure_node, update={"guardrail_validation_result": result.reason}
+            )
+        return Command(goto=success_node, update={"guardrail_validation_result": None})
+
+    return node_name, node
+
+
+def create_llm_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        if not state.messages:
+            return ""
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.LLM,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
+
+
+def create_agent_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    # To be implemented in future PR
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        if not state.messages:
+            return ""
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.AGENT,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
+
+
+def create_tool_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+    tool_name: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    """Create a guardrail node for TOOL scope guardrails.
+
+    Args:
+        guardrail: The guardrail to evaluate.
+        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
+        success_node: Node to route to on validation pass.
+        failure_node: Node to route to on validation fail.
+        tool_name: Name of the tool to extract arguments from.
+
+    Returns:
+        A tuple of (node_name, node_function) for the guardrail evaluation node.
+    """
+
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        """Extract tool call arguments for the specified tool name.
+
+        Args:
+            state: The current agent graph state.
+
+        Returns:
+            JSON string of the tool call arguments, or empty string if not found.
+        """
+        if not state.messages:
+            return ""
+
+        if execution_stage == ExecutionStage.PRE_EXECUTION:
+            if not isinstance(state.messages[-1], AIMessage):
+                return ""
+            message = state.messages[-1]
+
+            if not message.tool_calls:
+                return ""
+
+            # Find the first tool call with matching name
+            for tool_call in message.tool_calls:
+                call_name = (
+                    tool_call.get("name")
+                    if isinstance(tool_call, dict)
+                    else getattr(tool_call, "name", None)
+                )
+                if call_name == tool_name:
+                    # Extract args from the tool call
+                    args = (
+                        tool_call.get("args")
+                        if isinstance(tool_call, dict)
+                        else getattr(tool_call, "args", None)
+                    )
+                    if args is not None:
+                        return json.dumps(args)
+
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.TOOL,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
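The factories above each return a `(node_name, node)` pair whose node routes with `Command(goto=...)` instead of static edges. The sketch below is not from the package: it wires one such pair into a plain LangGraph `StateGraph`, with a simple keyword check standing in for `UiPath().guardrails.evaluate_guardrail(...)`; the node and state names are illustrative.

```python
from typing import Annotated, Literal, Optional

from langchain_core.messages import AnyMessage, HumanMessage
from langgraph.constants import END, START
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
from langgraph.types import Command
from pydantic import BaseModel


class DemoState(BaseModel):
    # Mirrors AgentGuardrailsGraphState from this diff.
    messages: Annotated[list[AnyMessage], add_messages] = []
    guardrail_validation_result: Optional[str] = None


def demo_guardrail_node(state: DemoState) -> Command[Literal["inner", "blocked"]]:
    # Stand-in check; the real nodes call UiPath().guardrails.evaluate_guardrail().
    text = state.messages[-1].content if state.messages else ""
    if "forbidden" in str(text).lower():
        return Command(goto="blocked", update={"guardrail_validation_result": "matched keyword"})
    return Command(goto="inner", update={"guardrail_validation_result": None})


graph = StateGraph(DemoState)
graph.add_node("guardrail", demo_guardrail_node)
graph.add_node("inner", lambda state: {})    # the guarded node
graph.add_node("blocked", lambda state: {})  # placeholder for a failure action node
graph.add_edge(START, "guardrail")
graph.add_edge("inner", END)
graph.add_edge("blocked", END)

# Routes through "inner" since no forbidden keyword is present.
result = graph.compile().invoke({"messages": [HumanMessage("hello")]})
```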
uipath_langchain/agent/guardrails/guardrails_factory.py

@@ -0,0 +1,70 @@
+import logging
+from typing import Sequence
+
+from uipath.agent.models.agent import (
+    AgentGuardrail,
+    AgentGuardrailBlockAction,
+    AgentGuardrailEscalateAction,
+    AgentGuardrailLogAction,
+    AgentGuardrailSeverityLevel,
+    AgentUnknownGuardrail,
+)
+from uipath.platform.guardrails import BaseGuardrail
+
+from uipath_langchain.agent.guardrails.actions import (
+    BlockAction,
+    EscalateAction,
+    GuardrailAction,
+    LogAction,
+)
+
+
+def build_guardrails_with_actions(
+    guardrails: Sequence[AgentGuardrail] | None,
+) -> list[tuple[BaseGuardrail, GuardrailAction]]:
+    """Build a list of (guardrail, action) tuples from model definitions.
+
+    Args:
+        guardrails: Sequence of guardrail model objects or None.
+
+    Returns:
+        A list of tuples pairing each supported guardrail with its executable action.
+    """
+    if not guardrails:
+        return []
+
+    result: list[tuple[BaseGuardrail, GuardrailAction]] = []
+    for guardrail in guardrails:
+        if isinstance(guardrail, AgentUnknownGuardrail):
+            continue
+
+        action = guardrail.action
+
+        if isinstance(action, AgentGuardrailBlockAction):
+            result.append((guardrail, BlockAction(action.reason)))
+        elif isinstance(action, AgentGuardrailLogAction):
+            severity_level_map = {
+                AgentGuardrailSeverityLevel.ERROR: logging.ERROR,
+                AgentGuardrailSeverityLevel.WARNING: logging.WARNING,
+                AgentGuardrailSeverityLevel.INFO: logging.INFO,
+            }
+            level = severity_level_map.get(action.severity_level, logging.INFO)
+            result.append(
+                (
+                    guardrail,
+                    LogAction(message=action.message, level=level),
+                )
+            )
+        elif isinstance(action, AgentGuardrailEscalateAction):
+            result.append(
+                (
+                    guardrail,
+                    EscalateAction(
+                        app_name=action.app.name,
+                        app_folder_path=action.app.folder_name,
+                        version=action.app.version,
+                        assignee=action.recipient.value,
+                    ),
+                )
+            )
+    return result
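A minimal hand-off sketch, assuming you already hold the declarative guardrail models from an agent definition: the resulting `(guardrail, action)` pairs are what the `guardrails=` parameter of `create_agent` (see `react/agent.py` below) expects. The `resolve_guardrails` wrapper is illustrative, not part of the package.

```python
from typing import Sequence

from uipath.agent.models.agent import AgentGuardrail
from uipath_langchain.agent.guardrails.guardrails_factory import (
    build_guardrails_with_actions,
)


def resolve_guardrails(declared: Sequence[AgentGuardrail] | None):
    """Illustrative wrapper: map declarative guardrail models to executable pairs."""
    # Each pair is (BaseGuardrail, BlockAction | LogAction | EscalateAction);
    # AgentUnknownGuardrail entries are silently skipped by the factory.
    return build_guardrails_with_actions(declared)
```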
uipath_langchain/agent/guardrails/guardrails_subgraph.py

@@ -0,0 +1,283 @@
+from functools import partial
+from typing import Any, Callable, Sequence
+
+from langgraph.constants import END, START
+from langgraph.graph import StateGraph
+from langgraph.prebuilt import ToolNode
+from uipath.platform.guardrails import (
+    BaseGuardrail,
+    BuiltInValidatorGuardrail,
+    GuardrailScope,
+)
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+from .actions.base_action import GuardrailAction, GuardrailActionNode
+from .guardrail_nodes import (
+    create_agent_guardrail_node,
+    create_llm_guardrail_node,
+    create_tool_guardrail_node,
+)
+from .types import AgentGuardrailsGraphState
+
+_VALIDATOR_ALLOWED_STAGES = {
+    "prompt_injection": {ExecutionStage.PRE_EXECUTION},
+    "pii_detection": {ExecutionStage.PRE_EXECUTION, ExecutionStage.POST_EXECUTION},
+}
+
+
+def _filter_guardrails_by_stage(
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+    stage: ExecutionStage,
+) -> list[tuple[BaseGuardrail, GuardrailAction]]:
+    """Filter guardrails that apply to a specific execution stage."""
+    filtered_guardrails = []
+    for guardrail, action in guardrails or []:
+        # Internal knowledge: Check against configured allowed stages
+        if (
+            isinstance(guardrail, BuiltInValidatorGuardrail)
+            and guardrail.validator_type in _VALIDATOR_ALLOWED_STAGES
+            and stage not in _VALIDATOR_ALLOWED_STAGES[guardrail.validator_type]
+        ):
+            continue
+        filtered_guardrails.append((guardrail, action))
+    return filtered_guardrails
+
+
+def _create_guardrails_subgraph(
+    main_inner_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+    scope: GuardrailScope,
+    execution_stages: Sequence[ExecutionStage],
+    node_factory: Callable[
+        [
+            BaseGuardrail,
+            ExecutionStage,
+            str,  # success node name
+            str,  # fail node name
+        ],
+        GuardrailActionNode,
+    ] = create_llm_guardrail_node,
+):
+    """Build a subgraph that enforces guardrails around an inner node.
+
+    The constructed graph conditionally includes pre- and/or post-execution guardrail
+    chains based on ``execution_stages``:
+    - If ``ExecutionStage.PRE_EXECUTION`` is included, the graph links
+      START -> first pre-guardrail node -> ... -> inner.
+      Otherwise, it directly links START -> inner.
+    - If ``ExecutionStage.POST_EXECUTION`` is included, the graph links
+      inner -> first post-guardrail node -> ... -> END.
+      Otherwise, it directly links inner -> END.
+
+    No static edges are added between guardrail nodes; each evaluation node routes
+    dynamically to its configured success/failure targets. Failure nodes are added
+    but not chained; they are expected to route via Command to the provided next node.
+    """
+    inner_name, inner_node = main_inner_node
+
+    subgraph = StateGraph(AgentGuardrailsGraphState)
+
+    subgraph.add_node(inner_name, inner_node)
+
+    # Add pre execution guardrail nodes
+    if ExecutionStage.PRE_EXECUTION in execution_stages:
+        pre_guardrails = _filter_guardrails_by_stage(
+            guardrails, ExecutionStage.PRE_EXECUTION
+        )
+        first_pre_exec_guardrail_node = _build_guardrail_node_chain(
+            subgraph,
+            pre_guardrails,
+            scope,
+            ExecutionStage.PRE_EXECUTION,
+            node_factory,
+            inner_name,
+            inner_name,
+        )
+        subgraph.add_edge(START, first_pre_exec_guardrail_node)
+    else:
+        subgraph.add_edge(START, inner_name)
+
+    # Add post execution guardrail nodes
+    if ExecutionStage.POST_EXECUTION in execution_stages:
+        post_guardrails = _filter_guardrails_by_stage(
+            guardrails, ExecutionStage.POST_EXECUTION
+        )
+        first_post_exec_guardrail_node = _build_guardrail_node_chain(
+            subgraph,
+            post_guardrails,
+            scope,
+            ExecutionStage.POST_EXECUTION,
+            node_factory,
+            END,
+            inner_node,
+        )
+        subgraph.add_edge(inner_name, first_post_exec_guardrail_node)
+    else:
+        subgraph.add_edge(inner_name, END)
+
+    return subgraph.compile()
+
+
+def _build_guardrail_node_chain(
+    subgraph: StateGraph[AgentGuardrailsGraphState],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+    scope: GuardrailScope,
+    execution_stage: ExecutionStage,
+    node_factory: Callable[
+        [
+            BaseGuardrail,
+            ExecutionStage,
+            str,  # success node name
+            str,  # fail node name
+        ],
+        GuardrailActionNode,
+    ],
+    next_node: str,
+    guarded_node_name: str,
+) -> str:
+    """Recursively build a chain of guardrail nodes in reverse order.
+
+    This function processes guardrails from last to first, creating a chain where:
+    - Each guardrail node evaluates the guardrail condition
+    - On success, it routes to the next guardrail node (or the final next_node)
+    - On failure, it routes to a failure node that either throws an error or continues to next_node
+
+    Args:
+        subgraph: The StateGraph to add nodes and edges to.
+        guardrails: Sequence of (guardrail, action) tuples to process. Processed in reverse.
+        scope: The scope of the guardrails (LLM, AGENT, or TOOL).
+        execution_stage: Whether this is "PreExecution" or "PostExecution" guardrails.
+        node_factory: Factory function to create guardrail evaluation nodes.
+        next_node: The node name to route to after all guardrails pass.
+
+    Returns:
+        The name of the first guardrail node in the chain (or next_node if no guardrails).
+    """
+    # Base case: no guardrails to process, return the next node directly
+    if not guardrails:
+        return next_node
+
+    guardrail, action = guardrails[-1]
+    remaining_guardrails = guardrails[:-1]
+
+    fail_node_name, fail_node = action.action_node(
+        guardrail=guardrail,
+        scope=scope,
+        execution_stage=execution_stage,
+        guarded_component_name=guarded_node_name,
+    )
+
+    # Create the guardrail evaluation node.
+    guardrail_node_name, guardrail_node = node_factory(
+        guardrail, execution_stage, next_node, fail_node_name
+    )
+
+    # Add both nodes to the subgraph
+    subgraph.add_node(guardrail_node_name, guardrail_node)
+    subgraph.add_node(fail_node_name, fail_node)
+
+    # Failure path route to the next node
+    subgraph.add_edge(fail_node_name, next_node)
+
+    previous_node_name = _build_guardrail_node_chain(
+        subgraph,
+        remaining_guardrails,
+        scope,
+        execution_stage,
+        node_factory,
+        guardrail_node_name,
+        guarded_node_name,
+    )
+
+    return previous_node_name
+
+
+def create_llm_guardrails_subgraph(
+    llm_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+):
+    applicable_guardrails = [
+        (guardrail, _)
+        for (guardrail, _) in (guardrails or [])
+        if GuardrailScope.LLM in guardrail.selector.scopes
+    ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return llm_node[1]
+
+    return _create_guardrails_subgraph(
+        main_inner_node=llm_node,
+        guardrails=applicable_guardrails,
+        scope=GuardrailScope.LLM,
+        execution_stages=[ExecutionStage.PRE_EXECUTION, ExecutionStage.POST_EXECUTION],
+        node_factory=create_llm_guardrail_node,
+    )
+
+
+def create_tools_guardrails_subgraph(
+    tool_nodes: dict[str, ToolNode],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+) -> dict[str, ToolNode]:
+    """Create tool nodes with guardrails.
+    Args:
+    """
+    result: dict[str, ToolNode] = {}
+    for tool_name, tool_node in tool_nodes.items():
+        subgraph = create_tool_guardrails_subgraph(
+            (tool_name, tool_node),
+            guardrails,
+        )
+        result[tool_name] = subgraph
+
+    return result
+
+
+def create_agent_guardrails_subgraph(
+    agent_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+    execution_stage: ExecutionStage,
+):
+    """Create a subgraph for AGENT-scoped guardrails that applies checks at the specified stage.
+
+    This is intended for wrapping nodes like INIT or TERMINATE, where guardrails should run
+    either before (pre-execution) or after (post-execution) the node logic.
+    """
+    applicable_guardrails = [
+        (guardrail, _)
+        for (guardrail, _) in (guardrails or [])
+        if GuardrailScope.AGENT in guardrail.selector.scopes
+    ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return agent_node[1]
+
+    return _create_guardrails_subgraph(
+        main_inner_node=agent_node,
+        guardrails=applicable_guardrails,
+        scope=GuardrailScope.AGENT,
+        execution_stages=[execution_stage],
+        node_factory=create_agent_guardrail_node,
+    )
+
+
+def create_tool_guardrails_subgraph(
+    tool_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+):
+    tool_name, _ = tool_node
+    applicable_guardrails = [
+        (guardrail, action)
+        for (guardrail, action) in (guardrails or [])
+        if GuardrailScope.TOOL in guardrail.selector.scopes
+        and guardrail.selector.match_names is not None
+        and tool_name in guardrail.selector.match_names
+    ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return tool_node[1]
+
+    return _create_guardrails_subgraph(
+        main_inner_node=tool_node,
+        guardrails=applicable_guardrails,
+        scope=GuardrailScope.TOOL,
+        execution_stages=[ExecutionStage.PRE_EXECUTION, ExecutionStage.POST_EXECUTION],
+        node_factory=partial(create_tool_guardrail_node, tool_name=tool_name),
+    )
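To make the wiring of `_build_guardrail_node_chain` concrete: the recursion walks the guardrail list from last to first, so the first guardrail ends up as the chain entry point and each evaluation node's success target is the node built in the previous recursive step. The snippet below is illustrative only; it reproduces that ordering on plain strings, with no LangGraph involved.

```python
def chain_order(guardrail_names: list[str], next_node: str) -> list[tuple[str, str]]:
    """Return (node, success_target) pairs in the order the chain is wired."""
    if not guardrail_names:
        return []
    *rest, last = guardrail_names
    # The last guardrail routes to `next_node`; the one before it routes to the last, etc.
    return chain_order(rest, last) + [(last, next_node)]


# Two pre-execution guardrails guarding an "llm" node:
print(chain_order(["pii_detection", "prompt_injection"], "llm"))
# -> [('pii_detection', 'prompt_injection'), ('prompt_injection', 'llm')]
```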
uipath_langchain/agent/guardrails/types.py

@@ -0,0 +1,20 @@
+from enum import Enum
+from typing import Annotated, Optional
+
+from langchain_core.messages import AnyMessage
+from langgraph.graph.message import add_messages
+from pydantic import BaseModel
+
+
+class AgentGuardrailsGraphState(BaseModel):
+    """Agent Guardrails Graph state for guardrail subgraph."""
+
+    messages: Annotated[list[AnyMessage], add_messages] = []
+    guardrail_validation_result: Optional[str] = None
+
+
+class ExecutionStage(str, Enum):
+    """Execution stage enumeration."""
+
+    PRE_EXECUTION = "preExecution"
+    POST_EXECUTION = "postExecution"
uipath_langchain/agent/react/__init__.py

@@ -0,0 +1,14 @@
+"""UiPath ReAct Agent implementation"""
+
+from .agent import create_agent
+from .types import AgentGraphConfig, AgentGraphNode, AgentGraphState
+from .utils import resolve_input_model, resolve_output_model
+
+__all__ = [
+    "create_agent",
+    "resolve_output_model",
+    "resolve_input_model",
+    "AgentGraphNode",
+    "AgentGraphState",
+    "AgentGraphConfig",
+]
uipath_langchain/agent/react/agent.py

@@ -0,0 +1,117 @@
+import os
+from typing import Callable, Sequence, Type, TypeVar, cast
+
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_core.tools import BaseTool
+from langgraph.constants import END, START
+from langgraph.graph import StateGraph
+from pydantic import BaseModel
+from uipath.platform.guardrails import BaseGuardrail
+
+from ..guardrails import create_llm_guardrails_subgraph
+from ..guardrails.actions import GuardrailAction
+from ..guardrails.guardrails_subgraph import create_tools_guardrails_subgraph
+from ..tools import create_tool_node
+from .init_node import (
+    create_init_node,
+)
+from .llm_node import (
+    create_llm_node,
+)
+from .router import (
+    route_agent,
+)
+from .terminate_node import (
+    create_terminate_node,
+)
+from .tools import create_flow_control_tools
+from .types import AgentGraphConfig, AgentGraphNode, AgentGraphState
+
+InputT = TypeVar("InputT", bound=BaseModel)
+OutputT = TypeVar("OutputT", bound=BaseModel)
+
+
+def create_state_with_input(input_schema: Type[InputT]):
+    InnerAgentGraphState = type(
+        "InnerAgentGraphState",
+        (AgentGraphState, input_schema),
+        {},
+    )
+
+    cast(type[BaseModel], InnerAgentGraphState).model_rebuild()
+    return InnerAgentGraphState
+
+
+def create_agent(
+    model: BaseChatModel,
+    tools: Sequence[BaseTool],
+    messages: Sequence[SystemMessage | HumanMessage]
+    | Callable[[InputT], Sequence[SystemMessage | HumanMessage]],
+    *,
+    input_schema: Type[InputT] | None = None,
+    output_schema: Type[OutputT] | None = None,
+    config: AgentGraphConfig | None = None,
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None = None,
+) -> StateGraph[AgentGraphState, None, InputT, OutputT]:
+    """Build agent graph with INIT -> AGENT(subgraph) <-> TOOLS loop, terminated by control flow tools.
+
+    The AGENT node is a subgraph that runs:
+    - before-agent guardrail middlewares
+    - the LLM tool-executing node
+    - after-agent guardrail middlewares
+
+    Control flow tools (end_execution, raise_error) are auto-injected alongside regular tools.
+    """
+    if config is None:
+        config = AgentGraphConfig()
+
+    os.environ["LANGCHAIN_RECURSION_LIMIT"] = str(config.recursion_limit)
+
+    agent_tools = list(tools)
+    flow_control_tools: list[BaseTool] = create_flow_control_tools(output_schema)
+    llm_tools: list[BaseTool] = [*agent_tools, *flow_control_tools]
+
+    init_node = create_init_node(messages)
+    tool_nodes = create_tool_node(agent_tools)
+    tool_nodes_with_guardrails = create_tools_guardrails_subgraph(
+        tool_nodes, guardrails
+    )
+    terminate_node = create_terminate_node(output_schema)
+
+    InnerAgentGraphState = create_state_with_input(
+        input_schema if input_schema is not None else BaseModel
+    )
+
+    builder: StateGraph[AgentGraphState, None, InputT, OutputT] = StateGraph(
+        InnerAgentGraphState, input_schema=input_schema, output_schema=output_schema
+    )
+    builder.add_node(AgentGraphNode.INIT, init_node)
+
+    for tool_name, tool_node in tool_nodes_with_guardrails.items():
+        builder.add_node(tool_name, tool_node)
+
+    builder.add_node(AgentGraphNode.TERMINATE, terminate_node)
+
+    builder.add_edge(START, AgentGraphNode.INIT)
+
+    llm_node = create_llm_node(model, llm_tools)
+    llm_with_guardrails_subgraph = create_llm_guardrails_subgraph(
+        (AgentGraphNode.LLM, llm_node), guardrails
+    )
+    builder.add_node(AgentGraphNode.AGENT, llm_with_guardrails_subgraph)
+    builder.add_edge(AgentGraphNode.INIT, AgentGraphNode.AGENT)
+
+    tool_node_names = list(tool_nodes_with_guardrails.keys())
+    builder.add_conditional_edges(
+        AgentGraphNode.AGENT,
+        route_agent,
+        [AgentGraphNode.AGENT, *tool_node_names, AgentGraphNode.TERMINATE],
+    )
+
+    for tool_name in tool_node_names:
+        builder.add_edge(tool_name, AgentGraphNode.AGENT)
+
+    builder.add_edge(AgentGraphNode.TERMINATE, END)
+
+    return builder
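A usage sketch based on the `create_agent` signature above. The tool, the input/output schemas, and the prompt builder are illustrative; the model is left to the caller, since any `BaseChatModel` (for example, one of the chat models added under `uipath_langchain/chat/` in this release) will do.

```python
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from pydantic import BaseModel

from uipath_langchain.agent.react import create_agent


class TicketInput(BaseModel):
    subject: str
    body: str


class TicketOutput(BaseModel):
    category: str


@tool
def lookup_customer(email: str) -> str:
    """Return CRM notes for a customer email (stub for illustration)."""
    return "no notes on file"


def build_messages(state: TicketInput):
    # `end_execution` is one of the auto-injected control flow tools noted in the docstring above.
    return [
        SystemMessage("Classify the support ticket and call end_execution when done."),
        HumanMessage(f"{state.subject}\n\n{state.body}"),
    ]


def build_ticket_agent(model: BaseChatModel):
    builder = create_agent(
        model,
        tools=[lookup_customer],
        messages=build_messages,
        input_schema=TicketInput,
        output_schema=TicketOutput,
    )
    return builder.compile()

# graph = build_ticket_agent(model)
# result = graph.invoke({"subject": "Broken invoice", "body": "Totals don't add up."})
```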
uipath_langchain/agent/react/init_node.py

@@ -0,0 +1,20 @@
+"""State initialization node for the ReAct Agent graph."""
+
+from typing import Any, Callable, Sequence
+
+from langchain_core.messages import HumanMessage, SystemMessage
+
+
+def create_init_node(
+    messages: Sequence[SystemMessage | HumanMessage]
+    | Callable[[Any], Sequence[SystemMessage | HumanMessage]],
+):
+    def graph_state_init(state: Any):
+        if callable(messages):
+            resolved_messages = messages(state)
+        else:
+            resolved_messages = messages
+
+        return {"messages": list(resolved_messages)}
+
+    return graph_state_init
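The init node accepts either a static message sequence or a callable over the graph state; both forms are sketched below. The `topic` field is an assumed input-schema attribute, used only for illustration.

```python
from langchain_core.messages import HumanMessage, SystemMessage

from uipath_langchain.agent.react.init_node import create_init_node

# Static form: the same prompt every run.
static_init = create_init_node([SystemMessage("You are a helpful agent.")])

# Callable form: build the prompt from the input state at runtime.
dynamic_init = create_init_node(
    lambda state: [
        SystemMessage("You are a helpful agent."),
        HumanMessage(f"Research this topic: {state.topic}"),  # `topic` assumed on the input schema
    ]
)
```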