uipath-langchain 0.0.133__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- uipath_langchain/_cli/cli_init.py +130 -191
- uipath_langchain/_cli/cli_new.py +2 -3
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/_tracing/__init__.py +3 -2
- uipath_langchain/_tracing/_instrument_traceable.py +11 -12
- uipath_langchain/_utils/_request_mixin.py +327 -51
- uipath_langchain/_utils/_settings.py +2 -2
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +113 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/gemini.py +330 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +248 -35
- uipath_langchain/chat/openai.py +132 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/embeddings/embeddings.py +131 -34
- uipath_langchain/middlewares.py +0 -6
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +349 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -22
- uipath_langchain-0.1.24.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_conversation.py +0 -298
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -139
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -379
- uipath_langchain/_cli/_utils/_graph.py +0 -199
- uipath_langchain/_cli/cli_dev.py +0 -44
- uipath_langchain/_cli/cli_eval.py +0 -78
- uipath_langchain/_cli/cli_run.py +0 -82
- uipath_langchain/_tracing/_oteladapter.py +0 -222
- uipath_langchain/_tracing/_utils.py +0 -28
- uipath_langchain/builder/agent_config.py +0 -191
- uipath_langchain/tools/preconfigured.py +0 -191
- uipath_langchain-0.0.133.dist-info/RECORD +0 -41
- uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
- /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/guardrails/actions/escalate_action.py

```diff
@@ -0,0 +1,274 @@
+from __future__ import annotations
+
+import json
+import re
+from typing import Any, Dict, Literal
+
+from langchain_core.messages import AIMessage, ToolMessage
+from langgraph.types import Command, interrupt
+from uipath.platform.common import CreateEscalation
+from uipath.platform.guardrails import (
+    BaseGuardrail,
+    GuardrailScope,
+)
+from uipath.runtime.errors import UiPathErrorCode
+
+from ...exceptions import AgentTerminationException
+from ..guardrail_nodes import _message_text
+from ..types import AgentGuardrailsGraphState, ExecutionStage
+from .base_action import GuardrailAction, GuardrailActionNode
+
+
+class EscalateAction(GuardrailAction):
+    """Node-producing action that inserts a HITL interruption node into the graph.
+
+    The returned node creates a human-in-the-loop interruption that suspends execution
+    and waits for human review. When execution resumes, if the escalation was approved,
+    the flow continues with the reviewed content; otherwise, an error is raised.
+    """
+
+    def __init__(
+        self,
+        app_name: str,
+        app_folder_path: str,
+        version: int,
+        assignee: str,
+    ):
+        self.app_name = app_name
+        self.app_folder_path = app_folder_path
+        self.version = version
+        self.assignee = assignee
+
+    def action_node(
+        self,
+        *,
+        guardrail: BaseGuardrail,
+        scope: GuardrailScope,
+        execution_stage: ExecutionStage,
+    ) -> GuardrailActionNode:
+        node_name = _get_node_name(execution_stage, guardrail, scope)
+
+        async def _node(
+            state: AgentGuardrailsGraphState,
+        ) -> Dict[str, Any] | Command[Any]:
+            input = _extract_escalation_content(state, scope, execution_stage)
+            escalation_field = _execution_stage_to_escalation_field(execution_stage)
+
+            data = {
+                "GuardrailName": guardrail.name,
+                "GuardrailDescription": guardrail.description,
+                "Component": scope.name.lower(),
+                "ExecutionStage": _execution_stage_to_string(execution_stage),
+                "GuardrailResult": state.guardrail_validation_result,
+                escalation_field: input,
+            }
+
+            escalation_result = interrupt(
+                CreateEscalation(
+                    app_name=self.app_name,
+                    app_folder_path=self.app_folder_path,
+                    title=self.app_name,
+                    data=data,
+                    assignee=self.assignee,
+                )
+            )
+
+            if escalation_result.action == "Approve":
+                return _process_escalation_response(
+                    state, escalation_result.data, scope, execution_stage
+                )
+
+            raise AgentTerminationException(
+                code=UiPathErrorCode.EXECUTION_ERROR,
+                title="Escalation rejected",
+                detail=f"Action was rejected after reviewing the task created by guardrail [{guardrail.name}]. Please contact your administrator.",
+            )
+
+        return node_name, _node
+
+
+def _get_node_name(
+    execution_stage: ExecutionStage, guardrail: BaseGuardrail, scope: GuardrailScope
+) -> str:
+    sanitized = re.sub(r"\W+", "_", guardrail.name).strip("_").lower()
+    node_name = f"{sanitized}_hitl_{execution_stage.name.lower()}_{scope.lower()}"
+    return node_name
+
+
+def _execution_stage_to_string(
+    execution_stage: ExecutionStage,
+) -> Literal["PreExecution", "PostExecution"]:
+    """Convert ExecutionStage enum to string literal.
+
+    Args:
+        execution_stage: The execution stage enum.
+
+    Returns:
+        "PreExecution" for PRE_EXECUTION, "PostExecution" for POST_EXECUTION.
+    """
+    if execution_stage == ExecutionStage.PRE_EXECUTION:
+        return "PreExecution"
+    return "PostExecution"
+
+
+def _process_escalation_response(
+    state: AgentGuardrailsGraphState,
+    escalation_result: Dict[str, Any],
+    scope: GuardrailScope,
+    execution_stage: ExecutionStage,
+) -> Dict[str, Any] | Command[Any]:
+    """Process escalation response and update state based on guardrail scope.
+
+    Args:
+        state: The current agent graph state.
+        escalation_result: The result from the escalation interrupt.
+        scope: The guardrail scope (LLM/AGENT/TOOL).
+        execution_stage: The hook type ("PreExecution" or "PostExecution").
+
+    Returns:
+        For LLM scope: Command to update messages with reviewed inputs/outputs.
+        For non-LLM scope: Empty dict (no message alteration).
+
+    Raises:
+        AgentTerminationException: If escalation response processing fails.
+    """
+    if scope != GuardrailScope.LLM:
+        return {}
+
+    try:
+        reviewed_field = (
+            "ReviewedInputs"
+            if execution_stage == ExecutionStage.PRE_EXECUTION
+            else "ReviewedOutputs"
+        )
+
+        msgs = state.messages.copy()
+        if not msgs or reviewed_field not in escalation_result:
+            return {}
+
+        last_message = msgs[-1]
+
+        if execution_stage == ExecutionStage.PRE_EXECUTION:
+            reviewed_content = escalation_result[reviewed_field]
+            if reviewed_content:
+                last_message.content = json.loads(reviewed_content)
+        else:
+            reviewed_outputs_json = escalation_result[reviewed_field]
+            if not reviewed_outputs_json:
+                return {}
+
+            content_list = json.loads(reviewed_outputs_json)
+            if not content_list:
+                return {}
+
+            # For AI messages, process tool calls if present
+            if isinstance(last_message, AIMessage):
+                ai_message: AIMessage = last_message
+                content_index = 0
+
+                if ai_message.tool_calls:
+                    tool_calls = list(ai_message.tool_calls)
+                    for tool_call in tool_calls:
+                        args = tool_call["args"]
+                        if (
+                            isinstance(args, dict)
+                            and "content" in args
+                            and args["content"] is not None
+                        ):
+                            if content_index < len(content_list):
+                                updated_content = json.loads(
+                                    content_list[content_index]
+                                )
+                                args["content"] = updated_content
+                                tool_call["args"] = args
+                                content_index += 1
+                    ai_message.tool_calls = tool_calls
+
+                if len(content_list) > content_index:
+                    ai_message.content = content_list[-1]
+            else:
+                # Fallback for other message types
+                if content_list:
+                    last_message.content = content_list[-1]
+
+        return Command[Any](update={"messages": msgs})
+    except Exception as e:
+        raise AgentTerminationException(
+            code=UiPathErrorCode.EXECUTION_ERROR,
+            title="Escalation rejected",
+            detail=str(e),
+        ) from e
+
+
+def _extract_escalation_content(
+    state: AgentGuardrailsGraphState,
+    scope: GuardrailScope,
+    execution_stage: ExecutionStage,
+) -> str | list[str | Dict[str, Any]]:
+    """Extract escalation content from state based on guardrail scope and execution stage.
+
+    Args:
+        state: The current agent graph state.
+        scope: The guardrail scope (LLM/AGENT/TOOL).
+        execution_stage: The execution stage enum.
+
+    Returns:
+        For non-LLM scope: Empty string.
+        For LLM PreExecution: JSON string with message content.
+        For LLM PostExecution: JSON array with tool call content and message content.
+    """
+    if scope != GuardrailScope.LLM:
+        return ""
+
+    if not state.messages:
+        raise AgentTerminationException(
+            code=UiPathErrorCode.EXECUTION_ERROR,
+            title="Invalid state message",
+            detail="No messages in state",
+        )
+
+    last_message = state.messages[-1]
+    if execution_stage == ExecutionStage.PRE_EXECUTION:
+        if isinstance(last_message, ToolMessage):
+            return last_message.content
+
+        content = _message_text(last_message)
+        return json.dumps(content) if content else ""
+
+    # For AI messages, process tool calls if present
+    if isinstance(last_message, AIMessage):
+        ai_message: AIMessage = last_message
+        content_list: list[str] = []
+
+        if ai_message.tool_calls:
+            for tool_call in ai_message.tool_calls:
+                args = tool_call["args"]
+                if (
+                    isinstance(args, dict)
+                    and "content" in args
+                    and args["content"] is not None
+                ):
+                    content_list.append(json.dumps(args["content"]))
+
+        message_content = _message_text(last_message)
+        if message_content:
+            content_list.append(message_content)
+
+        return json.dumps(content_list)
+
+    # Fallback for other message types
+    return _message_text(last_message)
+
+
+def _execution_stage_to_escalation_field(
+    execution_stage: ExecutionStage,
+) -> str:
+    """Convert execution stage to escalation data field name.
+
+    Args:
+        execution_stage: The execution stage enum.
+
+    Returns:
+        "Inputs" for PRE_EXECUTION, "Outputs" for POST_EXECUTION.
+    """
+    return "Inputs" if execution_stage == ExecutionStage.PRE_EXECUTION else "Outputs"
```
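For orientation, the escalation payload assembled inside `_node` is a flat dictionary keyed by the fields shown above, with the last key chosen by `_execution_stage_to_escalation_field`. A minimal sketch of what that dict might look like for a hypothetical LLM-scope, post-execution violation; all values are invented for illustration, only the key names come from the diff:

```python
# Hypothetical escalation `data` dict as built by EscalateAction's node for an
# LLM-scope, POST_EXECUTION guardrail hit. Values are illustrative; the key names
# mirror the code above, and "Outputs" is what
# _execution_stage_to_escalation_field returns for POST_EXECUTION.
escalation_data = {
    "GuardrailName": "pii-filter",
    "GuardrailDescription": "Flags personal data in model output",
    "Component": "llm",
    "ExecutionStage": "PostExecution",
    "GuardrailResult": "Email address detected in the model response",
    "Outputs": '["Contact jane@example.com for details"]',
}
```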
uipath_langchain/agent/guardrails/actions/log_action.py

```diff
@@ -0,0 +1,57 @@
+import logging
+import re
+from typing import Any, Optional
+
+from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+from ..types import AgentGuardrailsGraphState
+from .base_action import GuardrailAction, GuardrailActionNode
+
+logger = logging.getLogger(__name__)
+
+
+class LogAction(GuardrailAction):
+    """Action that logs guardrail violation and continues."""
+
+    def __init__(self, message: Optional[str], level: int = logging.INFO) -> None:
+        """Initialize the log action.
+
+        Args:
+            message: Message to be logged.
+            level: Logging level used when reporting a guardrail failure.
+        """
+        self.message = message
+        self.level = level
+
+    def action_node(
+        self,
+        *,
+        guardrail: BaseGuardrail,
+        scope: GuardrailScope,
+        execution_stage: ExecutionStage,
+    ) -> GuardrailActionNode:
+        """Create a guardrail action node that logs validation failures.
+
+        Args:
+            guardrail: The guardrail whose failure is being logged.
+            scope: The scope in which the guardrail applies.
+            execution_stage: Whether this runs before or after execution.
+
+        Returns:
+            A tuple containing the node name and the async node callable.
+        """
+        raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}_log"
+        node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
+
+        async def _node(_state: AgentGuardrailsGraphState) -> dict[str, Any]:
+            message = (
+                self.message
+                or f"Guardrail [{guardrail.name}] validation failed for [{scope.name}] [{execution_stage.name}] with the following reason: {_state.guardrail_validation_result}"
+            )
+
+            logger.log(self.level, message)
+            return {}
+
+        return node_name, _node
```
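`LogAction.action_node` (and the evaluation nodes in the next file) derive graph node names by lowercasing and sanitizing `scope.name`, `execution_stage.name`, and the guardrail name. A standalone sketch of that sanitization, using a hypothetical guardrail called "PII Filter":

```python
import re

# Node naming as in LogAction.action_node, for a hypothetical guardrail named
# "PII Filter" checked at the LLM scope before execution
# (scope.name == "LLM", execution_stage.name == "PRE_EXECUTION").
raw_node_name = "LLM_PRE_EXECUTION_PII Filter_log"
node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
print(node_name)  # -> llm_pre_execution_pii_filter_log
```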
uipath_langchain/agent/guardrails/guardrail_nodes.py

```diff
@@ -0,0 +1,125 @@
+import logging
+import re
+from typing import Any, Callable
+
+from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage
+from langgraph.types import Command
+from uipath.platform import UiPath
+from uipath.platform.guardrails import (
+    BaseGuardrail,
+    GuardrailScope,
+)
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+from .types import AgentGuardrailsGraphState
+
+logger = logging.getLogger(__name__)
+
+
+def _message_text(msg: AnyMessage) -> str:
+    if isinstance(msg, (HumanMessage, SystemMessage)):
+        return msg.content if isinstance(msg.content, str) else str(msg.content)
+    return str(getattr(msg, "content", "")) if hasattr(msg, "content") else ""
+
+
+def _create_guardrail_node(
+    guardrail: BaseGuardrail,
+    scope: GuardrailScope,
+    execution_stage: ExecutionStage,
+    payload_generator: Callable[[AgentGuardrailsGraphState], str],
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    """Private factory for guardrail evaluation nodes.
+
+    Returns a node that evaluates the guardrail and routes via Command:
+    - goto success_node on validation pass
+    - goto failure_node on validation fail
+    """
+    raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}"
+    node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
+
+    async def node(
+        state: AgentGuardrailsGraphState,
+    ):
+        text = payload_generator(state)
+        try:
+            uipath = UiPath()
+            result = uipath.guardrails.evaluate_guardrail(text, guardrail)
+        except Exception as exc:
+            logger.error("Failed to evaluate guardrail: %s", exc)
+            raise
+
+        if not result.validation_passed:
+            return Command(
+                goto=failure_node, update={"guardrail_validation_result": result.reason}
+            )
+        return Command(goto=success_node, update={"guardrail_validation_result": None})
+
+    return node_name, node
+
+
+def create_llm_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        if not state.messages:
+            return ""
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.LLM,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
+
+
+def create_agent_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    # To be implemented in future PR
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        if not state.messages:
+            return ""
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.AGENT,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
+
+
+def create_tool_guardrail_node(
+    guardrail: BaseGuardrail,
+    execution_stage: ExecutionStage,
+    success_node: str,
+    failure_node: str,
+) -> tuple[str, Callable[[AgentGuardrailsGraphState], Any]]:
+    # To be implemented in future PR
+    def _payload_generator(state: AgentGuardrailsGraphState) -> str:
+        if not state.messages:
+            return ""
+        return _message_text(state.messages[-1])
+
+    return _create_guardrail_node(
+        guardrail,
+        GuardrailScope.TOOL,
+        execution_stage,
+        _payload_generator,
+        success_node,
+        failure_node,
+    )
```
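The nodes returned by these factories route by returning `Command(goto=...)` rather than through static edges, so the surrounding graph only needs the success and failure nodes registered. Below is a self-contained toy (not code from the package) that reproduces this routing contract with a plain `TypedDict` state in place of `AgentGuardrailsGraphState` and a hard-coded check in place of `uipath.guardrails.evaluate_guardrail`:

```python
from typing import Literal, TypedDict

from langgraph.graph import END, START, StateGraph
from langgraph.types import Command


class State(TypedDict, total=False):
    text: str
    guardrail_validation_result: str | None


async def guardrail_node(state: State) -> Command[Literal["success", "failure"]]:
    # Stand-in for uipath.guardrails.evaluate_guardrail(text, guardrail).
    passed = "secret" not in state.get("text", "")
    if not passed:
        return Command(
            goto="failure",
            update={"guardrail_validation_result": "blocked term found"},
        )
    return Command(goto="success", update={"guardrail_validation_result": None})


async def success(state: State) -> dict:
    return {}


async def failure(state: State) -> dict:
    return {}


builder = StateGraph(State)
builder.add_node("guardrail", guardrail_node)
builder.add_node("success", success)
builder.add_node("failure", failure)
builder.add_edge(START, "guardrail")
builder.add_edge("success", END)
builder.add_edge("failure", END)
graph = builder.compile()
```

Because the guardrail node declares its possible destinations in the `Command[Literal[...]]` return annotation, no conditional edge out of it is required.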
uipath_langchain/agent/guardrails/guardrails_factory.py

```diff
@@ -0,0 +1,70 @@
+import logging
+from typing import Sequence
+
+from uipath.agent.models.agent import (
+    AgentGuardrail,
+    AgentGuardrailBlockAction,
+    AgentGuardrailEscalateAction,
+    AgentGuardrailLogAction,
+    AgentGuardrailSeverityLevel,
+    AgentUnknownGuardrail,
+)
+from uipath.platform.guardrails import BaseGuardrail
+
+from uipath_langchain.agent.guardrails.actions import (
+    BlockAction,
+    EscalateAction,
+    GuardrailAction,
+    LogAction,
+)
+
+
+def build_guardrails_with_actions(
+    guardrails: Sequence[AgentGuardrail] | None,
+) -> list[tuple[BaseGuardrail, GuardrailAction]]:
+    """Build a list of (guardrail, action) tuples from model definitions.
+
+    Args:
+        guardrails: Sequence of guardrail model objects or None.
+
+    Returns:
+        A list of tuples pairing each supported guardrail with its executable action.
+    """
+    if not guardrails:
+        return []
+
+    result: list[tuple[BaseGuardrail, GuardrailAction]] = []
+    for guardrail in guardrails:
+        if isinstance(guardrail, AgentUnknownGuardrail):
+            continue
+
+        action = guardrail.action
+
+        if isinstance(action, AgentGuardrailBlockAction):
+            result.append((guardrail, BlockAction(action.reason)))
+        elif isinstance(action, AgentGuardrailLogAction):
+            severity_level_map = {
+                AgentGuardrailSeverityLevel.ERROR: logging.ERROR,
+                AgentGuardrailSeverityLevel.WARNING: logging.WARNING,
+                AgentGuardrailSeverityLevel.INFO: logging.INFO,
+            }
+            level = severity_level_map.get(action.severity_level, logging.INFO)
+            result.append(
+                (
+                    guardrail,
+                    LogAction(message=action.message, level=level),
+                )
+            )
+        elif isinstance(action, AgentGuardrailEscalateAction):
+            result.append(
+                (
+                    guardrail,
+                    EscalateAction(
+                        app_name=action.app.name,
+                        app_folder_path=action.app.folder_name,
+                        version=action.app.version,
+                        assignee=action.recipient.value,
+                    ),
+                )
+            )
+    return result
```
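A minimal usage sketch of the factory, assuming the 0.1.24 module layout shown in the file list above; per the code, a missing or empty guardrail list short-circuits to an empty result, `AgentUnknownGuardrail` entries are skipped, and every remaining guardrail is paired with a `BlockAction`, `LogAction`, or `EscalateAction`:

```python
# Assumes uipath-langchain 0.1.24 is installed; the module path is taken from the
# file list above. With no configured guardrails the factory returns an empty list.
from uipath_langchain.agent.guardrails.guardrails_factory import (
    build_guardrails_with_actions,
)

assert build_guardrails_with_actions(None) == []
assert build_guardrails_with_actions([]) == []
```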