uipath-langchain 0.0.133__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath_langchain/_cli/cli_init.py +130 -191
- uipath_langchain/_cli/cli_new.py +2 -3
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/_tracing/__init__.py +3 -2
- uipath_langchain/_tracing/_instrument_traceable.py +11 -12
- uipath_langchain/_utils/_request_mixin.py +327 -51
- uipath_langchain/_utils/_settings.py +2 -2
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +24 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +42 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +499 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +58 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +173 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +283 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +117 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +248 -35
- uipath_langchain/chat/openai.py +133 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/chat/vertex.py +255 -0
- uipath_langchain/embeddings/embeddings.py +131 -34
- uipath_langchain/middlewares.py +0 -6
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +386 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/METADATA +44 -23
- uipath_langchain-0.1.28.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.28.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_conversation.py +0 -298
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -139
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -379
- uipath_langchain/_cli/_utils/_graph.py +0 -199
- uipath_langchain/_cli/cli_dev.py +0 -44
- uipath_langchain/_cli/cli_eval.py +0 -78
- uipath_langchain/_cli/cli_run.py +0 -82
- uipath_langchain/_tracing/_oteladapter.py +0 -222
- uipath_langchain/_tracing/_utils.py +0 -28
- uipath_langchain/builder/agent_config.py +0 -191
- uipath_langchain/tools/preconfigured.py +0 -191
- uipath_langchain-0.0.133.dist-info/RECORD +0 -41
- uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
- /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,499 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
from typing import Any, Dict, Literal
|
|
6
|
+
|
|
7
|
+
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
|
|
8
|
+
from langgraph.types import Command, interrupt
|
|
9
|
+
from uipath.platform.common import CreateEscalation
|
|
10
|
+
from uipath.platform.guardrails import (
|
|
11
|
+
BaseGuardrail,
|
|
12
|
+
GuardrailScope,
|
|
13
|
+
)
|
|
14
|
+
from uipath.runtime.errors import UiPathErrorCode
|
|
15
|
+
|
|
16
|
+
from ...exceptions import AgentTerminationException
|
|
17
|
+
from ..guardrail_nodes import _message_text
|
|
18
|
+
from ..types import AgentGuardrailsGraphState, ExecutionStage
|
|
19
|
+
from .base_action import GuardrailAction, GuardrailActionNode
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class EscalateAction(GuardrailAction):
|
|
23
|
+
"""Node-producing action that inserts a HITL interruption node into the graph.
|
|
24
|
+
|
|
25
|
+
The returned node creates a human-in-the-loop interruption that suspends execution
|
|
26
|
+
and waits for human review. When execution resumes, if the escalation was approved,
|
|
27
|
+
the flow continues with the reviewed content; otherwise, an error is raised.
|
|
28
|
+
"""
|
|
29
|
+
|
|
30
|
+
def __init__(
|
|
31
|
+
self,
|
|
32
|
+
app_name: str,
|
|
33
|
+
app_folder_path: str,
|
|
34
|
+
version: int,
|
|
35
|
+
assignee: str,
|
|
36
|
+
):
|
|
37
|
+
"""Initialize EscalateAction with escalation app configuration.
|
|
38
|
+
|
|
39
|
+
Args:
|
|
40
|
+
app_name: Name of the escalation app.
|
|
41
|
+
app_folder_path: Folder path where the escalation app is located.
|
|
42
|
+
version: Version of the escalation app.
|
|
43
|
+
assignee: User or role assigned to handle the escalation.
|
|
44
|
+
"""
|
|
45
|
+
self.app_name = app_name
|
|
46
|
+
self.app_folder_path = app_folder_path
|
|
47
|
+
self.version = version
|
|
48
|
+
self.assignee = assignee
|
|
49
|
+
|
|
50
|
+
def action_node(
|
|
51
|
+
self,
|
|
52
|
+
*,
|
|
53
|
+
guardrail: BaseGuardrail,
|
|
54
|
+
scope: GuardrailScope,
|
|
55
|
+
execution_stage: ExecutionStage,
|
|
56
|
+
guarded_component_name: str,
|
|
57
|
+
) -> GuardrailActionNode:
|
|
58
|
+
"""Create a HITL escalation node for the guardrail.
|
|
59
|
+
|
|
60
|
+
Args:
|
|
61
|
+
guardrail: The guardrail that triggered this escalation action.
|
|
62
|
+
scope: The guardrail scope (LLM/AGENT/TOOL).
|
|
63
|
+
execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
|
|
64
|
+
|
|
65
|
+
Returns:
|
|
66
|
+
A tuple of (node_name, node_function) where the node function triggers
|
|
67
|
+
a HITL interruption and processes the escalation response.
|
|
68
|
+
"""
|
|
69
|
+
node_name = _get_node_name(execution_stage, guardrail, scope)
|
|
70
|
+
|
|
71
|
+
async def _node(
|
|
72
|
+
state: AgentGuardrailsGraphState,
|
|
73
|
+
) -> Dict[str, Any] | Command[Any]:
|
|
74
|
+
input = _extract_escalation_content(
|
|
75
|
+
state, scope, execution_stage, guarded_component_name
|
|
76
|
+
)
|
|
77
|
+
escalation_field = _execution_stage_to_escalation_field(execution_stage)
|
|
78
|
+
|
|
79
|
+
data = {
|
|
80
|
+
"GuardrailName": guardrail.name,
|
|
81
|
+
"GuardrailDescription": guardrail.description,
|
|
82
|
+
"Component": scope.name.lower(),
|
|
83
|
+
"ExecutionStage": _execution_stage_to_string(execution_stage),
|
|
84
|
+
"GuardrailResult": state.guardrail_validation_result,
|
|
85
|
+
escalation_field: input,
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
escalation_result = interrupt(
|
|
89
|
+
CreateEscalation(
|
|
90
|
+
app_name=self.app_name,
|
|
91
|
+
app_folder_path=self.app_folder_path,
|
|
92
|
+
title=self.app_name,
|
|
93
|
+
data=data,
|
|
94
|
+
assignee=self.assignee,
|
|
95
|
+
)
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
if escalation_result.action == "Approve":
|
|
99
|
+
return _process_escalation_response(
|
|
100
|
+
state,
|
|
101
|
+
escalation_result.data,
|
|
102
|
+
scope,
|
|
103
|
+
execution_stage,
|
|
104
|
+
guarded_component_name,
|
|
105
|
+
)
|
|
106
|
+
|
|
107
|
+
raise AgentTerminationException(
|
|
108
|
+
code=UiPathErrorCode.EXECUTION_ERROR,
|
|
109
|
+
title="Escalation rejected",
|
|
110
|
+
detail=f"Action was rejected after reviewing the task created by guardrail [{guardrail.name}]. Please contact your administrator.",
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
return node_name, _node
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _get_node_name(
    execution_stage: ExecutionStage, guardrail: BaseGuardrail, scope: GuardrailScope
) -> str:
    """Build the graph node name for a HITL escalation node.

    The guardrail name is sanitized (runs of non-word characters collapsed to
    "_") and combined with the execution stage and scope, e.g.
    "my_guardrail_hitl_pre_execution_llm".

    Args:
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
        guardrail: The guardrail that triggered this action.
        scope: The guardrail scope (LLM/AGENT/TOOL).

    Returns:
        The sanitized, lowercase node name.
    """
    sanitized = re.sub(r"\W+", "_", guardrail.name).strip("_").lower()
    # Use scope.name.lower() rather than scope.lower(): the latter only works
    # when GuardrailScope is a str-based enum, and the rest of this module
    # renders the scope via scope.name.lower().
    node_name = f"{sanitized}_hitl_{execution_stage.name.lower()}_{scope.name.lower()}"
    return node_name
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _process_escalation_response(
    state: AgentGuardrailsGraphState,
    escalation_result: Dict[str, Any],
    scope: GuardrailScope,
    execution_stage: ExecutionStage,
    guarded_node_name: str,
) -> Dict[str, Any] | Command[Any]:
    """Dispatch an approved escalation response to the scope-specific handler.

    Args:
        state: The current agent graph state.
        escalation_result: The result from the escalation interrupt containing
            reviewed inputs/outputs.
        scope: The guardrail scope (LLM/AGENT/TOOL).
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
        guarded_node_name: Name of the guarded component (used for TOOL scope).

    Returns:
        For LLM/TOOL scope: a Command updating messages with the reviewed
        inputs/outputs, or an empty dict. For AGENT scope: an empty dict
        (no message alteration).
    """
    if scope == GuardrailScope.LLM:
        return _process_llm_escalation_response(
            state, escalation_result, execution_stage
        )
    if scope == GuardrailScope.TOOL:
        return _process_tool_escalation_response(
            state, escalation_result, execution_stage, guarded_node_name
        )
    if scope == GuardrailScope.AGENT:
        return {}
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def _process_llm_escalation_response(
    state: AgentGuardrailsGraphState,
    escalation_result: Dict[str, Any],
    execution_stage: ExecutionStage,
) -> Dict[str, Any] | Command[Any]:
    """Process escalation response for LLM scope guardrails.

    Updates message content or tool calls based on reviewed inputs/outputs from escalation.

    Args:
        state: The current agent graph state.
        escalation_result: The result from the escalation interrupt containing reviewed inputs/outputs.
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).

    Returns:
        Command to update messages with reviewed inputs/outputs, or empty dict if no updates needed.

    Raises:
        AgentTerminationException: If escalation response processing fails.
    """
    try:
        # The escalation app returns reviewed content under a stage-specific key.
        reviewed_field = (
            "ReviewedInputs"
            if execution_stage == ExecutionStage.PRE_EXECUTION
            else "ReviewedOutputs"
        )

        # NOTE(review): .copy() is shallow — the message objects below are
        # mutated in place; the copy only protects the list itself.
        msgs = state.messages.copy()
        if not msgs or reviewed_field not in escalation_result:
            return {}

        last_message = msgs[-1]

        if execution_stage == ExecutionStage.PRE_EXECUTION:
            # Pre-execution: the reviewed value is a single JSON document that
            # replaces the last message's content wholesale.
            reviewed_content = escalation_result[reviewed_field]
            if reviewed_content:
                last_message.content = json.loads(reviewed_content)
        else:
            # Post-execution: the reviewed value is a JSON array of strings,
            # mirroring the order produced by _extract_llm_escalation_content
            # (one entry per content-bearing tool call, then the message text).
            reviewed_outputs_json = escalation_result[reviewed_field]
            if not reviewed_outputs_json:
                return {}

            content_list = json.loads(reviewed_outputs_json)
            if not content_list:
                return {}

            # For AI messages, process tool calls if present
            if isinstance(last_message, AIMessage):
                ai_message: AIMessage = last_message
                # Tracks which reviewed entry maps to which tool call.
                content_index = 0

                if ai_message.tool_calls:
                    tool_calls = list(ai_message.tool_calls)
                    for tool_call in tool_calls:
                        args = tool_call["args"]
                        # Only tool calls carrying a non-None "content" arg
                        # were extracted for review, so only those consume a
                        # slot from content_list.
                        if (
                            isinstance(args, dict)
                            and "content" in args
                            and args["content"] is not None
                        ):
                            if content_index < len(content_list):
                                # Each reviewed entry is itself JSON-encoded.
                                updated_content = json.loads(
                                    content_list[content_index]
                                )
                                args["content"] = updated_content
                                tool_call["args"] = args
                            content_index += 1
                    ai_message.tool_calls = tool_calls

                # Remaining entry beyond the tool-call slots is the message
                # text. NOTE(review): content_list[-1] is used rather than
                # content_list[content_index] — confirm this is intentional
                # when more than one extra entry is present.
                if len(content_list) > content_index:
                    ai_message.content = content_list[-1]
            else:
                # Fallback for other message types
                if content_list:
                    last_message.content = content_list[-1]

        return Command[Any](update={"messages": msgs})
    except Exception as e:
        # NOTE(review): any processing failure (e.g. malformed JSON) is
        # reported under the title "Escalation rejected" — confirm the title
        # is intended for this path too.
        raise AgentTerminationException(
            code=UiPathErrorCode.EXECUTION_ERROR,
            title="Escalation rejected",
            detail=str(e),
        ) from e
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def _process_tool_escalation_response(
    state: AgentGuardrailsGraphState,
    escalation_result: Dict[str, Any],
    execution_stage: ExecutionStage,
    tool_name: str,
) -> Dict[str, Any] | Command[Any]:
    """Process escalation response for TOOL scope guardrails.

    Updates the tool call arguments (PreExecution) or tool message content (PostExecution)
    for the specific tool matching the tool_name. For PreExecution, finds the tool call
    with the matching name and updates only that tool call's args with the reviewed dict.
    For PostExecution, updates the tool message content.

    Args:
        state: The current agent graph state.
        escalation_result: The result from the escalation interrupt containing reviewed inputs/outputs.
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
        tool_name: Name of the tool to update. Only the tool call matching this name will be updated.

    Returns:
        Command to update messages with reviewed tool call args or content, or empty dict if no updates needed.

    Raises:
        AgentTerminationException: If escalation response processing fails.
    """
    try:
        # The escalation app returns reviewed content under a stage-specific key.
        reviewed_field = (
            "ReviewedInputs"
            if execution_stage == ExecutionStage.PRE_EXECUTION
            else "ReviewedOutputs"
        )

        # Shallow copy: the message objects below are mutated in place.
        msgs = state.messages.copy()
        if not msgs or reviewed_field not in escalation_result:
            return {}

        last_message = msgs[-1]
        if execution_stage == ExecutionStage.PRE_EXECUTION:
            # Pre-execution only applies to an AI message carrying tool calls.
            if not isinstance(last_message, AIMessage):
                return {}

            # Get reviewed tool calls args from escalation result
            reviewed_inputs_json = escalation_result[reviewed_field]
            if not reviewed_inputs_json:
                return {}

            reviewed_tool_calls_args = json.loads(reviewed_inputs_json)
            if not isinstance(reviewed_tool_calls_args, dict):
                return {}

            # Find and update only the tool call with matching name.
            # (reviewed_tool_calls_args is guaranteed to be a dict here — the
            # redundant re-check that used to guard the assignment is gone.)
            if last_message.tool_calls:
                tool_calls = list(last_message.tool_calls)
                for tool_call in tool_calls:
                    if extract_tool_name(tool_call) == tool_name:
                        # ToolCall may be a dict (TypedDict) or an object.
                        if isinstance(tool_call, dict):
                            tool_call["args"] = reviewed_tool_calls_args
                        else:
                            tool_call.args = reviewed_tool_calls_args
                        break
                last_message.tool_calls = tool_calls
        else:
            if not isinstance(last_message, ToolMessage):
                return {}

            # PostExecution: update tool message content
            reviewed_outputs_json = escalation_result[reviewed_field]
            if reviewed_outputs_json:
                last_message.content = reviewed_outputs_json

        return Command[Any](update={"messages": msgs})
    except Exception as e:
        raise AgentTerminationException(
            code=UiPathErrorCode.EXECUTION_ERROR,
            title="Escalation rejected",
            detail=str(e),
        ) from e
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def _extract_escalation_content(
|
|
324
|
+
state: AgentGuardrailsGraphState,
|
|
325
|
+
scope: GuardrailScope,
|
|
326
|
+
execution_stage: ExecutionStage,
|
|
327
|
+
guarded_node_name: str,
|
|
328
|
+
) -> str | list[str | Dict[str, Any]]:
|
|
329
|
+
"""Extract escalation content from state based on guardrail scope and execution stage.
|
|
330
|
+
|
|
331
|
+
Args:
|
|
332
|
+
state: The current agent graph state.
|
|
333
|
+
scope: The guardrail scope (LLM/AGENT/TOOL).
|
|
334
|
+
execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
|
|
335
|
+
|
|
336
|
+
Returns:
|
|
337
|
+
str or list[str | Dict[str, Any]]: For LLM scope, returns JSON string or list with message/tool call content.
|
|
338
|
+
For AGENT scope, returns empty string. For TOOL scope, returns JSON string or list with tool-specific content.
|
|
339
|
+
|
|
340
|
+
Raises:
|
|
341
|
+
AgentTerminationException: If no messages are found in state.
|
|
342
|
+
"""
|
|
343
|
+
if not state.messages:
|
|
344
|
+
raise AgentTerminationException(
|
|
345
|
+
code=UiPathErrorCode.EXECUTION_ERROR,
|
|
346
|
+
title="Invalid state message",
|
|
347
|
+
detail="No message found into agent state",
|
|
348
|
+
)
|
|
349
|
+
|
|
350
|
+
match scope:
|
|
351
|
+
case GuardrailScope.LLM:
|
|
352
|
+
return _extract_llm_escalation_content(state, execution_stage)
|
|
353
|
+
case GuardrailScope.AGENT:
|
|
354
|
+
return _extract_agent_escalation_content(state, execution_stage)
|
|
355
|
+
case GuardrailScope.TOOL:
|
|
356
|
+
return _extract_tool_escalation_content(
|
|
357
|
+
state, execution_stage, guarded_node_name
|
|
358
|
+
)
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def _extract_llm_escalation_content(
    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage
) -> str | list[str | Dict[str, Any]]:
    """Extract escalation content for LLM scope guardrails.

    Args:
        state: The current agent graph state.
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).

    Returns:
        str or list[str | Dict[str, Any]]: For PreExecution, returns JSON string with message content or empty string.
        For PostExecution, returns JSON string (array) with tool call content and message content.
        Returns empty string if no content found.
    """
    last_message = state.messages[-1]
    if execution_stage == ExecutionStage.PRE_EXECUTION:
        # NOTE(review): a ToolMessage's content is returned as-is (may be a
        # str or a list) while other message types are JSON-encoded below —
        # confirm consumers of the escalation payload handle both shapes.
        if isinstance(last_message, ToolMessage):
            return last_message.content

        content = _message_text(last_message)
        return json.dumps(content) if content else ""

    # For AI messages, process tool calls if present
    if isinstance(last_message, AIMessage):
        ai_message: AIMessage = last_message
        content_list: list[str] = []

        if ai_message.tool_calls:
            for tool_call in ai_message.tool_calls:
                args = tool_call["args"]
                # Only tool calls carrying a non-None "content" arg contribute
                # an entry; each entry is JSON-encoded individually. The order
                # here must match _process_llm_escalation_response, which maps
                # reviewed entries back onto tool calls by position.
                if (
                    isinstance(args, dict)
                    and "content" in args
                    and args["content"] is not None
                ):
                    content_list.append(json.dumps(args["content"]))

        # The plain message text, if any, is appended last.
        message_content = _message_text(last_message)
        if message_content:
            content_list.append(message_content)

        return json.dumps(content_list)

    # Fallback for other message types
    return _message_text(last_message)
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
def _extract_agent_escalation_content(
    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage
) -> str | list[str | Dict[str, Any]]:
    """Return the escalation content for AGENT scope guardrails.

    AGENT-scope escalations carry no extracted content, so this always
    yields an empty string regardless of the state or execution stage.

    Args:
        state: The current agent graph state (unused).
        execution_stage: The execution stage (unused).

    Returns:
        str: The empty string.
    """
    del state, execution_stage  # intentionally unused
    return ""
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
def _extract_tool_escalation_content(
    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage, tool_name: str
) -> str | list[str | Dict[str, Any]]:
    """Extract escalation content for TOOL scope guardrails.

    Args:
        state: The current agent graph state.
        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
        tool_name: Name of the tool whose call arguments should be extracted.

    Returns:
        For PreExecution, the JSON-serialized args of the tool call whose name
        matches ``tool_name`` (empty string if absent). For PostExecution, the
        tool message content (empty string if the last message is not a
        ToolMessage).
    """
    last_message = state.messages[-1]

    if execution_stage != ExecutionStage.PRE_EXECUTION:
        # PostExecution: surface the raw tool output, if any.
        if isinstance(last_message, ToolMessage):
            return last_message.content
        return ""

    # PreExecution: only an AI message carrying tool calls is relevant.
    if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
        return ""

    # Locate the call issued to the guarded tool and serialize its arguments.
    for tool_call in last_message.tool_calls:
        if extract_tool_name(tool_call) == tool_name:
            if isinstance(tool_call, dict):
                call_args = tool_call.get("args")
            else:
                call_args = getattr(tool_call, "args", None)
            if call_args is not None:
                return json.dumps(call_args)
    return ""
|
|
462
|
+
|
|
463
|
+
|
|
464
|
+
def extract_tool_name(tool_call: ToolCall) -> Any | None:
    """Return the tool name from a tool call, supporting dict and object forms.

    Args:
        tool_call: A tool call, either a dict (TypedDict) or an object with a
            ``name`` attribute.

    Returns:
        The tool name, or None if it is absent.
    """
    if isinstance(tool_call, dict):
        return tool_call.get("name")
    return getattr(tool_call, "name", None)
|
|
470
|
+
|
|
471
|
+
|
|
472
|
+
def _execution_stage_to_escalation_field(
    execution_stage: ExecutionStage,
) -> str:
    """Convert execution stage to escalation data field name.

    Args:
        execution_stage: The execution stage enum.

    Returns:
        "Inputs" for PRE_EXECUTION, "Outputs" for POST_EXECUTION.
    """
    if execution_stage == ExecutionStage.PRE_EXECUTION:
        return "Inputs"
    return "Outputs"
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
def _execution_stage_to_string(
    execution_stage: ExecutionStage,
) -> Literal["PreExecution", "PostExecution"]:
    """Convert ExecutionStage enum to string literal.

    Args:
        execution_stage: The execution stage enum.

    Returns:
        "PreExecution" for PRE_EXECUTION, "PostExecution" otherwise.
    """
    return (
        "PreExecution"
        if execution_stage == ExecutionStage.PRE_EXECUTION
        else "PostExecution"
    )
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
from typing import Any, Optional
|
|
4
|
+
|
|
5
|
+
from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
|
|
6
|
+
|
|
7
|
+
from uipath_langchain.agent.guardrails.types import ExecutionStage
|
|
8
|
+
|
|
9
|
+
from ..types import AgentGuardrailsGraphState
|
|
10
|
+
from .base_action import GuardrailAction, GuardrailActionNode
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class LogAction(GuardrailAction):
    """Action that logs guardrail violation and continues."""

    def __init__(self, message: Optional[str], level: int = logging.INFO) -> None:
        """Initialize the log action.

        Args:
            message: Message to be logged; when falsy, a default message
                describing the failure is built at log time.
            level: Logging level used when reporting a guardrail failure.
        """
        self.message = message
        self.level = level

    def action_node(
        self,
        *,
        guardrail: BaseGuardrail,
        scope: GuardrailScope,
        execution_stage: ExecutionStage,
        guarded_component_name: str,
    ) -> GuardrailActionNode:
        """Create a guardrail action node that logs validation failures.

        Args:
            guardrail: The guardrail whose failure is being logged.
            scope: The scope in which the guardrail applies.
            execution_stage: Whether this runs before or after execution.
            guarded_component_name: Name of the guarded component (not used
                by this action).

        Returns:
            A tuple containing the node name and the async node callable.
        """
        raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}_log"
        node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")

        async def _node(_state: AgentGuardrailsGraphState) -> dict[str, Any]:
            # Prefer the configured message; otherwise build a descriptive
            # default from the guardrail context and validation result.
            if self.message:
                message = self.message
            else:
                message = f"Guardrail [{guardrail.name}] validation failed for [{scope.name}] [{execution_stage.name}] with the following reason: {_state.guardrail_validation_result}"

            logger.log(self.level, message)
            return {}

        return node_name, _node
|