uipath-langchain 0.1.28__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff compares publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the packages exactly as they appear in those registries.
- uipath_langchain/_cli/_templates/langgraph.json.template +2 -4
- uipath_langchain/_cli/cli_new.py +1 -2
- uipath_langchain/_utils/_request_mixin.py +8 -0
- uipath_langchain/_utils/_settings.py +3 -2
- uipath_langchain/agent/guardrails/__init__.py +0 -16
- uipath_langchain/agent/guardrails/actions/__init__.py +2 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +1 -1
- uipath_langchain/agent/guardrails/actions/escalate_action.py +265 -138
- uipath_langchain/agent/guardrails/actions/filter_action.py +290 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +1 -1
- uipath_langchain/agent/guardrails/guardrail_nodes.py +193 -42
- uipath_langchain/agent/guardrails/guardrails_factory.py +235 -14
- uipath_langchain/agent/guardrails/types.py +0 -12
- uipath_langchain/agent/guardrails/utils.py +177 -0
- uipath_langchain/agent/react/agent.py +24 -9
- uipath_langchain/agent/react/constants.py +1 -2
- uipath_langchain/agent/react/file_type_handler.py +123 -0
- uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py +119 -25
- uipath_langchain/agent/react/init_node.py +16 -1
- uipath_langchain/agent/react/job_attachments.py +125 -0
- uipath_langchain/agent/react/json_utils.py +183 -0
- uipath_langchain/agent/react/jsonschema_pydantic_converter.py +76 -0
- uipath_langchain/agent/react/llm_node.py +41 -10
- uipath_langchain/agent/react/llm_with_files.py +76 -0
- uipath_langchain/agent/react/router.py +48 -37
- uipath_langchain/agent/react/types.py +19 -1
- uipath_langchain/agent/react/utils.py +30 -4
- uipath_langchain/agent/tools/__init__.py +7 -1
- uipath_langchain/agent/tools/context_tool.py +151 -1
- uipath_langchain/agent/tools/escalation_tool.py +46 -15
- uipath_langchain/agent/tools/integration_tool.py +20 -16
- uipath_langchain/agent/tools/internal_tools/__init__.py +5 -0
- uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py +113 -0
- uipath_langchain/agent/tools/internal_tools/internal_tool_factory.py +54 -0
- uipath_langchain/agent/tools/mcp_tool.py +86 -0
- uipath_langchain/agent/tools/process_tool.py +8 -1
- uipath_langchain/agent/tools/static_args.py +18 -40
- uipath_langchain/agent/tools/tool_factory.py +13 -5
- uipath_langchain/agent/tools/tool_node.py +133 -4
- uipath_langchain/agent/tools/utils.py +31 -0
- uipath_langchain/agent/wrappers/__init__.py +6 -0
- uipath_langchain/agent/wrappers/job_attachment_wrapper.py +62 -0
- uipath_langchain/agent/wrappers/static_args_wrapper.py +34 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +16 -0
- uipath_langchain/chat/mapper.py +60 -42
- uipath_langchain/chat/openai.py +56 -26
- uipath_langchain/chat/supported_models.py +9 -0
- uipath_langchain/chat/vertex.py +62 -46
- uipath_langchain/embeddings/embeddings.py +18 -12
- uipath_langchain/runtime/factory.py +10 -5
- uipath_langchain/runtime/runtime.py +38 -35
- uipath_langchain/runtime/schema.py +72 -16
- uipath_langchain/runtime/storage.py +178 -71
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/METADATA +7 -4
- uipath_langchain-0.3.1.dist-info/RECORD +90 -0
- uipath_langchain-0.1.28.dist-info/RECORD +0 -76
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/WHEEL +0 -0
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/entry_points.txt +0 -0
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/guardrails/guardrails_factory.py

@@ -1,22 +1,218 @@
 import logging
-
+import re
+from typing import Callable, Sequence

 from uipath.agent.models.agent import (
+    AgentBooleanOperator,
+    AgentBooleanRule,
+    AgentCustomGuardrail,
     AgentGuardrail,
     AgentGuardrailBlockAction,
     AgentGuardrailEscalateAction,
+    AgentGuardrailFilterAction,
     AgentGuardrailLogAction,
     AgentGuardrailSeverityLevel,
+    AgentNumberOperator,
+    AgentNumberRule,
     AgentUnknownGuardrail,
+    AgentWordOperator,
+    AgentWordRule,
+    StandardRecipient,
 )
-from uipath.
+from uipath.core.guardrails import (
+    BooleanRule,
+    DeterministicGuardrail,
+    NumberRule,
+    UniversalRule,
+    WordRule,
+)
+from uipath.platform.guardrails import BaseGuardrail, GuardrailScope

 from uipath_langchain.agent.guardrails.actions import (
     BlockAction,
     EscalateAction,
+    FilterAction,
     GuardrailAction,
     LogAction,
 )
+from uipath_langchain.agent.guardrails.utils import _sanitize_selector_tool_names
+
+
+def _assert_value_not_none(value: str | None, operator: AgentWordOperator) -> str:
+    """Assert value is not None and return as string."""
+    assert value is not None, f"value cannot be None for {operator.name} operator"
+    return value
+
+
+def _create_word_rule_func(
+    operator: AgentWordOperator, value: str | None
+) -> Callable[[str], bool]:
+    """Create a callable function from AgentWordOperator and value.
+
+    Args:
+        operator: The word operator to convert.
+        value: The value to compare against (may be None for isEmpty/isNotEmpty).
+
+    Returns:
+        A callable that takes a string and returns a boolean.
+    """
+    match operator:
+        case AgentWordOperator.CONTAINS:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: val.lower() in s.lower()
+        case AgentWordOperator.DOES_NOT_CONTAIN:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: val.lower() not in s.lower()
+        case AgentWordOperator.EQUALS:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: s == val
+        case AgentWordOperator.DOES_NOT_EQUAL:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: s != val
+        case AgentWordOperator.STARTS_WITH:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: s.startswith(val)
+        case AgentWordOperator.DOES_NOT_START_WITH:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: not s.startswith(val)
+        case AgentWordOperator.ENDS_WITH:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: s.endswith(val)
+        case AgentWordOperator.DOES_NOT_END_WITH:
+            val = _assert_value_not_none(value, operator)
+            return lambda s: not s.endswith(val)
+        case AgentWordOperator.IS_EMPTY:
+            return lambda s: len(s) == 0
+        case AgentWordOperator.IS_NOT_EMPTY:
+            return lambda s: len(s) > 0
+        case AgentWordOperator.MATCHES_REGEX:
+            val = _assert_value_not_none(value, operator)
+            pattern = re.compile(val)
+            return lambda s: bool(pattern.match(s))
+        case _:
+            raise ValueError(f"Unsupported word operator: {operator}")
+
+
+def _create_number_rule_func(
+    operator: AgentNumberOperator, value: float
+) -> Callable[[float], bool]:
+    """Create a callable function from AgentNumberOperator and value.
+
+    Args:
+        operator: The number operator to convert.
+        value: The value to compare against.
+
+    Returns:
+        A callable that takes a float and returns a boolean.
+    """
+    match operator:
+        case AgentNumberOperator.EQUALS:
+            return lambda n: n == value
+        case AgentNumberOperator.DOES_NOT_EQUAL:
+            return lambda n: n != value
+        case AgentNumberOperator.GREATER_THAN:
+            return lambda n: n > value
+        case AgentNumberOperator.GREATER_THAN_OR_EQUAL:
+            return lambda n: n >= value
+        case AgentNumberOperator.LESS_THAN:
+            return lambda n: n < value
+        case AgentNumberOperator.LESS_THAN_OR_EQUAL:
+            return lambda n: n <= value
+        case _:
+            raise ValueError(f"Unsupported number operator: {operator}")
+
+
+def _create_boolean_rule_func(
+    operator: AgentBooleanOperator, value: bool
+) -> Callable[[bool], bool]:
+    """Create a callable function from AgentBooleanOperator and value.
+
+    Args:
+        operator: The boolean operator to convert.
+        value: The value to compare against.
+
+    Returns:
+        A callable that takes a boolean and returns a boolean.
+    """
+    match operator:
+        case AgentBooleanOperator.EQUALS:
+            return lambda b: b == value
+        case _:
+            raise ValueError(f"Unsupported boolean operator: {operator}")
+
+
+def _convert_agent_rule_to_deterministic(
+    agent_rule: AgentWordRule | AgentNumberRule | AgentBooleanRule | UniversalRule,
+) -> WordRule | NumberRule | BooleanRule | UniversalRule:
+    """Convert an Agent rule to its Deterministic equivalent.
+
+    Args:
+        agent_rule: The agent rule to convert.
+
+    Returns:
+        The corresponding deterministic rule with a callable function.
+    """
+    if isinstance(agent_rule, UniversalRule):
+        # UniversalRule is already compatible
+        return agent_rule
+
+    if isinstance(agent_rule, AgentWordRule):
+        return WordRule(
+            rule_type="word",
+            field_selector=agent_rule.field_selector,
+            detects_violation=_create_word_rule_func(
+                agent_rule.operator, agent_rule.value
+            ),
+        )
+
+    if isinstance(agent_rule, AgentNumberRule):
+        return NumberRule(
+            rule_type="number",
+            field_selector=agent_rule.field_selector,
+            detects_violation=_create_number_rule_func(
+                agent_rule.operator, agent_rule.value
+            ),
+        )
+
+    if isinstance(agent_rule, AgentBooleanRule):
+        return BooleanRule(
+            rule_type="boolean",
+            field_selector=agent_rule.field_selector,
+            detects_violation=_create_boolean_rule_func(
+                agent_rule.operator, agent_rule.value
+            ),
+        )
+
+    raise ValueError(f"Unsupported agent rule type: {type(agent_rule)}")
+
+
+def _convert_agent_custom_guardrail_to_deterministic(
+    guardrail: AgentCustomGuardrail,
+) -> DeterministicGuardrail:
+    """Convert AgentCustomGuardrail to DeterministicGuardrail.
+
+    Args:
+        guardrail: The agent custom guardrail to convert.
+
+    Returns:
+        A DeterministicGuardrail with converted rules and sanitized selector.
+    """
+    converted_rules = [
+        _convert_agent_rule_to_deterministic(rule) for rule in guardrail.rules
+    ]
+
+    # Sanitize tool names in selector for Tool scope guardrails
+    sanitized_selector = _sanitize_selector_tool_names(guardrail.selector)
+
+    return DeterministicGuardrail(
+        id=guardrail.id,
+        name=guardrail.name,
+        description=guardrail.description,
+        enabled_for_evals=guardrail.enabled_for_evals,
+        selector=sanitized_selector,
+        guardrail_type="custom",
+        rules=converted_rules,
+    )


 def build_guardrails_with_actions(

@@ -38,10 +234,32 @@ def build_guardrails_with_actions(
         if isinstance(guardrail, AgentUnknownGuardrail):
             continue

+        converted_guardrail: BaseGuardrail
+        if isinstance(guardrail, AgentCustomGuardrail):
+            converted_guardrail = _convert_agent_custom_guardrail_to_deterministic(
+                guardrail
+            )
+            # Validate that DeterministicGuardrails only have TOOL scope
+            non_tool_scopes = [
+                scope
+                for scope in converted_guardrail.selector.scopes
+                if scope != GuardrailScope.TOOL
+            ]
+
+            if non_tool_scopes:
+                raise ValueError(
+                    f"Deterministic guardrail '{converted_guardrail.name}' can only be used with TOOL scope. "
+                    f"Found invalid scopes: {[scope.name for scope in non_tool_scopes]}. "
+                    f"Please configure this guardrail to use only TOOL scope."
+                )
+        else:
+            converted_guardrail = guardrail
+            _sanitize_selector_tool_names(converted_guardrail.selector)
+
         action = guardrail.action

         if isinstance(action, AgentGuardrailBlockAction):
-            result.append((
+            result.append((converted_guardrail, BlockAction(action.reason)))
         elif isinstance(action, AgentGuardrailLogAction):
             severity_level_map = {
                 AgentGuardrailSeverityLevel.ERROR: logging.ERROR,

@@ -51,20 +269,23 @@ def build_guardrails_with_actions(
             level = severity_level_map.get(action.severity_level, logging.INFO)
             result.append(
                 (
-
+                    converted_guardrail,
                     LogAction(message=action.message, level=level),
                 )
             )
         elif isinstance(action, AgentGuardrailEscalateAction):
-
-            (
-
-
-
-
-
-
+            if isinstance(action.recipient, StandardRecipient):
+                result.append(
+                    (
+                        converted_guardrail,
+                        EscalateAction(
+                            app_name=action.app.name,
+                            app_folder_path=action.app.folder_name,
+                            version=action.app.version,
+                            assignee=action.recipient.value,
+                        ),
+                    )
                 )
-
+        elif isinstance(action, AgentGuardrailFilterAction):
+            result.append((converted_guardrail, FilterAction(fields=action.fields)))
     return result
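The bulk of these additions translate the declarative Agent rule models into plain Python predicates via `match` statements. The following is a minimal, self-contained sketch of the word-rule semantics (case-insensitive CONTAINS, `re.match`-anchored MATCHES_REGEX, value-less IS_EMPTY); the `WordOp` enum and `word_predicate` name are illustrative stand-ins, not the package's real `AgentWordOperator` or API.

```python
import re
from enum import Enum
from typing import Callable


class WordOp(Enum):  # hypothetical stand-in for AgentWordOperator
    CONTAINS = "contains"
    MATCHES_REGEX = "matchesRegex"
    IS_EMPTY = "isEmpty"


def word_predicate(op: WordOp, value: str | None) -> Callable[[str], bool]:
    """Mirror of the _create_word_rule_func behaviour for three operators."""
    match op:
        case WordOp.CONTAINS:
            assert value is not None, "value cannot be None for CONTAINS"
            needle = value
            return lambda s: needle.lower() in s.lower()  # case-insensitive
        case WordOp.MATCHES_REGEX:
            assert value is not None, "value cannot be None for MATCHES_REGEX"
            pattern = re.compile(value)
            return lambda s: bool(pattern.match(s))  # anchored at the start
        case WordOp.IS_EMPTY:
            return lambda s: len(s) == 0
        case _:
            raise ValueError(f"Unsupported word operator: {op}")


contains_ssn = word_predicate(WordOp.CONTAINS, "SSN")
print(contains_ssn("customer ssn: 123-45-6789"))  # True
```

Each converted rule is then paired with its action (Block, Log, Escalate, or the new Filter) as a `(converted_guardrail, action)` tuple in `build_guardrails_with_actions`.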
uipath_langchain/agent/guardrails/types.py

@@ -1,16 +1,4 @@
 from enum import Enum
-from typing import Annotated, Optional
-
-from langchain_core.messages import AnyMessage
-from langgraph.graph.message import add_messages
-from pydantic import BaseModel
-
-
-class AgentGuardrailsGraphState(BaseModel):
-    """Agent Guardrails Graph state for guardrail subgraph."""
-
-    messages: Annotated[list[AnyMessage], add_messages] = []
-    guardrail_validation_result: Optional[str] = None


 class ExecutionStage(str, Enum):
uipath_langchain/agent/guardrails/utils.py (new file)

@@ -0,0 +1,177 @@
+import json
+import logging
+from typing import Any
+
+from langchain_core.messages import (
+    AIMessage,
+    AnyMessage,
+    HumanMessage,
+    SystemMessage,
+    ToolMessage,
+)
+
+from uipath_langchain.agent.react.types import AgentGuardrailsGraphState
+from uipath_langchain.agent.tools.utils import sanitize_tool_name
+
+logger = logging.getLogger(__name__)
+
+
+def _extract_tool_args_from_message(
+    message: AnyMessage, tool_name: str
+) -> dict[str, Any]:
+    """Extract tool call arguments from an AIMessage.
+
+    Args:
+        message: The message to extract from.
+        tool_name: Name of the tool to extract arguments from.
+
+    Returns:
+        Dict containing tool call arguments, or empty dict if not found.
+    """
+    if not isinstance(message, AIMessage):
+        return {}
+
+    if not message.tool_calls:
+        return {}
+
+    # Find the first tool call with matching name
+    for tool_call in message.tool_calls:
+        call_name = (
+            tool_call.get("name")
+            if isinstance(tool_call, dict)
+            else getattr(tool_call, "name", None)
+        )
+        if call_name == tool_name:
+            # Extract args from the tool call
+            args = (
+                tool_call.get("args")
+                if isinstance(tool_call, dict)
+                else getattr(tool_call, "args", None)
+            )
+            if args is not None:
+                # Args should already be a dict
+                if isinstance(args, dict):
+                    return args
+                # If it's a JSON string, parse it
+                if isinstance(args, str):
+                    try:
+                        parsed = json.loads(args)
+                        if isinstance(parsed, dict):
+                            return parsed
+                    except json.JSONDecodeError:
+                        logger.warning(
+                            "Failed to parse tool args as JSON for tool '%s'", tool_name
+                        )
+            return {}
+
+    return {}
+
+
+def _extract_tools_args_from_message(message: AnyMessage) -> list[dict[str, Any]]:
+    if not isinstance(message, AIMessage):
+        return []
+
+    if not message.tool_calls:
+        return []
+
+    result: list[dict[str, Any]] = []
+
+    for tool_call in message.tool_calls:
+        args = (
+            tool_call.get("args")
+            if isinstance(tool_call, dict)
+            else getattr(tool_call, "args", None)
+        )
+        if args is not None:
+            # Args should already be a dict
+            if isinstance(args, dict):
+                result.append(args)
+            # If it's a JSON string, parse it
+            elif isinstance(args, str):
+                try:
+                    parsed = json.loads(args)
+                    if isinstance(parsed, dict):
+                        result.append(parsed)
+                except json.JSONDecodeError:
+                    logger.warning("Failed to parse tool args as JSON")
+
+    return result
+
+
+def _extract_tool_output_data(state: AgentGuardrailsGraphState) -> dict[str, Any]:
+    """Extract tool execution output as dict for POST_EXECUTION deterministic guardrails.
+
+    Args:
+        state: The current agent graph state.
+
+    Returns:
+        Dict containing tool output. If output is not valid JSON, wraps it in {"output": content}.
+    """
+    if not state.messages:
+        return {}
+
+    last_message = state.messages[-1]
+    if not isinstance(last_message, ToolMessage):
+        return {}
+
+    content = last_message.content
+    if not content:
+        return {}
+
+    # Try to parse as JSON first
+    if isinstance(content, str):
+        try:
+            parsed = json.loads(content)
+            if isinstance(parsed, dict):
+                return parsed
+            else:
+                # JSON array or primitive - wrap it
+                return {"output": parsed}
+        except json.JSONDecodeError:
+            # Try to parse as Python literal (dict/list representation)
+            try:
+                import ast
+
+                parsed = ast.literal_eval(content)
+                if isinstance(parsed, dict):
+                    return parsed
+                else:
+                    return {"output": parsed}
+            except (ValueError, SyntaxError):
+                logger.warning("Tool output is not valid JSON or Python literal")
+                return {"output": content}
+    elif isinstance(content, dict):
+        return content
+    else:
+        # List or other type
+        return {"output": content}
+
+
+def get_message_content(msg: AnyMessage) -> str:
+    if isinstance(msg, (HumanMessage, SystemMessage)):
+        return msg.content if isinstance(msg.content, str) else str(msg.content)
+    return str(getattr(msg, "content", "")) if hasattr(msg, "content") else ""
+
+
+def _sanitize_selector_tool_names(selector):
+    """Sanitize tool names in the selector's match_names for Tool scope guardrails.
+
+    This ensures that the tool names in the selector match the sanitized tool names
+    used in the actual tool nodes.
+
+    Args:
+        selector: The guardrail selector object.
+
+    Returns:
+        The selector with sanitized match_names (if applicable).
+    """
+    from uipath.platform.guardrails import GuardrailScope
+
+    # Only sanitize for Tool scope guardrails
+    if GuardrailScope.TOOL in selector.scopes and selector.match_names is not None:
+        # Sanitize each tool name in match_names
+        sanitized_names = [sanitize_tool_name(name) for name in selector.match_names]
+        # Update the selector with sanitized names
+        selector.match_names = sanitized_names
+
+    return selector
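`_extract_tool_output_data` above normalises whatever a tool returned into a dict before POST_EXECUTION deterministic rules evaluate it: JSON first, a Python-literal fallback second, and finally wrapping the raw value under an "output" key. A standalone sketch of that fallback chain (the function name here is illustrative, not part of the package):

```python
import ast
import json
from typing import Any


def normalize_tool_output(content: str) -> dict[str, Any]:
    """Parse JSON, fall back to a Python literal, else wrap the raw string."""
    try:
        parsed = json.loads(content)
    except json.JSONDecodeError:
        try:
            parsed = ast.literal_eval(content)
        except (ValueError, SyntaxError):
            return {"output": content}
    return parsed if isinstance(parsed, dict) else {"output": parsed}


print(normalize_tool_output('{"status": "ok"}'))  # {'status': 'ok'}
print(normalize_tool_output("{'status': 'ok'}"))  # Python-literal fallback
print(normalize_tool_output("plain text"))        # {'output': 'plain text'}
```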
uipath_langchain/agent/react/agent.py

@@ -9,10 +9,13 @@ from langgraph.graph import StateGraph
 from pydantic import BaseModel
 from uipath.platform.guardrails import BaseGuardrail

-from ..guardrails import create_llm_guardrails_subgraph
 from ..guardrails.actions import GuardrailAction
-from 
-
+from .guardrails.guardrails_subgraph import (
+    create_agent_init_guardrails_subgraph,
+    create_agent_terminate_guardrails_subgraph,
+    create_llm_guardrails_subgraph,
+    create_tools_guardrails_subgraph,
+)
 from .init_node import (
     create_init_node,
 )

@@ -20,7 +23,7 @@ from .llm_node import (
     create_llm_node,
 )
 from .router import (
-
+    create_route_agent,
 )
 from .terminate_node import (
     create_terminate_node,

@@ -54,7 +57,7 @@ def create_agent(
     config: AgentGraphConfig | None = None,
     guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None = None,
 ) -> StateGraph[AgentGraphState, None, InputT, OutputT]:
-    """Build agent graph with INIT -> AGENT(subgraph) <-> TOOLS loop, terminated by control flow tools.
+    """Build agent graph with INIT -> AGENT (subgraph) <-> TOOLS loop, terminated by control flow tools.

     The AGENT node is a subgraph that runs:
     - before-agent guardrail middlewares

@@ -63,6 +66,8 @@ def create_agent(

     Control flow tools (end_execution, raise_error) are auto-injected alongside regular tools.
     """
+    from ..tools import create_tool_node
+
     if config is None:
         config = AgentGraphConfig()


@@ -72,7 +77,8 @@ def create_agent(
     flow_control_tools: list[BaseTool] = create_flow_control_tools(output_schema)
     llm_tools: list[BaseTool] = [*agent_tools, *flow_control_tools]

-    init_node = create_init_node(messages)
+    init_node = create_init_node(messages, input_schema)
+
     tool_nodes = create_tool_node(agent_tools)
     tool_nodes_with_guardrails = create_tools_guardrails_subgraph(
         tool_nodes, guardrails

@@ -86,16 +92,24 @@ def create_agent(
     builder: StateGraph[AgentGraphState, None, InputT, OutputT] = StateGraph(
         InnerAgentGraphState, input_schema=input_schema, output_schema=output_schema
     )
-
+    init_with_guardrails_subgraph = create_agent_init_guardrails_subgraph(
+        (AgentGraphNode.GUARDED_INIT, init_node),
+        guardrails,
+    )
+    builder.add_node(AgentGraphNode.INIT, init_with_guardrails_subgraph)

     for tool_name, tool_node in tool_nodes_with_guardrails.items():
         builder.add_node(tool_name, tool_node)

-
+    terminate_with_guardrails_subgraph = create_agent_terminate_guardrails_subgraph(
+        (AgentGraphNode.GUARDED_TERMINATE, terminate_node),
+        guardrails,
+    )
+    builder.add_node(AgentGraphNode.TERMINATE, terminate_with_guardrails_subgraph)

     builder.add_edge(START, AgentGraphNode.INIT)

-    llm_node = create_llm_node(model, llm_tools)
+    llm_node = create_llm_node(model, llm_tools, config.thinking_messages_limit)
     llm_with_guardrails_subgraph = create_llm_guardrails_subgraph(
         (AgentGraphNode.LLM, llm_node), guardrails
     )

@@ -103,6 +117,7 @@
     builder.add_edge(AgentGraphNode.INIT, AgentGraphNode.AGENT)

     tool_node_names = list(tool_nodes_with_guardrails.keys())
+    route_agent = create_route_agent(config.thinking_messages_limit)
     builder.add_conditional_edges(
         AgentGraphNode.AGENT,
         route_agent,
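For orientation, this is roughly the graph shape `create_agent`'s docstring describes (INIT -> AGENT <-> TOOLS, with TERMINATE reached through control-flow tools), reduced to a bare LangGraph sketch. The node names, toy state, and routing function are illustrative only; the real implementation wraps the INIT, LLM, TOOLS, and TERMINATE nodes in the guardrail subgraphs shown above.

```python
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    messages: list
    done: bool


def init_node(state: State) -> dict:
    return {"messages": state["messages"]}


def agent_node(state: State) -> dict:
    # A real LLM node would call the model; here we just flag completion.
    return {"done": True}


def tools_node(state: State) -> dict:
    return {"messages": state["messages"]}


def terminate_node(state: State) -> dict:
    return {"done": True}


def route_agent(state: State) -> str:
    # create_route_agent(...) in this diff additionally takes a thinking-messages limit.
    return "terminate" if state["done"] else "tools"


builder = StateGraph(State)
builder.add_node("init", init_node)
builder.add_node("agent", agent_node)
builder.add_node("tools", tools_node)
builder.add_node("terminate", terminate_node)
builder.add_edge(START, "init")
builder.add_edge("init", "agent")
builder.add_conditional_edges("agent", route_agent, {"tools": "tools", "terminate": "terminate"})
builder.add_edge("tools", "agent")
builder.add_edge("terminate", END)
graph = builder.compile()

print(graph.invoke({"messages": [], "done": False}))
```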
uipath_langchain/agent/react/constants.py

@@ -1,2 +1 @@
-
-MAX_SUCCESSIVE_COMPLETIONS = 1
+MAX_CONSECUTIVE_THINKING_MESSAGES = 0
uipath_langchain/agent/react/file_type_handler.py (new file)

@@ -0,0 +1,123 @@
+import base64
+from enum import StrEnum
+from typing import Any
+
+import httpx
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+
+IMAGE_MIME_TYPES: set[str] = {
+    "image/png",
+    "image/jpeg",
+    "image/gif",
+    "image/webp",
+}
+
+
+class LlmProvider(StrEnum):
+    OPENAI = "openai"
+    BEDROCK = "bedrock"
+    VERTEX = "vertex"
+    UNKNOWN = "unknown"
+
+
+def is_pdf(mime_type: str) -> bool:
+    """Check if the MIME type represents a PDF document."""
+    return mime_type.lower() == "application/pdf"
+
+
+def is_image(mime_type: str) -> bool:
+    """Check if the MIME type represents a supported image format (PNG, JPEG, GIF, WebP)."""
+    return mime_type.lower() in IMAGE_MIME_TYPES
+
+
+def detect_provider(model_name: str) -> LlmProvider:
+    """Detect the LLM provider (Bedrock, OpenAI, or Vertex) based on the model name."""
+    if not model_name:
+        raise ValueError(f"Unsupported model: {model_name}")
+
+    model_lower = model_name.lower()
+
+    if "anthropic" in model_lower or "claude" in model_lower:
+        return LlmProvider.BEDROCK
+
+    if "gpt" in model_lower:
+        return LlmProvider.OPENAI
+
+    if "gemini" in model_lower:
+        return LlmProvider.VERTEX
+
+    raise ValueError(f"Unsupported model: {model_name}")
+
+
+async def _download_file(url: str) -> str:
+    """Download a file from a URL and return its content as a base64 string."""
+    async with httpx.AsyncClient(**get_httpx_client_kwargs()) as client:
+        response = await client.get(url)
+        response.raise_for_status()
+        file_content = response.content
+
+    return base64.b64encode(file_content).decode("utf-8")
+
+
+async def build_message_content_part_from_data(
+    url: str,
+    filename: str,
+    mime_type: str,
+    model: str,
+) -> dict[str, Any]:
+    """Download a file and build a provider-specific message content part.
+
+    The format varies based on the detected provider (Bedrock, OpenAI, or Vertex).
+    """
+    provider = detect_provider(model)
+
+    if provider == LlmProvider.BEDROCK:
+        raise ValueError("Anthropic models are not yet supported for file attachments")
+
+    if provider == LlmProvider.OPENAI:
+        return await _build_openai_content_part_from_data(
+            url, mime_type, filename, False
+        )
+
+    if provider == LlmProvider.VERTEX:
+        raise ValueError("Gemini models are not yet supported for file attachments")
+
+    raise ValueError(f"Unsupported provider: {provider}")
+
+
+async def _build_openai_content_part_from_data(
+    url: str,
+    mime_type: str,
+    filename: str,
+    download_image: bool,
+) -> dict[str, Any]:
+    """Build a content part for OpenAI models (base64-encoded or URL reference)."""
+    if download_image:
+        base64_content = await _download_file(url)
+        if is_image(mime_type):
+            data_url = f"data:{mime_type};base64,{base64_content}"
+            return {
+                "type": "input_image",
+                "image_url": data_url,
+            }
+
+        if is_pdf(mime_type):
+            return {
+                "type": "input_file",
+                "filename": filename,
+                "file_data": base64_content,
+            }
+
+    elif is_image(mime_type):
+        return {
+            "type": "input_image",
+            "image_url": url,
+        }
+
+    elif is_pdf(mime_type):
+        return {
+            "type": "input_file",
+            "file_url": url,
+        }
+
+    raise ValueError(f"Unsupported mime_type: {mime_type}")
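A hypothetical usage sketch for these new helpers, assuming the module path shown in this diff and a reachable attachment URL (the URL is a placeholder). For OpenAI-family models the content part is built as a URL reference (the `download_image=False` branch), so a PNG becomes an `input_image` part and a PDF an `input_file` part:

```python
import asyncio

from uipath_langchain.agent.react.file_type_handler import (
    build_message_content_part_from_data,
    detect_provider,
)


async def main() -> None:
    # "gpt-4o" hits the OpenAI branch of detect_provider's substring checks.
    print(detect_provider("gpt-4o"))  # LlmProvider.OPENAI

    # With download_image=False, a PNG is passed by URL reference,
    # e.g. {"type": "input_image", "image_url": "https://example.com/chart.png"}.
    part = await build_message_content_part_from_data(
        url="https://example.com/chart.png",  # placeholder URL
        filename="chart.png",
        mime_type="image/png",
        model="gpt-4o",
    )
    print(part)


asyncio.run(main())
```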