uipath-langchain 0.1.24__py3-none-any.whl → 0.1.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath_langchain/_utils/_request_mixin.py +8 -0
- uipath_langchain/_utils/_settings.py +3 -2
- uipath_langchain/agent/guardrails/__init__.py +0 -16
- uipath_langchain/agent/guardrails/actions/__init__.py +2 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +1 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +2 -1
- uipath_langchain/agent/guardrails/actions/escalate_action.py +243 -35
- uipath_langchain/agent/guardrails/actions/filter_action.py +55 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +2 -1
- uipath_langchain/agent/guardrails/guardrail_nodes.py +186 -22
- uipath_langchain/agent/guardrails/guardrails_factory.py +200 -4
- uipath_langchain/agent/guardrails/types.py +0 -12
- uipath_langchain/agent/guardrails/utils.py +146 -0
- uipath_langchain/agent/react/agent.py +25 -8
- uipath_langchain/agent/react/constants.py +1 -2
- uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py +94 -19
- uipath_langchain/agent/react/llm_node.py +41 -10
- uipath_langchain/agent/react/router.py +48 -37
- uipath_langchain/agent/react/types.py +15 -1
- uipath_langchain/agent/react/utils.py +1 -1
- uipath_langchain/agent/tools/__init__.py +2 -0
- uipath_langchain/agent/tools/mcp_tool.py +86 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +16 -0
- uipath_langchain/chat/openai.py +57 -26
- uipath_langchain/chat/supported_models.py +9 -0
- uipath_langchain/chat/vertex.py +271 -0
- uipath_langchain/embeddings/embeddings.py +18 -12
- uipath_langchain/runtime/schema.py +116 -23
- {uipath_langchain-0.1.24.dist-info → uipath_langchain-0.1.34.dist-info}/METADATA +9 -6
- {uipath_langchain-0.1.24.dist-info → uipath_langchain-0.1.34.dist-info}/RECORD +34 -31
- uipath_langchain/chat/gemini.py +0 -330
- {uipath_langchain-0.1.24.dist-info → uipath_langchain-0.1.34.dist-info}/WHEEL +0 -0
- {uipath_langchain-0.1.24.dist-info → uipath_langchain-0.1.34.dist-info}/entry_points.txt +0 -0
- {uipath_langchain-0.1.24.dist-info → uipath_langchain-0.1.34.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/react/agent.py

@@ -9,9 +9,14 @@ from langgraph.graph import StateGraph
 from pydantic import BaseModel
 from uipath.platform.guardrails import BaseGuardrail
 
-from ..guardrails import create_llm_guardrails_subgraph
 from ..guardrails.actions import GuardrailAction
 from ..tools import create_tool_node
+from .guardrails.guardrails_subgraph import (
+    create_agent_init_guardrails_subgraph,
+    create_agent_terminate_guardrails_subgraph,
+    create_llm_guardrails_subgraph,
+    create_tools_guardrails_subgraph,
+)
 from .init_node import (
     create_init_node,
 )
@@ -19,7 +24,7 @@ from .llm_node import (
     create_llm_node,
 )
 from .router import (
-    route_agent,
+    create_route_agent,
 )
 from .terminate_node import (
     create_terminate_node,
@@ -53,7 +58,7 @@ def create_agent(
     config: AgentGraphConfig | None = None,
     guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None = None,
 ) -> StateGraph[AgentGraphState, None, InputT, OutputT]:
-    """Build agent graph with INIT -> AGENT(subgraph) <-> TOOLS loop, terminated by control flow tools.
+    """Build agent graph with INIT -> AGENT (subgraph) <-> TOOLS loop, terminated by control flow tools.
 
     The AGENT node is a subgraph that runs:
     - before-agent guardrail middlewares
@@ -73,6 +78,9 @@ def create_agent(
 
     init_node = create_init_node(messages)
     tool_nodes = create_tool_node(agent_tools)
+    tool_nodes_with_guardrails = create_tools_guardrails_subgraph(
+        tool_nodes, guardrails
+    )
     terminate_node = create_terminate_node(output_schema)
 
     InnerAgentGraphState = create_state_with_input(
@@ -82,23 +90,32 @@ def create_agent(
     builder: StateGraph[AgentGraphState, None, InputT, OutputT] = StateGraph(
         InnerAgentGraphState, input_schema=input_schema, output_schema=output_schema
     )
-    builder.add_node(AgentGraphNode.INIT, init_node)
+    init_with_guardrails_subgraph = create_agent_init_guardrails_subgraph(
+        (AgentGraphNode.GUARDED_INIT, init_node),
+        guardrails,
+    )
+    builder.add_node(AgentGraphNode.INIT, init_with_guardrails_subgraph)
 
-    for tool_name, tool_node in tool_nodes.items():
+    for tool_name, tool_node in tool_nodes_with_guardrails.items():
         builder.add_node(tool_name, tool_node)
 
-    builder.add_node(AgentGraphNode.TERMINATE, terminate_node)
+    terminate_with_guardrails_subgraph = create_agent_terminate_guardrails_subgraph(
+        (AgentGraphNode.GUARDED_TERMINATE, terminate_node),
+        guardrails,
+    )
+    builder.add_node(AgentGraphNode.TERMINATE, terminate_with_guardrails_subgraph)
 
     builder.add_edge(START, AgentGraphNode.INIT)
 
-    llm_node = create_llm_node(model, llm_tools)
+    llm_node = create_llm_node(model, llm_tools, config.thinking_messages_limit)
     llm_with_guardrails_subgraph = create_llm_guardrails_subgraph(
         (AgentGraphNode.LLM, llm_node), guardrails
     )
     builder.add_node(AgentGraphNode.AGENT, llm_with_guardrails_subgraph)
     builder.add_edge(AgentGraphNode.INIT, AgentGraphNode.AGENT)
 
-    tool_node_names = list(tool_nodes.keys())
+    tool_node_names = list(tool_nodes_with_guardrails.keys())
+    route_agent = create_route_agent(config.thinking_messages_limit)
     builder.add_conditional_edges(
         AgentGraphNode.AGENT,
         route_agent,
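The wiring above threads `config.thinking_messages_limit` into both the LLM node and the new `create_route_agent` factory, and swaps the bare INIT/TERMINATE/tool nodes for guardrail subgraphs. A minimal, self-contained sketch of the resulting INIT -> AGENT <-> TOOLS -> TERMINATE topology; the node bodies and the fixed routing rule are stand-ins, only the node names mirror `AgentGraphNode`:

```python
from typing import TypedDict

from langgraph.constants import END, START
from langgraph.graph import StateGraph


class State(TypedDict):
    steps: list[str]


def record(name: str):
    # Stand-in node body: append the node name to a trace.
    return lambda state: {"steps": state["steps"] + [name]}


builder = StateGraph(State)
for name in ("init", "agent", "tools", "terminate"):
    builder.add_node(name, record(name))
builder.add_edge(START, "init")
builder.add_edge("init", "agent")
# create_agent installs create_route_agent(...) here; a fixed rule stands in.
builder.add_conditional_edges(
    "agent", lambda s: "tools" if len(s["steps"]) < 3 else "terminate"
)
builder.add_edge("tools", "agent")
builder.add_edge("terminate", END)

graph = builder.compile()
print(graph.invoke({"steps": []})["steps"])
# ['init', 'agent', 'tools', 'agent', 'terminate']
```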
uipath_langchain/agent/react/constants.py

@@ -1,2 +1 @@
-
-MAX_SUCCESSIVE_COMPLETIONS = 1
+MAX_CONSECUTIVE_THINKING_MESSAGES = 0
uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py

@@ -1,22 +1,30 @@
+from functools import partial
 from typing import Any, Callable, Sequence
 
 from langgraph.constants import END, START
 from langgraph.graph import StateGraph
+from langgraph.prebuilt import ToolNode
 from uipath.platform.guardrails import (
     BaseGuardrail,
     BuiltInValidatorGuardrail,
     GuardrailScope,
 )
 
-from uipath_langchain.agent.guardrails.
-
-
-
-
+from uipath_langchain.agent.guardrails.actions.base_action import (
+    GuardrailAction,
+    GuardrailActionNode,
+)
+from uipath_langchain.agent.guardrails.guardrail_nodes import (
+    create_agent_init_guardrail_node,
+    create_agent_terminate_guardrail_node,
     create_llm_guardrail_node,
     create_tool_guardrail_node,
 )
-from .types import ExecutionStage
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+from uipath_langchain.agent.react.types import (
+    AgentGraphState,
+    AgentGuardrailsGraphState,
+)
 
 _VALIDATOR_ALLOWED_STAGES = {
     "prompt_injection": {ExecutionStage.PRE_EXECUTION},
@@ -90,6 +98,7 @@ def _create_guardrails_subgraph(
             ExecutionStage.PRE_EXECUTION,
             node_factory,
             inner_name,
+            inner_name,
         )
         subgraph.add_edge(START, first_pre_exec_guardrail_node)
     else:
@@ -107,6 +116,7 @@ def _create_guardrails_subgraph(
             ExecutionStage.POST_EXECUTION,
             node_factory,
             END,
+            inner_node,
         )
         subgraph.add_edge(inner_name, first_post_exec_guardrail_node)
     else:
@@ -130,6 +140,7 @@ def _build_guardrail_node_chain(
         GuardrailActionNode,
     ],
     next_node: str,
+    guarded_node_name: str,
 ) -> str:
     """Recursively build a chain of guardrail nodes in reverse order.
 
@@ -157,7 +168,10 @@ def _build_guardrail_node_chain(
     remaining_guardrails = guardrails[:-1]
 
     fail_node_name, fail_node = action.action_node(
-        guardrail=guardrail,
+        guardrail=guardrail,
+        scope=scope,
+        execution_stage=execution_stage,
+        guarded_component_name=guarded_node_name,
     )
 
     # Create the guardrail evaluation node.
@@ -179,6 +193,7 @@ def _build_guardrail_node_chain(
         execution_stage,
         node_factory,
         guardrail_node_name,
+        guarded_node_name,
     )
 
     return previous_node_name
@@ -193,6 +208,9 @@ def create_llm_guardrails_subgraph(
         for (guardrail, _) in (guardrails or [])
         if GuardrailScope.LLM in guardrail.selector.scopes
     ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return llm_node[1]
+
     return _create_guardrails_subgraph(
         main_inner_node=llm_node,
         guardrails=applicable_guardrails,
@@ -202,30 +220,84 @@ def create_llm_guardrails_subgraph(
     )
 
 
-def
-
+def create_tools_guardrails_subgraph(
+    tool_nodes: dict[str, ToolNode],
     guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
-
-
-
-
-    This is intended for wrapping nodes like INIT or TERMINATE, where guardrails should run
-    either before (pre-execution) or after (post-execution) the node logic.
+) -> dict[str, ToolNode]:
+    """Create tool nodes with guardrails.
+    Args:
     """
+    result: dict[str, ToolNode] = {}
+    for tool_name, tool_node in tool_nodes.items():
+        subgraph = create_tool_guardrails_subgraph(
+            (tool_name, tool_node),
+            guardrails,
+        )
+        result[tool_name] = subgraph
+
+    return result
+
+
+def create_agent_init_guardrails_subgraph(
+    init_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+):
+    """Create a subgraph for INIT node that applies guardrails on the state messages."""
     applicable_guardrails = [
         (guardrail, _)
         for (guardrail, _) in (guardrails or [])
         if GuardrailScope.AGENT in guardrail.selector.scopes
     ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return init_node[1]
+
     return _create_guardrails_subgraph(
-        main_inner_node=
+        main_inner_node=init_node,
         guardrails=applicable_guardrails,
         scope=GuardrailScope.AGENT,
-        execution_stages=[
-        node_factory=
+        execution_stages=[ExecutionStage.POST_EXECUTION],
+        node_factory=create_agent_init_guardrail_node,
     )
 
 
+def create_agent_terminate_guardrails_subgraph(
+    terminate_node: tuple[str, Any],
+    guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+):
+    """Create a subgraph for TERMINATE node that applies guardrails on the agent result."""
+    node_name, node_func = terminate_node
+
+    def terminate_wrapper(state: Any) -> dict[str, Any]:
+        # Call original terminate node
+        result = node_func(state)
+        # Store result in state
+        return {"agent_result": result, "messages": state.messages}
+
+    applicable_guardrails = [
+        (guardrail, _)
+        for (guardrail, _) in (guardrails or [])
+        if GuardrailScope.AGENT in guardrail.selector.scopes
+    ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return terminate_node[1]
+
+    subgraph = _create_guardrails_subgraph(
+        main_inner_node=(node_name, terminate_wrapper),
+        guardrails=applicable_guardrails,
+        scope=GuardrailScope.AGENT,
+        execution_stages=[ExecutionStage.POST_EXECUTION],
+        node_factory=create_agent_terminate_guardrail_node,
+    )
+
+    async def run_terminate_subgraph(
+        state: AgentGraphState,
+    ) -> dict[str, Any]:
+        result_state = await subgraph.ainvoke(state)
+        return result_state["agent_result"]
+
+    return run_terminate_subgraph
+
+
 def create_tool_guardrails_subgraph(
     tool_node: tuple[str, Any],
     guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
@@ -238,10 +310,13 @@ def create_tool_guardrails_subgraph(
         and guardrail.selector.match_names is not None
         and tool_name in guardrail.selector.match_names
     ]
+    if applicable_guardrails is None or len(applicable_guardrails) == 0:
+        return tool_node[1]
+
     return _create_guardrails_subgraph(
         main_inner_node=tool_node,
         guardrails=applicable_guardrails,
         scope=GuardrailScope.TOOL,
         execution_stages=[ExecutionStage.PRE_EXECUTION, ExecutionStage.POST_EXECUTION],
-        node_factory=create_tool_guardrail_node,
+        node_factory=partial(create_tool_guardrail_node, tool_name=tool_name),
    )
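Each `create_*_guardrails_subgraph` factory now short-circuits: when no guardrail's selector matches the scope (and, for tools, the tool name), it returns the bare inner node instead of building a one-node subgraph. A standalone illustration of that filter-and-fall-through pattern; `Selector` and `Guardrail` below are simplified stand-ins, not the uipath classes:

```python
from dataclasses import dataclass
from typing import Callable


@dataclass
class Selector:
    scopes: set[str]
    match_names: set[str] | None = None


@dataclass
class Guardrail:
    name: str
    selector: Selector


def wrap_tool(tool_node: tuple[str, Callable], guardrails: list[Guardrail]):
    tool_name, node = tool_node
    applicable = [
        g
        for g in guardrails
        if "TOOL" in g.selector.scopes
        and g.selector.match_names is not None
        and tool_name in g.selector.match_names
    ]
    if not applicable:
        return node  # mirrors `return tool_node[1]` above
    return ("guarded", tool_name, [g.name for g in applicable])  # subgraph stand-in


node = lambda state: state
print(wrap_tool(("search", node), []) is node)  # True: no guardrails, bare node
print(wrap_tool(("search", node), [Guardrail("pii", Selector({"TOOL"}, {"search"}))]))
# ('guarded', 'search', ['pii'])
```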
uipath_langchain/agent/react/llm_node.py

@@ -1,34 +1,65 @@
-"""LLM node
+"""LLM node for ReAct Agent graph."""
 
-from typing import Sequence
+from typing import Literal, Sequence
 
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, AnyMessage
 from langchain_core.tools import BaseTool
 
-from .constants import MAX_SUCCESSIVE_COMPLETIONS
+from .constants import MAX_CONSECUTIVE_THINKING_MESSAGES
 from .types import AgentGraphState
-from .utils import
+from .utils import count_consecutive_thinking_messages
+
+OPENAI_COMPATIBLE_CHAT_MODELS = (
+    "UiPathChatOpenAI",
+    "AzureChatOpenAI",
+    "ChatOpenAI",
+    "UiPathChat",
+    "UiPathAzureChatOpenAI",
+)
+
+
+def _get_required_tool_choice_by_model(
+    model: BaseChatModel,
+) -> Literal["required", "any"]:
+    """Get the appropriate tool_choice value to enforce tool usage based on model type.
+
+    "required" - OpenAI compatible required tool_choice value
+    "any" - Vertex and Bedrock parameter for required tool_choice value
+    """
+    model_class_name = model.__class__.__name__
+    if model_class_name in OPENAI_COMPATIBLE_CHAT_MODELS:
+        return "required"
+    return "any"
 
 
 def create_llm_node(
     model: BaseChatModel,
     tools: Sequence[BaseTool] | None = None,
+    thinking_messages_limit: int = MAX_CONSECUTIVE_THINKING_MESSAGES,
 ):
-    """
+    """Create LLM node with dynamic tool_choice enforcement.
 
-
-
+    Controls when to force tool usage based on consecutive thinking steps
+    to prevent infinite loops and ensure progress.
+
+    Args:
+        model: The chat model to use
+        tools: Available tools to bind
+        thinking_messages_limit: Max consecutive LLM responses without tool calls
+            before enforcing tool usage. 0 = force tools every time.
     """
     bindable_tools = list(tools) if tools else []
     base_llm = model.bind_tools(bindable_tools) if bindable_tools else model
+    tool_choice_required_value = _get_required_tool_choice_by_model(model)
 
     async def llm_node(state: AgentGraphState):
         messages: list[AnyMessage] = state.messages
 
-
-
-
+        consecutive_thinking_messages = count_consecutive_thinking_messages(messages)
+
+        if bindable_tools and consecutive_thinking_messages >= thinking_messages_limit:
+            llm = base_llm.bind(tool_choice=tool_choice_required_value)
         else:
             llm = base_llm
 
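The `tool_choice` value that forces a tool call differs by provider, so the new helper dispatches on the chat-model class name. The check can be exercised standalone; the dummy classes below merely mimic the class names the tuple matches on:

```python
OPENAI_COMPATIBLE_CHAT_MODELS = (
    "UiPathChatOpenAI",
    "AzureChatOpenAI",
    "ChatOpenAI",
    "UiPathChat",
    "UiPathAzureChatOpenAI",
)


def required_tool_choice(model: object) -> str:
    # "required" for OpenAI-style APIs, "any" for Vertex/Bedrock-style APIs.
    name = model.__class__.__name__
    return "required" if name in OPENAI_COMPATIBLE_CHAT_MODELS else "any"


class ChatOpenAI: ...           # name matches the tuple
class ChatBedrockConverse: ...  # any other chat model falls through

print(required_tool_choice(ChatOpenAI()))           # required
print(required_tool_choice(ChatBedrockConverse()))  # any
```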
uipath_langchain/agent/react/router.py

@@ -6,9 +6,8 @@ from langchain_core.messages import AIMessage, AnyMessage, ToolCall
 from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
 
 from ..exceptions import AgentNodeRoutingException
-from .constants import MAX_SUCCESSIVE_COMPLETIONS
 from .types import AgentGraphNode, AgentGraphState
-from .utils import
+from .utils import count_consecutive_thinking_messages
 
 FLOW_CONTROL_TOOLS = [END_EXECUTION_TOOL.name, RAISE_ERROR_TOOL.name]
 
@@ -48,50 +47,62 @@ def __validate_last_message_is_AI(messages: list[AnyMessage]) -> AIMessage:
     return last_message
 
 
-def route_agent(
-    state: AgentGraphState,
-) -> list[str] | Literal[AgentGraphNode.AGENT, AgentGraphNode.TERMINATE]:
-    """Route after agent: handles all routing logic including control flow detection.
+def create_route_agent(thinking_messages_limit: int = 0):
+    """Create a routing function configured with thinking_messages_limit.
 
-    Routing logic:
-    1. If multiple tool calls exist, filter out control flow tools (EndExecution, RaiseError)
-    2. If control flow tool(s) remain, route to TERMINATE
-    3. If regular tool calls remain, route to specific tool nodes (return list of tool names)
-    4. If no tool calls, handle successive completions
+    Args:
+        thinking_messages_limit: Max consecutive thinking messages before error
 
     Returns:
-        - list[str]: Tool node names for parallel execution
-        - AgentGraphNode.AGENT: For successive completions
-        - AgentGraphNode.TERMINATE: For control flow termination
-
-    Raises:
-        AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
+        Routing function for LangGraph conditional edges
     """
-    messages = state.messages
-    last_message = __validate_last_message_is_AI(messages)
 
-
-
+    def route_agent(
+        state: AgentGraphState,
+    ) -> list[str] | Literal[AgentGraphNode.AGENT, AgentGraphNode.TERMINATE]:
+        """Route after agent: handles all routing logic including control flow detection.
+
+        Routing logic:
+        1. If multiple tool calls exist, filter out control flow tools (EndExecution, RaiseError)
+        2. If control flow tool(s) remain, route to TERMINATE
+        3. If regular tool calls remain, route to specific tool nodes (return list of tool names)
+        4. If no tool calls, handle consecutive completions
+
+        Returns:
+            - list[str]: Tool node names for parallel execution
+            - AgentGraphNode.AGENT: For consecutive completions
+            - AgentGraphNode.TERMINATE: For control flow termination
+
+        Raises:
+            AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
+        """
+        messages = state.messages
+        last_message = __validate_last_message_is_AI(messages)
 
-
-
+        tool_calls = list(last_message.tool_calls) if last_message.tool_calls else []
+        tool_calls = __filter_control_flow_tool_calls(tool_calls)
 
-
-
+        if tool_calls and __has_control_flow_tool(tool_calls):
+            return AgentGraphNode.TERMINATE
 
-
+        if tool_calls:
+            return [tc["name"] for tc in tool_calls]
+
+        consecutive_thinking_messages = count_consecutive_thinking_messages(messages)
+
+        if consecutive_thinking_messages > thinking_messages_limit:
+            raise AgentNodeRoutingException(
+                f"Agent exceeded consecutive completions limit without producing tool calls "
+                f"(completions: {consecutive_thinking_messages}, max: {thinking_messages_limit}). "
+                f"This should not happen as tool_choice='required' is enforced at the limit."
+            )
+
+        if last_message.content:
+            return AgentGraphNode.AGENT
 
-    if successive_completions > MAX_SUCCESSIVE_COMPLETIONS:
         raise AgentNodeRoutingException(
-            f"Agent 
-            f"(completions: {
-            f"This should not happen as tool_choice='required' is enforced at the limit."
+            f"Agent produced empty response without tool calls "
+            f"(completions: {consecutive_thinking_messages}, has_content: False)"
         )
 
-    if last_message.content:
-        return AgentGraphNode.AGENT
-
-    raise AgentNodeRoutingException(
-        f"Agent produced empty response without tool calls "
-        f"(completions: {successive_completions}, has_content: False)"
-    )
+    return route_agent
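The router is now a closure factory: `create_route_agent` captures `thinking_messages_limit` once and returns the function handed to `add_conditional_edges`. A condensed sketch of the routing order (control-flow termination beats tool fan-out beats thinking), using real `AIMessage` objects but skipping the limit/error handling shown in the hunk:

```python
from langchain_core.messages import AIMessage

FLOW_CONTROL_TOOLS = {"EndExecution", "RaiseError"}


def create_route_agent(thinking_messages_limit: int = 0):
    def route_agent(messages: list[AIMessage]) -> list[str] | str:
        last = messages[-1]
        calls = list(last.tool_calls or [])
        if any(tc["name"] in FLOW_CONTROL_TOOLS for tc in calls):
            return "terminate"  # control-flow tool wins
        if calls:
            return [tc["name"] for tc in calls]  # parallel tool fan-out
        return "agent"  # thinking message; real code also enforces the limit
    return route_agent


route = create_route_agent(thinking_messages_limit=1)
search = {"name": "search", "args": {}, "id": "1"}
print(route([AIMessage(content="", tool_calls=[search])]))  # ['search']
print(route([AIMessage(content="let me think...")]))        # agent
```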
uipath_langchain/agent/react/types.py

@@ -1,5 +1,5 @@
 from enum import StrEnum
-from typing import Annotated
+from typing import Annotated, Any, Optional
 
 from langchain_core.messages import AnyMessage
 from langgraph.graph.message import add_messages
@@ -25,15 +25,29 @@ class AgentGraphState(BaseModel):
     termination: AgentTermination | None = None
 
 
+class AgentGuardrailsGraphState(AgentGraphState):
+    """Agent Guardrails Graph state for guardrail subgraph."""
+
+    guardrail_validation_result: Optional[str] = None
+    agent_result: Optional[dict[str, Any]] = None
+
+
 class AgentGraphNode(StrEnum):
     INIT = "init"
+    GUARDED_INIT = "guarded-init"
     AGENT = "agent"
     LLM = "llm"
     TOOLS = "tools"
     TERMINATE = "terminate"
+    GUARDED_TERMINATE = "guarded-terminate"
 
 
 class AgentGraphConfig(BaseModel):
     recursion_limit: int = Field(
         default=50, ge=1, description="Maximum recursion limit for the agent graph"
     )
+    thinking_messages_limit: int = Field(
+        default=0,
+        ge=0,
+        description="Max consecutive thinking messages before enforcing tool usage. 0 = force tools every time.",
+    )
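`AgentGraphConfig` gains a validated `thinking_messages_limit` field (`ge=0`, default `0` = force tools on every completion). The pydantic behavior can be checked in isolation with an equivalent model:

```python
from pydantic import BaseModel, Field, ValidationError


class AgentGraphConfig(BaseModel):
    # Mirrors the two Field definitions in the hunk above.
    recursion_limit: int = Field(default=50, ge=1)
    thinking_messages_limit: int = Field(default=0, ge=0)


print(AgentGraphConfig().thinking_messages_limit)  # 0 -> force tools every time
print(AgentGraphConfig(thinking_messages_limit=2).thinking_messages_limit)  # 2

try:
    AgentGraphConfig(thinking_messages_limit=-1)
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # greater_than_equal
```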
uipath_langchain/agent/react/utils.py

@@ -28,7 +28,7 @@ def resolve_output_model(
         return END_EXECUTION_TOOL.args_schema
 
 
-def
+def count_consecutive_thinking_messages(messages: Sequence[BaseMessage]) -> int:
     """Count consecutive AIMessages without tool calls at end of message history."""
     if not messages:
         return 0
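Only the renamed helper's signature and docstring are visible in this hunk; a plausible body consistent with that docstring (walk backwards, count trailing `AIMessage`s that carry no tool calls) would look like this. The body below is inferred, not taken from the diff:

```python
from typing import Sequence

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def count_consecutive_thinking_messages(messages: Sequence[BaseMessage]) -> int:
    """Count consecutive AIMessages without tool calls at end of message history."""
    count = 0
    for message in reversed(messages):  # walk back from the newest message
        if isinstance(message, AIMessage) and not message.tool_calls:
            count += 1
        else:
            break
    return count


history = [
    HumanMessage("find the report"),
    AIMessage("thinking about where to look..."),
    AIMessage("still thinking..."),
]
print(count_consecutive_thinking_messages(history))  # 2
```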
uipath_langchain/agent/tools/__init__.py

@@ -2,6 +2,7 @@
 
 from .context_tool import create_context_tool
 from .integration_tool import create_integration_tool
+from .mcp_tool import create_mcp_tools
 from .process_tool import create_process_tool
 from .tool_factory import (
     create_tools_from_resources,
@@ -14,4 +15,5 @@ __all__ = [
     "create_context_tool",
     "create_process_tool",
     "create_integration_tool",
+    "create_mcp_tools",
 ]
uipath_langchain/agent/tools/mcp_tool.py (new file)

@@ -0,0 +1,86 @@
+import asyncio
+import os
+from collections import Counter, defaultdict
+from contextlib import AsyncExitStack, asynccontextmanager
+from itertools import chain
+
+import httpx
+from langchain_core.tools import BaseTool
+from langchain_mcp_adapters.tools import load_mcp_tools
+from mcp import ClientSession
+from mcp.client.streamable_http import streamable_http_client
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.agent.models.agent import AgentMcpResourceConfig
+
+
+def _deduplicate_tools(tools: list[BaseTool]) -> list[BaseTool]:
+    """Deduplicate tools by appending numeric suffix to duplicate names."""
+    counts = Counter(tool.name for tool in tools)
+    seen: defaultdict[str, int] = defaultdict(int)
+
+    for tool in tools:
+        if counts[tool.name] > 1:
+            seen[tool.name] += 1
+            tool.name = f"{tool.name}_{seen[tool.name]}"
+
+    return tools
+
+
+def _filter_tools(tools: list[BaseTool], cfg: AgentMcpResourceConfig) -> list[BaseTool]:
+    """Filter tools to only include those in available_tools."""
+    allowed = {t.name for t in cfg.available_tools}
+    return [t for t in tools if t.name in allowed]
+
+
+@asynccontextmanager
+async def create_mcp_tools(
+    config: AgentMcpResourceConfig | list[AgentMcpResourceConfig],
+    max_concurrency: int = 5,
+):
+    """Connect to UiPath MCP server(s) and yield LangChain-compatible tools."""
+    if not (base_url := os.getenv("UIPATH_URL")):
+        raise ValueError("UIPATH_URL environment variable is not set")
+    if not (access_token := os.getenv("UIPATH_ACCESS_TOKEN")):
+        raise ValueError("UIPATH_ACCESS_TOKEN environment variable is not set")
+
+    configs = config if isinstance(config, list) else [config]
+    enabled = [c for c in configs if c.is_enabled is not False]
+
+    if not enabled:
+        yield []
+        return
+
+    base_url = base_url.rstrip("/")
+    semaphore = asyncio.Semaphore(max_concurrency)
+
+    default_client_kwargs = get_httpx_client_kwargs()
+    client_kwargs = {
+        **default_client_kwargs,
+        "headers": {"Authorization": f"Bearer {access_token}"},
+        "timeout": httpx.Timeout(60),
+    }
+
+    async def init_session(
+        session: ClientSession, cfg: AgentMcpResourceConfig
+    ) -> list[BaseTool]:
+        async with semaphore:
+            await session.initialize()
+            tools = await load_mcp_tools(session)
+            return _filter_tools(tools, cfg)
+
+    async def create_session(
+        stack: AsyncExitStack, cfg: AgentMcpResourceConfig
+    ) -> ClientSession:
+        url = f"{base_url}/agenthub_/mcp/{cfg.folder_path}/{cfg.slug}"
+        http_client = await stack.enter_async_context(
+            httpx.AsyncClient(**client_kwargs)
+        )
+        read, write, _ = await stack.enter_async_context(
+            streamable_http_client(url=url, http_client=http_client)
+        )
+        return await stack.enter_async_context(ClientSession(read, write))
+
+    async with AsyncExitStack() as stack:
+        sessions = [(await create_session(stack, cfg), cfg) for cfg in enabled]
+        results = await asyncio.gather(*[init_session(s, cfg) for s, cfg in sessions])
+        yield _deduplicate_tools(list(chain.from_iterable(results)))
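One detail of the new module worth calling out: `_deduplicate_tools` renames every member of a colliding name group, so the first duplicate also gets a `_1` suffix. Reproduced standalone with a stand-in tool class:

```python
from collections import Counter, defaultdict
from dataclasses import dataclass


@dataclass
class Tool:  # stand-in for langchain_core.tools.BaseTool
    name: str


def deduplicate(tools: list[Tool]) -> list[Tool]:
    # Same algorithm as _deduplicate_tools above.
    counts = Counter(tool.name for tool in tools)
    seen: defaultdict[str, int] = defaultdict(int)
    for tool in tools:
        if counts[tool.name] > 1:
            seen[tool.name] += 1
            tool.name = f"{tool.name}_{seen[tool.name]}"
    return tools


tools = deduplicate([Tool("search"), Tool("search"), Tool("fetch")])
print([t.name for t in tools])  # ['search_1', 'search_2', 'fetch']
```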
uipath_langchain/chat/__init__.py

@@ -1,10 +1,14 @@
 from .mapper import UiPathChatMessagesMapper
 from .models import UiPathAzureChatOpenAI, UiPathChat
 from .openai import UiPathChatOpenAI
+from .supported_models import BedrockModels, GeminiModels, OpenAIModels
 
 __all__ = [
     "UiPathChat",
     "UiPathAzureChatOpenAI",
     "UiPathChatOpenAI",
     "UiPathChatMessagesMapper",
+    "OpenAIModels",
+    "BedrockModels",
+    "GeminiModels",
 ]