uipath-langchain 0.0.133__py3-none-any.whl → 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath_langchain/_cli/cli_init.py +130 -191
- uipath_langchain/_cli/cli_new.py +2 -3
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/_tracing/__init__.py +3 -2
- uipath_langchain/_tracing/_instrument_traceable.py +11 -12
- uipath_langchain/_utils/_request_mixin.py +327 -51
- uipath_langchain/_utils/_settings.py +2 -2
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +24 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +42 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +499 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +58 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +173 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +283 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +117 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +248 -35
- uipath_langchain/chat/openai.py +133 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/chat/vertex.py +255 -0
- uipath_langchain/embeddings/embeddings.py +131 -34
- uipath_langchain/middlewares.py +0 -6
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +386 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/METADATA +44 -23
- uipath_langchain-0.1.28.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.28.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_conversation.py +0 -298
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -139
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -379
- uipath_langchain/_cli/_utils/_graph.py +0 -199
- uipath_langchain/_cli/cli_dev.py +0 -44
- uipath_langchain/_cli/cli_eval.py +0 -78
- uipath_langchain/_cli/cli_run.py +0 -82
- uipath_langchain/_tracing/_oteladapter.py +0 -222
- uipath_langchain/_tracing/_utils.py +0 -28
- uipath_langchain/builder/agent_config.py +0 -191
- uipath_langchain/tools/preconfigured.py +0 -191
- uipath_langchain-0.0.133.dist-info/RECORD +0 -41
- uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
- /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/licenses/LICENSE +0 -0

uipath_langchain/agent/react/llm_node.py
@@ -0,0 +1,43 @@
+"""LLM node implementation for LangGraph."""
+
+from typing import Sequence
+
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage, AnyMessage
+from langchain_core.tools import BaseTool
+
+from .constants import MAX_SUCCESSIVE_COMPLETIONS
+from .types import AgentGraphState
+from .utils import count_successive_completions
+
+
+def create_llm_node(
+    model: BaseChatModel,
+    tools: Sequence[BaseTool] | None = None,
+):
+    """Invoke LLM with tools and dynamically control tool_choice based on successive completions.
+
+    When successive completions reach the limit, tool_choice is set to "required" to force
+    the LLM to use a tool and prevent infinite reasoning loops.
+    """
+    bindable_tools = list(tools) if tools else []
+    base_llm = model.bind_tools(bindable_tools) if bindable_tools else model
+
+    async def llm_node(state: AgentGraphState):
+        messages: list[AnyMessage] = state.messages
+
+        successive_completions = count_successive_completions(messages)
+        if successive_completions >= MAX_SUCCESSIVE_COMPLETIONS:
+            llm = base_llm.bind(tool_choice="required")
+        else:
+            llm = base_llm
+
+        response = await llm.ainvoke(messages)
+        if not isinstance(response, AIMessage):
+            raise TypeError(
+                f"LLM returned {type(response).__name__} instead of AIMessage"
+            )
+
+        return {"messages": [response]}
+
+    return llm_node
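
As orientation, a minimal, hedged sketch of driving the new node factory outside a compiled graph; the ChatOpenAI model, the sample message, and the direct module import paths are illustrative assumptions, not part of this diff.

# Hedged sketch: exercising create_llm_node directly (model choice is an assumption).
import asyncio

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from uipath_langchain.agent.react.llm_node import create_llm_node
from uipath_langchain.agent.react.types import AgentGraphState

llm_node = create_llm_node(model=ChatOpenAI(model="gpt-4o-mini"), tools=[])

async def main() -> None:
    state = AgentGraphState(messages=[HumanMessage(content="Summarize the open tickets.")])
    update = await llm_node(state)          # returns a partial state update: {"messages": [AIMessage(...)]}
    print(update["messages"][-1].content)

asyncio.run(main())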

uipath_langchain/agent/react/router.py
@@ -0,0 +1,97 @@
+"""Routing functions for conditional edges in the agent graph."""
+
+from typing import Literal
+
+from langchain_core.messages import AIMessage, AnyMessage, ToolCall
+from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
+
+from ..exceptions import AgentNodeRoutingException
+from .constants import MAX_SUCCESSIVE_COMPLETIONS
+from .types import AgentGraphNode, AgentGraphState
+from .utils import count_successive_completions
+
+FLOW_CONTROL_TOOLS = [END_EXECUTION_TOOL.name, RAISE_ERROR_TOOL.name]
+
+
+def __filter_control_flow_tool_calls(
+    tool_calls: list[ToolCall],
+) -> list[ToolCall]:
+    """Remove control flow tools when multiple tool calls exist."""
+    if len(tool_calls) <= 1:
+        return tool_calls
+
+    return [tc for tc in tool_calls if tc.get("name") not in FLOW_CONTROL_TOOLS]
+
+
+def __has_control_flow_tool(tool_calls: list[ToolCall]) -> bool:
+    """Check if any tool call is of a control flow tool."""
+    return any(tc.get("name") in FLOW_CONTROL_TOOLS for tc in tool_calls)
+
+
+def __validate_last_message_is_AI(messages: list[AnyMessage]) -> AIMessage:
+    """Validate and return last message from state.
+
+    Raises:
+        AgentNodeRoutingException: If messages are empty or last message is not AIMessage
+    """
+    if not messages:
+        raise AgentNodeRoutingException(
+            "No messages in state - cannot route after agent"
+        )
+
+    last_message = messages[-1]
+    if not isinstance(last_message, AIMessage):
+        raise AgentNodeRoutingException(
+            f"Last message is not AIMessage (type: {type(last_message).__name__}) - cannot route after agent"
+        )
+
+    return last_message
+
+
+def route_agent(
+    state: AgentGraphState,
+) -> list[str] | Literal[AgentGraphNode.AGENT, AgentGraphNode.TERMINATE]:
+    """Route after agent: handles all routing logic including control flow detection.
+
+    Routing logic:
+    1. If multiple tool calls exist, filter out control flow tools (EndExecution, RaiseError)
+    2. If control flow tool(s) remain, route to TERMINATE
+    3. If regular tool calls remain, route to specific tool nodes (return list of tool names)
+    4. If no tool calls, handle successive completions
+
+    Returns:
+        - list[str]: Tool node names for parallel execution
+        - AgentGraphNode.AGENT: For successive completions
+        - AgentGraphNode.TERMINATE: For control flow termination
+
+    Raises:
+        AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
+    """
+    messages = state.messages
+    last_message = __validate_last_message_is_AI(messages)
+
+    tool_calls = list(last_message.tool_calls) if last_message.tool_calls else []
+    tool_calls = __filter_control_flow_tool_calls(tool_calls)
+
+    if tool_calls and __has_control_flow_tool(tool_calls):
+        return AgentGraphNode.TERMINATE
+
+    if tool_calls:
+        return [tc["name"] for tc in tool_calls]
+
+    successive_completions = count_successive_completions(messages)
+
+    if successive_completions > MAX_SUCCESSIVE_COMPLETIONS:
+        raise AgentNodeRoutingException(
+            f"Agent exceeded successive completions limit without producing tool calls "
+            f"(completions: {successive_completions}, max: {MAX_SUCCESSIVE_COMPLETIONS}). "
+            f"This should not happen as tool_choice='required' is enforced at the limit."
+        )
+
+    if last_message.content:
+        return AgentGraphNode.AGENT
+
+    raise AgentNodeRoutingException(
+        f"Agent produced empty response without tool calls "
+        f"(completions: {successive_completions}, has_content: False)"
+    )
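
A hedged sketch of how route_agent could be attached as a conditional edge. The two-node wiring, the ChatOpenAI model, and the import paths are assumptions for illustration; the package's own graph assembly (react/agent.py, +117 in the summary above) is not shown in this hunk.

# Hedged sketch: route_agent as a LangGraph conditional edge (wiring is an assumption).
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph

from uipath_langchain.agent.react.llm_node import create_llm_node
from uipath_langchain.agent.react.router import route_agent
from uipath_langchain.agent.react.terminate_node import create_terminate_node
from uipath_langchain.agent.react.types import AgentGraphNode, AgentGraphState

builder = StateGraph(AgentGraphState)
builder.add_node(AgentGraphNode.AGENT, create_llm_node(ChatOpenAI(model="gpt-4o-mini")))
builder.add_node(AgentGraphNode.TERMINATE, create_terminate_node())
builder.set_entry_point(AgentGraphNode.AGENT)
# route_agent returns AGENT, TERMINATE, or a list of tool-node names, so any
# tool nodes registered on the graph must also be valid edge targets here.
builder.add_conditional_edges(AgentGraphNode.AGENT, route_agent)
builder.add_edge(AgentGraphNode.TERMINATE, END)
graph = builder.compile()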

uipath_langchain/agent/react/terminate_node.py
@@ -0,0 +1,82 @@
+"""Termination node for the Agent graph."""
+
+from __future__ import annotations
+
+from typing import Any, NoReturn
+
+from langchain_core.messages import AIMessage
+from pydantic import BaseModel
+from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
+from uipath.runtime.errors import UiPathErrorCode
+
+from ..exceptions import (
+    AgentNodeRoutingException,
+    AgentTerminationException,
+)
+from .types import AgentGraphState, AgentTermination
+
+
+def _handle_end_execution(
+    args: dict[str, Any], response_schema: type[BaseModel] | None
+) -> dict[str, Any]:
+    """Handle LLM-initiated termination via END_EXECUTION_TOOL."""
+    output_schema = response_schema or END_EXECUTION_TOOL.args_schema
+    validated = output_schema.model_validate(args)
+    return validated.model_dump()
+
+
+def _handle_raise_error(args: dict[str, Any]) -> NoReturn:
+    """Handle LLM-initiated error via RAISE_ERROR_TOOL."""
+    error_message = args.get("message", "The LLM did not set the error message")
+    detail = args.get("details", "")
+    raise AgentTerminationException(
+        code=UiPathErrorCode.EXECUTION_ERROR,
+        title=error_message,
+        detail=detail,
+    )
+
+
+def _handle_agent_termination(termination: AgentTermination) -> NoReturn:
+    """Handle Command-based termination."""
+    raise AgentTerminationException(
+        code=UiPathErrorCode.EXECUTION_ERROR,
+        title=termination.title,
+        detail=termination.detail,
+    )
+
+
+def create_terminate_node(
+    response_schema: type[BaseModel] | None = None,
+):
+    """Handles Agent Graph termination for multiple sources and output or error propagation to Orchestrator.
+
+    Termination scenarios:
+    1. Command based termination with information in state (e.g: escalation)
+    2. LLM-initiated termination (END_EXECUTION_TOOL)
+    3. LLM-initiated error (RAISE_ERROR_TOOL)
+    """
+
+    def terminate_node(state: AgentGraphState):
+        if state.termination:
+            _handle_agent_termination(state.termination)
+
+        last_message = state.messages[-1]
+        if not isinstance(last_message, AIMessage):
+            raise AgentNodeRoutingException(
+                f"Expected last message to be AIMessage, got {type(last_message).__name__}"
+            )
+
+        for tool_call in last_message.tool_calls:
+            tool_name = tool_call["name"]
+
+            if tool_name == END_EXECUTION_TOOL.name:
+                return _handle_end_execution(tool_call["args"], response_schema)
+
+            if tool_name == RAISE_ERROR_TOOL.name:
+                _handle_raise_error(tool_call["args"])
+
+        raise AgentNodeRoutingException(
+            "No control flow tool call found in terminate node. Unexpected state."
+        )
+
+    return terminate_node
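
A hedged sketch of the terminate node validating END_EXECUTION_TOOL arguments against a caller-supplied output schema; the TicketSummary model and the literal tool-call payload are assumptions for illustration only.

# Hedged sketch: terminate node returning validated output for END_EXECUTION_TOOL.
from langchain_core.messages import AIMessage
from pydantic import BaseModel
from uipath.agent.react import END_EXECUTION_TOOL

from uipath_langchain.agent.react.terminate_node import create_terminate_node
from uipath_langchain.agent.react.types import AgentGraphState

class TicketSummary(BaseModel):  # assumed agent output schema, for illustration
    summary: str

terminate_node = create_terminate_node(response_schema=TicketSummary)
state = AgentGraphState(
    messages=[
        AIMessage(
            content="",
            tool_calls=[{"name": END_EXECUTION_TOOL.name, "args": {"summary": "Done."}, "id": "call_1"}],
        )
    ]
)
print(terminate_node(state))  # -> {"summary": "Done."}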

uipath_langchain/agent/react/tools/tools.py
@@ -0,0 +1,50 @@
+"""Control flow tools for agent execution."""
+
+from typing import Any
+
+from langchain_core.tools import BaseTool, StructuredTool
+from pydantic import BaseModel
+from uipath.agent.react import (
+    END_EXECUTION_TOOL,
+    RAISE_ERROR_TOOL,
+)
+
+
+def create_end_execution_tool(
+    agent_output_schema: type[BaseModel] | None = None,
+) -> StructuredTool:
+    """Never executed - routing intercepts and extracts args for successful termination."""
+    input_schema = agent_output_schema or END_EXECUTION_TOOL.args_schema
+
+    async def end_execution_fn(**kwargs: Any) -> dict[str, Any]:
+        return kwargs
+
+    return StructuredTool(
+        name=END_EXECUTION_TOOL.name,
+        description=END_EXECUTION_TOOL.description,
+        args_schema=input_schema,
+        coroutine=end_execution_fn,
+    )
+
+
+def create_raise_error_tool() -> StructuredTool:
+    """Never executed - routing intercepts and raises AgentTerminationException."""
+
+    async def raise_error_fn(**kwargs: Any) -> dict[str, Any]:
+        return kwargs
+
+    return StructuredTool(
+        name=RAISE_ERROR_TOOL.name,
+        description=RAISE_ERROR_TOOL.description,
+        args_schema=RAISE_ERROR_TOOL.args_schema,
+        coroutine=raise_error_fn,
+    )
+
+
+def create_flow_control_tools(
+    agent_output_schema: type[BaseModel] | None = None,
+) -> list[BaseTool]:
+    return [
+        create_end_execution_tool(agent_output_schema),
+        create_raise_error_tool(),
+    ]
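
A brief, hedged sketch of creating the flow-control tools; the module import path is an assumption (the adjacent react/tools/__init__.py, not shown here, may re-export these names).

# Hedged sketch: the flow-control tools are bound to the LLM but never executed;
# route_agent intercepts their tool calls before any tool node runs them.
from uipath_langchain.agent.react.tools.tools import create_flow_control_tools

flow_tools = create_flow_control_tools()  # no output schema -> END_EXECUTION_TOOL.args_schema
print([tool.name for tool in flow_tools])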

uipath_langchain/agent/react/types.py
@@ -0,0 +1,39 @@
+from enum import StrEnum
+from typing import Annotated
+
+from langchain_core.messages import AnyMessage
+from langgraph.graph.message import add_messages
+from pydantic import BaseModel, Field
+
+
+class AgentTerminationSource(StrEnum):
+    ESCALATION = "escalation"
+
+
+class AgentTermination(BaseModel):
+    """Agent Graph Termination model."""
+
+    source: AgentTerminationSource
+    title: str
+    detail: str = ""
+
+
+class AgentGraphState(BaseModel):
+    """Agent Graph state for standard loop execution."""
+
+    messages: Annotated[list[AnyMessage], add_messages] = []
+    termination: AgentTermination | None = None
+
+
+class AgentGraphNode(StrEnum):
+    INIT = "init"
+    AGENT = "agent"
+    LLM = "llm"
+    TOOLS = "tools"
+    TERMINATE = "terminate"
+
+
+class AgentGraphConfig(BaseModel):
+    recursion_limit: int = Field(
+        default=50, ge=1, description="Maximum recursion limit for the agent graph"
+    )
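
A hedged sketch of the new models' defaults; the import path is an assumption for illustration.

# Hedged sketch: default values of the new state and config models.
from langchain_core.messages import HumanMessage

from uipath_langchain.agent.react.types import AgentGraphConfig, AgentGraphState

state = AgentGraphState(messages=[HumanMessage(content="hello")])
print(state.termination)                   # None until a tool issues a Command-based termination
print(AgentGraphConfig().recursion_limit)  # 50 by default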

uipath_langchain/agent/react/utils.py
@@ -0,0 +1,49 @@
+"""ReAct Agent loop utilities."""
+
+from typing import Any, Sequence
+
+from langchain_core.messages import AIMessage, BaseMessage
+from pydantic import BaseModel
+from uipath.agent.react import END_EXECUTION_TOOL
+from uipath.utils.dynamic_schema import jsonschema_to_pydantic
+
+
+def resolve_input_model(
+    input_schema: dict[str, Any] | None,
+) -> type[BaseModel]:
+    """Resolve the input model from the input schema."""
+    if input_schema:
+        return jsonschema_to_pydantic(input_schema)
+
+    return BaseModel
+
+
+def resolve_output_model(
+    output_schema: dict[str, Any] | None,
+) -> type[BaseModel]:
+    """Fallback to default end_execution tool schema when no agent output schema is provided."""
+    if output_schema:
+        return jsonschema_to_pydantic(output_schema)
+
+    return END_EXECUTION_TOOL.args_schema
+
+
+def count_successive_completions(messages: Sequence[BaseMessage]) -> int:
+    """Count consecutive AIMessages without tool calls at end of message history."""
+    if not messages:
+        return 0
+
+    count = 0
+    for message in reversed(messages):
+        if not isinstance(message, AIMessage):
+            break
+
+        if message.tool_calls:
+            break
+
+        if not message.content:
+            break
+
+        count += 1
+
+    return count
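
A hedged sketch of how the completion counter reads a message history; the sample messages and import path are assumptions for illustration.

# Hedged sketch: count_successive_completions walks the tail of the history.
from langchain_core.messages import AIMessage, HumanMessage

from uipath_langchain.agent.react.utils import count_successive_completions

history = [
    HumanMessage(content="Plan the migration."),
    AIMessage(content="Step 1: inventory the workflows."),  # completion, no tool calls
    AIMessage(content="Step 2: schedule the cut-over."),    # completion, no tool calls
]
assert count_successive_completions(history) == 2

history.append(AIMessage(content="", tool_calls=[{"name": "search", "args": {}, "id": "call_1"}]))
assert count_successive_completions(history) == 0  # trailing tool call ends the streak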

uipath_langchain/agent/tools/__init__.py
@@ -0,0 +1,17 @@
+"""Tool creation and management for LowCode agents."""
+
+from .context_tool import create_context_tool
+from .integration_tool import create_integration_tool
+from .process_tool import create_process_tool
+from .tool_factory import (
+    create_tools_from_resources,
+)
+from .tool_node import create_tool_node
+
+__all__ = [
+    "create_tools_from_resources",
+    "create_tool_node",
+    "create_context_tool",
+    "create_process_tool",
+    "create_integration_tool",
+]

uipath_langchain/agent/tools/context_tool.py
@@ -0,0 +1,53 @@
+"""Context tool creation for semantic index retrieval."""
+
+from typing import Any
+
+from langchain_core.documents import Document
+from langchain_core.tools import StructuredTool
+from pydantic import BaseModel, Field
+from uipath.agent.models.agent import AgentContextResourceConfig
+from uipath.eval.mocks import mockable
+
+from uipath_langchain.retrievers import ContextGroundingRetriever
+
+from .structured_tool_with_output_type import StructuredToolWithOutputType
+from .utils import sanitize_tool_name
+
+
+def create_context_tool(resource: AgentContextResourceConfig) -> StructuredTool:
+    tool_name = sanitize_tool_name(resource.name)
+    retriever = ContextGroundingRetriever(
+        index_name=resource.index_name,
+        folder_path=resource.folder_path,
+        number_of_results=resource.settings.result_count,
+    )
+
+    class ContextInputSchemaModel(BaseModel):
+        query: str = Field(
+            ..., description="The query to search for in the knowledge base"
+        )
+
+    class ContextOutputSchemaModel(BaseModel):
+        documents: list[Document] = Field(
+            ..., description="List of retrieved documents."
+        )
+
+    input_model = ContextInputSchemaModel
+    output_model = ContextOutputSchemaModel
+
+    @mockable(
+        name=resource.name,
+        description=resource.description,
+        input_schema=input_model.model_json_schema(),
+        output_schema=output_model.model_json_schema(),
+    )
+    async def context_tool_fn(query: str) -> dict[str, Any]:
+        return {"documents": await retriever.ainvoke(query)}
+
+    return StructuredToolWithOutputType(
+        name=tool_name,
+        description=resource.description,
+        args_schema=input_model,
+        coroutine=context_tool_fn,
+        output_type=output_model,
+    )

uipath_langchain/agent/tools/escalation_tool.py
@@ -0,0 +1,111 @@
+"""Escalation tool creation for Action Center integration."""
+
+from enum import Enum
+from typing import Any
+
+from jsonschema_pydantic_converter import transform as create_model
+from langchain.tools import ToolRuntime
+from langchain_core.messages import ToolMessage
+from langchain_core.tools import StructuredTool
+from langgraph.types import Command, interrupt
+from uipath.agent.models.agent import (
+    AgentEscalationChannel,
+    AgentEscalationRecipientType,
+    AgentEscalationResourceConfig,
+)
+from uipath.eval.mocks import mockable
+from uipath.platform.common import CreateEscalation
+
+from ..react.types import AgentGraphNode, AgentTerminationSource
+from .utils import sanitize_tool_name
+
+
+class EscalationAction(str, Enum):
+    """Actions that can be taken after an escalation completes."""
+
+    CONTINUE = "continue"
+    END = "end"
+
+
+def create_escalation_tool(resource: AgentEscalationResourceConfig) -> StructuredTool:
+    """Uses interrupt() for Action Center human-in-the-loop."""
+
+    tool_name: str = f"escalate_{sanitize_tool_name(resource.name)}"
+    channel: AgentEscalationChannel = resource.channels[0]
+
+    input_model: Any = create_model(channel.input_schema)
+    output_model: Any = create_model(channel.output_schema)
+
+    assignee: str | None = (
+        channel.recipients[0].value
+        if channel.recipients
+        and channel.recipients[0].type == AgentEscalationRecipientType.USER_EMAIL
+        else None
+    )
+
+    @mockable(
+        name=resource.name,
+        description=resource.description,
+        input_schema=input_model.model_json_schema(),
+        output_schema=output_model.model_json_schema(),
+    )
+    async def escalation_tool_fn(
+        runtime: ToolRuntime, **kwargs: Any
+    ) -> Command[Any] | Any:
+        task_title = channel.task_title or "Escalation Task"
+
+        result = interrupt(
+            CreateEscalation(
+                title=task_title,
+                data=kwargs,
+                assignee=assignee,
+                app_name=channel.properties.app_name,
+                app_folder_path=channel.properties.folder_name,
+                app_version=channel.properties.app_version,
+                priority=channel.priority,
+                labels=channel.labels,
+                is_actionable_message_enabled=channel.properties.is_actionable_message_enabled,
+                actionable_message_metadata=channel.properties.actionable_message_meta_data,
+            )
+        )
+
+        escalation_action = getattr(result, "action", None)
+        escalation_output = getattr(result, "data", {})
+
+        outcome = (
+            channel.outcome_mapping.get(escalation_action)
+            if channel.outcome_mapping and escalation_action
+            else None
+        )
+
+        if outcome == EscalationAction.END:
+            output_detail = f"Escalation output: {escalation_output}"
+            termination_title = f"Agent run ended based on escalation outcome {outcome} with directive {escalation_action}"
+
+            return Command(
+                update={
+                    "messages": [
+                        ToolMessage(
+                            content=f"{termination_title}. {output_detail}",
+                            tool_call_id=runtime.tool_call_id,
+                        )
+                    ],
+                    "termination": {
+                        "source": AgentTerminationSource.ESCALATION,
+                        "title": termination_title,
+                        "detail": output_detail,
+                    },
+                },
+                goto=AgentGraphNode.TERMINATE,
+            )
+
+        return escalation_output
+
+    tool = StructuredTool(
+        name=tool_name,
+        description=resource.description,
+        args_schema=input_model,
+        coroutine=escalation_tool_fn,
+    )
+
+    return tool