uipath-langchain 0.1.28__py3-none-any.whl → 0.1.34__py3-none-any.whl
This diff shows the changes between package versions as published to a supported public registry. It is provided for informational purposes only and reflects the packages as they appear in their respective registries.
- uipath_langchain/_utils/_request_mixin.py +8 -0
- uipath_langchain/_utils/_settings.py +3 -2
- uipath_langchain/agent/guardrails/__init__.py +0 -16
- uipath_langchain/agent/guardrails/actions/__init__.py +2 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +1 -1
- uipath_langchain/agent/guardrails/actions/escalate_action.py +17 -34
- uipath_langchain/agent/guardrails/actions/filter_action.py +55 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +1 -1
- uipath_langchain/agent/guardrails/guardrail_nodes.py +161 -45
- uipath_langchain/agent/guardrails/guardrails_factory.py +200 -4
- uipath_langchain/agent/guardrails/types.py +0 -12
- uipath_langchain/agent/guardrails/utils.py +146 -0
- uipath_langchain/agent/react/agent.py +20 -7
- uipath_langchain/agent/react/constants.py +1 -2
- uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py +57 -18
- uipath_langchain/agent/react/llm_node.py +41 -10
- uipath_langchain/agent/react/router.py +48 -37
- uipath_langchain/agent/react/types.py +15 -1
- uipath_langchain/agent/react/utils.py +1 -1
- uipath_langchain/agent/tools/__init__.py +2 -0
- uipath_langchain/agent/tools/mcp_tool.py +86 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +16 -0
- uipath_langchain/chat/openai.py +56 -26
- uipath_langchain/chat/supported_models.py +9 -0
- uipath_langchain/chat/vertex.py +62 -46
- uipath_langchain/embeddings/embeddings.py +18 -12
- uipath_langchain/runtime/schema.py +72 -16
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.1.34.dist-info}/METADATA +4 -2
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.1.34.dist-info}/RECORD +33 -30
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.1.34.dist-info}/WHEEL +0 -0
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.1.34.dist-info}/entry_points.txt +0 -0
- {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.1.34.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/react/router.py CHANGED

@@ -6,9 +6,8 @@ from langchain_core.messages import AIMessage, AnyMessage, ToolCall
 from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
 
 from ..exceptions import AgentNodeRoutingException
-from .constants import MAX_SUCCESSIVE_COMPLETIONS
 from .types import AgentGraphNode, AgentGraphState
-from .utils import
+from .utils import count_consecutive_thinking_messages
 
 FLOW_CONTROL_TOOLS = [END_EXECUTION_TOOL.name, RAISE_ERROR_TOOL.name]
 
@@ -48,50 +47,62 @@ def __validate_last_message_is_AI(messages: list[AnyMessage]) -> AIMessage:
     return last_message
 
 
-def
-
-) -> list[str] | Literal[AgentGraphNode.AGENT, AgentGraphNode.TERMINATE]:
-    """Route after agent: handles all routing logic including control flow detection.
+def create_route_agent(thinking_messages_limit: int = 0):
+    """Create a routing function configured with thinking_messages_limit.
 
-
-
-    2. If control flow tool(s) remain, route to TERMINATE
-    3. If regular tool calls remain, route to specific tool nodes (return list of tool names)
-    4. If no tool calls, handle successive completions
+    Args:
+        thinking_messages_limit: Max consecutive thinking messages before error
 
     Returns:
-
-        - AgentGraphNode.AGENT: For successive completions
-        - AgentGraphNode.TERMINATE: For control flow termination
-
-    Raises:
-        AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
+        Routing function for LangGraph conditional edges
     """
-    messages = state.messages
-    last_message = __validate_last_message_is_AI(messages)
 
-
-
+    def route_agent(
+        state: AgentGraphState,
+    ) -> list[str] | Literal[AgentGraphNode.AGENT, AgentGraphNode.TERMINATE]:
+        """Route after agent: handles all routing logic including control flow detection.
+
+        Routing logic:
+        1. If multiple tool calls exist, filter out control flow tools (EndExecution, RaiseError)
+        2. If control flow tool(s) remain, route to TERMINATE
+        3. If regular tool calls remain, route to specific tool nodes (return list of tool names)
+        4. If no tool calls, handle consecutive completions
+
+        Returns:
+            - list[str]: Tool node names for parallel execution
+            - AgentGraphNode.AGENT: For consecutive completions
+            - AgentGraphNode.TERMINATE: For control flow termination
+
+        Raises:
+            AgentNodeRoutingException: When encountering unexpected state (empty messages, non-AIMessage, or excessive completions)
+        """
+        messages = state.messages
+        last_message = __validate_last_message_is_AI(messages)
 
-
-
+        tool_calls = list(last_message.tool_calls) if last_message.tool_calls else []
+        tool_calls = __filter_control_flow_tool_calls(tool_calls)
 
-
-
+        if tool_calls and __has_control_flow_tool(tool_calls):
+            return AgentGraphNode.TERMINATE
 
-
+        if tool_calls:
+            return [tc["name"] for tc in tool_calls]
+
+        consecutive_thinking_messages = count_consecutive_thinking_messages(messages)
+
+        if consecutive_thinking_messages > thinking_messages_limit:
+            raise AgentNodeRoutingException(
+                f"Agent exceeded consecutive completions limit without producing tool calls "
+                f"(completions: {consecutive_thinking_messages}, max: {thinking_messages_limit}). "
+                f"This should not happen as tool_choice='required' is enforced at the limit."
+            )
+
+        if last_message.content:
+            return AgentGraphNode.AGENT
 
-    if successive_completions > MAX_SUCCESSIVE_COMPLETIONS:
         raise AgentNodeRoutingException(
-            f"Agent
-            f"(completions: {
-            f"This should not happen as tool_choice='required' is enforced at the limit."
+            f"Agent produced empty response without tool calls "
+            f"(completions: {consecutive_thinking_messages}, has_content: False)"
         )
 
-
-    return AgentGraphNode.AGENT
-
-    raise AgentNodeRoutingException(
-        f"Agent produced empty response without tool calls "
-        f"(completions: {successive_completions}, has_content: False)"
-    )
+    return route_agent
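For context, a minimal sketch of how the new factory might plug into a LangGraph graph. This is not from the diff: the `build_agent_graph` helper is hypothetical, the import paths follow the file list above, and the node set is assumed to be registered elsewhere.

    from langgraph.graph import StateGraph

    from uipath_langchain.agent.react.router import create_route_agent
    from uipath_langchain.agent.react.types import (
        AgentGraphConfig,
        AgentGraphNode,
        AgentGraphState,
    )

    def build_agent_graph(tools, config: AgentGraphConfig):  # hypothetical helper
        graph = StateGraph(AgentGraphState)
        # AGENT, TERMINATE, and one node per tool are assumed to be added here
        graph.add_conditional_edges(
            AgentGraphNode.AGENT,
            # the factory bakes the configured limit into the returned route_agent
            create_route_agent(thinking_messages_limit=config.thinking_messages_limit),
            [AgentGraphNode.AGENT, AgentGraphNode.TERMINATE, *[t.name for t in tools]],
        )
        return graph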
uipath_langchain/agent/react/types.py CHANGED

@@ -1,5 +1,5 @@
 from enum import StrEnum
-from typing import Annotated
+from typing import Annotated, Any, Optional
 
 from langchain_core.messages import AnyMessage
 from langgraph.graph.message import add_messages

@@ -25,15 +25,29 @@ class AgentGraphState(BaseModel):
     termination: AgentTermination | None = None
 
 
+class AgentGuardrailsGraphState(AgentGraphState):
+    """Agent Guardrails Graph state for guardrail subgraph."""
+
+    guardrail_validation_result: Optional[str] = None
+    agent_result: Optional[dict[str, Any]] = None
+
+
 class AgentGraphNode(StrEnum):
     INIT = "init"
+    GUARDED_INIT = "guarded-init"
     AGENT = "agent"
     LLM = "llm"
     TOOLS = "tools"
     TERMINATE = "terminate"
+    GUARDED_TERMINATE = "guarded-terminate"
 
 
 class AgentGraphConfig(BaseModel):
     recursion_limit: int = Field(
         default=50, ge=1, description="Maximum recursion limit for the agent graph"
     )
+    thinking_messages_limit: int = Field(
+        default=0,
+        ge=0,
+        description="Max consecutive thinking messages before enforcing tool usage. 0 = force tools every time.",
+    )
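A quick sketch of the new knob (values illustrative): pydantic enforces `ge=0`, and the default of 0 keeps the old behavior of requiring a tool call on every completion.

    from uipath_langchain.agent.react.types import AgentGraphConfig

    config = AgentGraphConfig(thinking_messages_limit=2)
    config.thinking_messages_limit  # 2: tolerate up to two thinking-only completions
    # AgentGraphConfig(thinking_messages_limit=-1) would raise a ValidationError (ge=0)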
uipath_langchain/agent/react/utils.py CHANGED

@@ -28,7 +28,7 @@ def resolve_output_model(
     return END_EXECUTION_TOOL.args_schema
 
 
-def
+def count_consecutive_thinking_messages(messages: Sequence[BaseMessage]) -> int:
     """Count consecutive AIMessages without tool calls at end of message history."""
     if not messages:
         return 0
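Based on the docstring, the helper counts the trailing run of AIMessages that carry no tool calls. A behavior sketch (the messages are illustrative):

    from langchain_core.messages import AIMessage, HumanMessage

    from uipath_langchain.agent.react.utils import count_consecutive_thinking_messages

    msgs = [HumanMessage("hi"), AIMessage("let me think"), AIMessage("still thinking")]
    count_consecutive_thinking_messages(msgs)  # 2: trailing AIMessages without tool calls
    count_consecutive_thinking_messages([])    # 0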
uipath_langchain/agent/tools/__init__.py CHANGED

@@ -2,6 +2,7 @@
 
 from .context_tool import create_context_tool
 from .integration_tool import create_integration_tool
+from .mcp_tool import create_mcp_tools
 from .process_tool import create_process_tool
 from .tool_factory import (
     create_tools_from_resources,

@@ -14,4 +15,5 @@ __all__ = [
     "create_context_tool",
     "create_process_tool",
     "create_integration_tool",
+    "create_mcp_tools",
 ]
uipath_langchain/agent/tools/mcp_tool.py ADDED

@@ -0,0 +1,86 @@
+import asyncio
+import os
+from collections import Counter, defaultdict
+from contextlib import AsyncExitStack, asynccontextmanager
+from itertools import chain
+
+import httpx
+from langchain_core.tools import BaseTool
+from langchain_mcp_adapters.tools import load_mcp_tools
+from mcp import ClientSession
+from mcp.client.streamable_http import streamable_http_client
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.agent.models.agent import AgentMcpResourceConfig
+
+
+def _deduplicate_tools(tools: list[BaseTool]) -> list[BaseTool]:
+    """Deduplicate tools by appending numeric suffix to duplicate names."""
+    counts = Counter(tool.name for tool in tools)
+    seen: defaultdict[str, int] = defaultdict(int)
+
+    for tool in tools:
+        if counts[tool.name] > 1:
+            seen[tool.name] += 1
+            tool.name = f"{tool.name}_{seen[tool.name]}"
+
+    return tools
+
+
+def _filter_tools(tools: list[BaseTool], cfg: AgentMcpResourceConfig) -> list[BaseTool]:
+    """Filter tools to only include those in available_tools."""
+    allowed = {t.name for t in cfg.available_tools}
+    return [t for t in tools if t.name in allowed]
+
+
+@asynccontextmanager
+async def create_mcp_tools(
+    config: AgentMcpResourceConfig | list[AgentMcpResourceConfig],
+    max_concurrency: int = 5,
+):
+    """Connect to UiPath MCP server(s) and yield LangChain-compatible tools."""
+    if not (base_url := os.getenv("UIPATH_URL")):
+        raise ValueError("UIPATH_URL environment variable is not set")
+    if not (access_token := os.getenv("UIPATH_ACCESS_TOKEN")):
+        raise ValueError("UIPATH_ACCESS_TOKEN environment variable is not set")
+
+    configs = config if isinstance(config, list) else [config]
+    enabled = [c for c in configs if c.is_enabled is not False]
+
+    if not enabled:
+        yield []
+        return
+
+    base_url = base_url.rstrip("/")
+    semaphore = asyncio.Semaphore(max_concurrency)
+
+    default_client_kwargs = get_httpx_client_kwargs()
+    client_kwargs = {
+        **default_client_kwargs,
+        "headers": {"Authorization": f"Bearer {access_token}"},
+        "timeout": httpx.Timeout(60),
+    }
+
+    async def init_session(
+        session: ClientSession, cfg: AgentMcpResourceConfig
+    ) -> list[BaseTool]:
+        async with semaphore:
+            await session.initialize()
+            tools = await load_mcp_tools(session)
+            return _filter_tools(tools, cfg)
+
+    async def create_session(
+        stack: AsyncExitStack, cfg: AgentMcpResourceConfig
+    ) -> ClientSession:
+        url = f"{base_url}/agenthub_/mcp/{cfg.folder_path}/{cfg.slug}"
+        http_client = await stack.enter_async_context(
+            httpx.AsyncClient(**client_kwargs)
+        )
+        read, write, _ = await stack.enter_async_context(
+            streamable_http_client(url=url, http_client=http_client)
+        )
+        return await stack.enter_async_context(ClientSession(read, write))
+
+    async with AsyncExitStack() as stack:
+        sessions = [(await create_session(stack, cfg), cfg) for cfg in enabled]
+        results = await asyncio.gather(*[init_session(s, cfg) for s, cfg in sessions])
+        yield _deduplicate_tools(list(chain.from_iterable(results)))
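Illustrative usage, with the config construction elided since its fields are not shown in this diff: `create_mcp_tools` is an async context manager, so the MCP sessions stay open for exactly as long as the tools are in use.

    import asyncio

    from uipath.agent.models.agent import AgentMcpResourceConfig
    from uipath_langchain.agent.tools import create_mcp_tools

    async def main():
        cfg = AgentMcpResourceConfig(...)  # folder_path, slug, available_tools, ...
        async with create_mcp_tools(cfg, max_concurrency=5) as tools:
            for tool in tools:
                print(tool.name)  # duplicate names come back suffixed: search_1, search_2

    asyncio.run(main())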
uipath_langchain/chat/__init__.py CHANGED

@@ -1,10 +1,14 @@
 from .mapper import UiPathChatMessagesMapper
 from .models import UiPathAzureChatOpenAI, UiPathChat
 from .openai import UiPathChatOpenAI
+from .supported_models import BedrockModels, GeminiModels, OpenAIModels
 
 __all__ = [
     "UiPathChat",
     "UiPathAzureChatOpenAI",
     "UiPathChatOpenAI",
     "UiPathChatMessagesMapper",
+    "OpenAIModels",
+    "BedrockModels",
+    "GeminiModels",
 ]
uipath_langchain/chat/bedrock.py CHANGED

@@ -48,10 +48,14 @@ class AwsBedrockCompletionsPassthroughClient:
         model: str,
         token: str,
         api_flavor: str,
+        agenthub_config: Optional[str] = None,
+        byo_connection_id: Optional[str] = None,
     ):
         self.model = model
         self.token = token
         self.api_flavor = api_flavor
+        self.agenthub_config = agenthub_config
+        self.byo_connection_id = byo_connection_id
         self._vendor = "awsbedrock"
         self._url: Optional[str] = None

@@ -101,6 +105,10 @@ class AwsBedrockCompletionsPassthroughClient:
             "X-UiPath-Streaming-Enabled": streaming,
         }
 
+        if self.agenthub_config:
+            headers["X-UiPath-AgentHub-Config"] = self.agenthub_config
+        if self.byo_connection_id:
+            headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = self.byo_connection_id
         job_key = os.getenv("UIPATH_JOB_KEY")
         process_key = os.getenv("UIPATH_PROCESS_KEY")
         if job_key:

@@ -118,6 +126,8 @@ class UiPathChatBedrockConverse(ChatBedrockConverse):
         tenant_id: Optional[str] = None,
         token: Optional[str] = None,
         model_name: str = BedrockModels.anthropic_claude_haiku_4_5,
+        agenthub_config: Optional[str] = None,
+        byo_connection_id: Optional[str] = None,
         **kwargs,
     ):
         org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")

@@ -141,6 +151,8 @@
             model=model_name,
             token=token,
             api_flavor="converse",
+            agenthub_config=agenthub_config,
+            byo_connection_id=byo_connection_id,
         )
 
         client = passthrough_client.get_client()

@@ -156,6 +168,8 @@ class UiPathChatBedrock(ChatBedrock):
         tenant_id: Optional[str] = None,
         token: Optional[str] = None,
         model_name: str = BedrockModels.anthropic_claude_haiku_4_5,
+        agenthub_config: Optional[str] = None,
+        byo_connection_id: Optional[str] = None,
         **kwargs,
     ):
         org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")

@@ -179,6 +193,8 @@
             model=model_name,
             token=token,
             api_flavor="invoke",
+            agenthub_config=agenthub_config,
+            byo_connection_id=byo_connection_id,
         )
 
         client = passthrough_client.get_client()
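A hedged sketch of the new keyword arguments (the ID values are placeholders): per the passthrough-client change above, they are not interpreted locally, only forwarded to LLM Gateway as headers.

    from uipath_langchain.chat.bedrock import UiPathChatBedrockConverse
    from uipath_langchain.chat.supported_models import BedrockModels

    llm = UiPathChatBedrockConverse(
        model_name=BedrockModels.anthropic_claude_sonnet_4_5,
        agenthub_config="<agenthub-config>",  # sent as X-UiPath-AgentHub-Config
        byo_connection_id="<connection-id>",  # sent as X-UiPath-LlmGateway-ByoIsConnectionId
    )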
uipath_langchain/chat/openai.py CHANGED

@@ -12,21 +12,41 @@ from .supported_models import OpenAIModels
 logger = logging.getLogger(__name__)
 
 
+def _rewrite_openai_url(
+    original_url: str, params: httpx.QueryParams
+) -> httpx.URL | None:
+    """Rewrite OpenAI URLs to UiPath gateway completions endpoint.
+
+    Handles three URL patterns:
+    - responses: false -> .../openai/deployments/.../chat/completions?api-version=...
+    - responses: true -> .../openai/responses?api-version=...
+    - responses API base -> .../{model}?api-version=... (no /openai/ path)
+
+    All are rewritten to .../completions
+    """
+    if "/openai/deployments/" in original_url:
+        base_url = original_url.split("/openai/deployments/")[0]
+    elif "/openai/responses" in original_url:
+        base_url = original_url.split("/openai/responses")[0]
+    else:
+        # Handle base URL case (no /openai/ path appended yet)
+        # Strip query string to get base URL
+        base_url = original_url.split("?")[0]
+
+    new_url_str = f"{base_url}/completions"
+    if params:
+        return httpx.URL(new_url_str, params=params)
+    return httpx.URL(new_url_str)
+
+
 class UiPathURLRewriteTransport(httpx.AsyncHTTPTransport):
     def __init__(self, verify: bool = True, **kwargs):
         super().__init__(verify=verify, **kwargs)
 
     async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
-
-
-
-        base_url = original_url.split("/openai/deployments/")[0]
-        query_string = request.url.params
-        new_url_str = f"{base_url}/completions"
-        if query_string:
-            request.url = httpx.URL(new_url_str, params=query_string)
-        else:
-            request.url = httpx.URL(new_url_str)
+        new_url = _rewrite_openai_url(str(request.url), request.url.params)
+        if new_url:
+            request.url = new_url
 
         return await super().handle_async_request(request)
 

@@ -36,16 +56,9 @@ class UiPathSyncURLRewriteTransport(httpx.HTTPTransport):
         super().__init__(verify=verify, **kwargs)
 
     def handle_request(self, request: httpx.Request) -> httpx.Response:
-
-
-
-        base_url = original_url.split("/openai/deployments/")[0]
-        query_string = request.url.params
-        new_url_str = f"{base_url}/completions"
-        if query_string:
-            request.url = httpx.URL(new_url_str, params=query_string)
-        else:
-            request.url = httpx.URL(new_url_str)
+        new_url = _rewrite_openai_url(str(request.url), request.url.params)
+        if new_url:
+            request.url = new_url
 
         return super().handle_request(request)
 
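A quick behavior check of the extracted helper (the gateway URL below is made up): each of the three patterns collapses to a `/completions` endpoint with the query string preserved.

    import httpx

    url = "https://cloud.uipath.com/org/tenant/llmgw_/openai/deployments/gpt-4o/chat/completions"
    params = httpx.QueryParams({"api-version": "2024-12-01-preview"})
    str(_rewrite_openai_url(url, params))
    # "https://cloud.uipath.com/org/tenant/llmgw_/completions?api-version=2024-12-01-preview"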
@@ -58,6 +71,9 @@ class UiPathChatOpenAI(AzureChatOpenAI):
         api_version: str = "2024-12-01-preview",
         org_id: Optional[str] = None,
         tenant_id: Optional[str] = None,
+        agenthub_config: Optional[str] = None,
+        extra_headers: Optional[dict[str, str]] = None,
+        byo_connection_id: Optional[str] = None,
         **kwargs,
     ):
         org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")

@@ -81,18 +97,24 @@ class UiPathChatOpenAI(AzureChatOpenAI):
         self._vendor = "openai"
         self._model_name = model_name
         self._url: Optional[str] = None
+        self._agenthub_config = agenthub_config
+        self._byo_connection_id = byo_connection_id
+        self._extra_headers = extra_headers or {}
+
+        client_kwargs = get_httpx_client_kwargs()
+        verify = client_kwargs.get("verify", True)
 
         super().__init__(
             azure_endpoint=self._build_base_url(),
             model_name=model_name,
             default_headers=self._build_headers(token),
             http_async_client=httpx.AsyncClient(
-                transport=UiPathURLRewriteTransport(verify=
-                **
+                transport=UiPathURLRewriteTransport(verify=verify),
+                **client_kwargs,
             ),
             http_client=httpx.Client(
-                transport=UiPathSyncURLRewriteTransport(verify=
-                **
+                transport=UiPathSyncURLRewriteTransport(verify=verify),
+                **client_kwargs,
             ),
             api_key=token,
             api_version=api_version,

@@ -105,10 +127,18 @@ class UiPathChatOpenAI(AzureChatOpenAI):
             "X-UiPath-LlmGateway-ApiFlavor": "auto",
             "Authorization": f"Bearer {token}",
         }
+
+        if self._agenthub_config:
+            headers["X-UiPath-AgentHub-Config"] = self._agenthub_config
+        if self._byo_connection_id:
+            headers["X-UiPath-LlmGateway-ByoIsConnectionId"] = self._byo_connection_id
         if job_key := os.getenv("UIPATH_JOB_KEY"):
             headers["X-UiPath-JobKey"] = job_key
         if process_key := os.getenv("UIPATH_PROCESS_KEY"):
             headers["X-UiPath-ProcessKey"] = process_key
+
+        # Allow extra_headers to override defaults
+        headers.update(self._extra_headers)
         return headers
 
     @property

@@ -117,9 +147,9 @@ class UiPathChatOpenAI(AzureChatOpenAI):
         formatted_endpoint = vendor_endpoint.format(
             vendor=self._vendor,
             model=self._model_name,
-            api_version=self._openai_api_version,
         )
-
+        base_endpoint = formatted_endpoint.replace("/completions", "")
+        return f"{base_endpoint}?api-version={self._openai_api_version}"
 
     def _build_base_url(self) -> str:
         if not self._url:
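A sketch of the new constructor arguments (the header name and values are illustrative): `extra_headers` is merged last in `_build_headers`, so it can override any default header, including Authorization.

    from uipath_langchain.chat import OpenAIModels, UiPathChatOpenAI

    llm = UiPathChatOpenAI(
        model_name=OpenAIModels.gpt_5_2_2025_12_11,
        extra_headers={"X-Custom-Trace-Id": "abc123"},  # applied after the defaults
        byo_connection_id="<connection-id>",
    )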
uipath_langchain/chat/supported_models.py CHANGED

@@ -21,14 +21,21 @@ class OpenAIModels:
     # GPT-5.1 models
     gpt_5_1_2025_11_13 = "gpt-5.1-2025-11-13"
 
+    # GPT-5.2 models
+    gpt_5_2_2025_12_11 = "gpt-5.2-2025-12-11"
+
 
 class GeminiModels:
     """Supported Google Gemini model identifiers."""
 
+    # Gemini 2 models
     gemini_2_5_pro = "gemini-2.5-pro"
     gemini_2_5_flash = "gemini-2.5-flash"
     gemini_2_0_flash_001 = "gemini-2.0-flash-001"
 
+    # Gemini 3 models
+    gemini_3_pro_preview = "gemini-3-pro-preview"
+
 
 class BedrockModels:
     """Supported AWS Bedrock model identifiers."""

@@ -38,5 +45,7 @@ class BedrockModels:
 
     # Claude 4 models
     anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0"
+
+    # Claude 4.5 models
     anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0"
     anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0"
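These classes are now re-exported from `uipath_langchain.chat` (see the `__init__.py` hunk above), so the constants can be referenced without the module path:

    from uipath_langchain.chat import BedrockModels, GeminiModels, OpenAIModels

    OpenAIModels.gpt_5_2_2025_12_11           # "gpt-5.2-2025-12-11"
    GeminiModels.gemini_3_pro_preview         # "gemini-3-pro-preview"
    BedrockModels.anthropic_claude_haiku_4_5  # "anthropic.claude-haiku-4-5-20251001-v1:0"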
|