uipath-langchain 0.0.112__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
- uipath_langchain/_cli/_templates/main.py.template +12 -13
- uipath_langchain/_cli/cli_init.py +127 -156
- uipath_langchain/_cli/cli_new.py +2 -6
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/{tracers → _tracing}/__init__.py +0 -2
- uipath_langchain/_tracing/_instrument_traceable.py +134 -0
- uipath_langchain/_utils/__init__.py +1 -2
- uipath_langchain/_utils/_request_mixin.py +351 -54
- uipath_langchain/_utils/_settings.py +2 -11
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +113 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/gemini.py +330 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +261 -38
- uipath_langchain/chat/openai.py +132 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/embeddings/embeddings.py +136 -36
- uipath_langchain/middlewares.py +0 -2
- uipath_langchain/py.typed +0 -0
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +349 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -20
- uipath_langchain-0.1.24.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -136
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -371
- uipath_langchain/_cli/_utils/_graph.py +0 -202
- uipath_langchain/_cli/cli_run.py +0 -80
- uipath_langchain/tracers/AsyncUiPathTracer.py +0 -274
- uipath_langchain/tracers/_events.py +0 -33
- uipath_langchain/tracers/_instrument_traceable.py +0 -416
- uipath_langchain/tracers/_utils.py +0 -52
- uipath_langchain-0.0.112.dist-info/RECORD +0 -36
- uipath_langchain-0.0.112.dist-info/entry_points.txt +0 -2
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/tools/structured_tool_with_output_type.py
@@ -0,0 +1,14 @@
+from typing import Any
+
+from langchain_core.tools import StructuredTool
+from pydantic import Field
+from typing_extensions import override
+
+
+class StructuredToolWithOutputType(StructuredTool):
+    output_type: Any = Field(Any, description="Output type.")
+
+    @override
+    @property
+    def OutputType(self) -> type[Any]:
+        return self.output_type
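`StructuredTool.from_function` forwards extra keyword arguments to the constructing class, so `output_type` can be supplied at construction time. A hedged usage sketch; the `InvoiceResult` model and `lookup_invoice` function are illustrative, not part of the package:

```python
from pydantic import BaseModel

from uipath_langchain.agent.tools.structured_tool_with_output_type import (
    StructuredToolWithOutputType,
)


class InvoiceResult(BaseModel):  # hypothetical output schema
    total: float


def lookup_invoice(invoice_id: str) -> InvoiceResult:  # hypothetical tool body
    return InvoiceResult(total=42.0)


tool = StructuredToolWithOutputType.from_function(
    func=lookup_invoice,
    name="lookup_invoice",
    description="Look up an invoice by id.",
    output_type=InvoiceResult,
)
print(tool.OutputType)  # the overridden property returns the declared type
```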
uipath_langchain/agent/tools/tool_factory.py
@@ -0,0 +1,45 @@
+"""Factory functions for creating tools from agent resources."""
+
+from langchain_core.tools import BaseTool, StructuredTool
+from uipath.agent.models.agent import (
+    AgentContextResourceConfig,
+    AgentEscalationResourceConfig,
+    AgentIntegrationToolResourceConfig,
+    AgentProcessToolResourceConfig,
+    BaseAgentResourceConfig,
+    LowCodeAgentDefinition,
+)
+
+from .context_tool import create_context_tool
+from .escalation_tool import create_escalation_tool
+from .integration_tool import create_integration_tool
+from .process_tool import create_process_tool
+
+
+async def create_tools_from_resources(agent: LowCodeAgentDefinition) -> list[BaseTool]:
+    tools: list[BaseTool] = []
+
+    for resource in agent.resources:
+        tool = await _build_tool_for_resource(resource)
+        if tool is not None:
+            tools.append(tool)
+
+    return tools
+
+
+async def _build_tool_for_resource(
+    resource: BaseAgentResourceConfig,
+) -> StructuredTool | None:
+    if isinstance(resource, AgentProcessToolResourceConfig):
+        return create_process_tool(resource)
+
+    elif isinstance(resource, AgentContextResourceConfig):
+        return create_context_tool(resource)
+
+    elif isinstance(resource, AgentEscalationResourceConfig):
+        return create_escalation_tool(resource)
+
+    elif isinstance(resource, AgentIntegrationToolResourceConfig):
+        return create_integration_tool(resource)
+
+    return None
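Since `create_tools_from_resources` is a coroutine, callers need an event loop. A hedged sketch; `load_agent_definition` is a hypothetical stand-in for however the runtime materializes a `LowCodeAgentDefinition`, which this diff does not show:

```python
import asyncio

from uipath_langchain.agent.tools.tool_factory import create_tools_from_resources


async def main() -> None:
    # Hypothetical helper: obtain a LowCodeAgentDefinition from somewhere
    # (e.g. a low-code agent export); not part of this diff.
    agent = load_agent_definition("agent.json")
    tools = await create_tools_from_resources(agent)
    # Resources of unrecognized types are silently skipped (the factory
    # returns None for them), so this list may be shorter than agent.resources.
    print([t.name for t in tools])


asyncio.run(main())
```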
uipath_langchain/agent/tools/tool_node.py
@@ -0,0 +1,22 @@
+"""Tool node factory wiring directly to LangGraph's ToolNode."""
+
+from collections.abc import Sequence
+
+from langchain_core.tools import BaseTool
+from langgraph.prebuilt import ToolNode
+
+
+def create_tool_node(tools: Sequence[BaseTool]) -> dict[str, ToolNode]:
+    """Create individual ToolNode for each tool.
+
+    Args:
+        tools: Sequence of tools to create nodes for.
+
+    Returns:
+        Dict mapping tool.name -> ToolNode([tool]).
+        Each tool gets its own dedicated node for middleware composition.
+
+    Note:
+        handle_tool_errors=False delegates error handling to LangGraph's error boundary.
+    """
+    return {tool.name: ToolNode([tool], handle_tool_errors=False) for tool in tools}
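A small sketch of the fan-out this produces, using two toy `@tool` functions (both hypothetical); each tool ends up wrapped in its own single-tool `ToolNode`, keyed by name:

```python
from langchain_core.tools import tool

from uipath_langchain.agent.tools.tool_node import create_tool_node


@tool
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    return a * b


nodes = create_tool_node([add, multiply])
# One dedicated ToolNode per tool, so per-tool middleware can be attached
# when the nodes are added to a StateGraph under their own names.
print(sorted(nodes))  # ['add', 'multiply']
```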
uipath_langchain/agent/tools/utils.py
@@ -0,0 +1,11 @@
+"""Tool-related utility functions."""
+
+import re
+
+
+def sanitize_tool_name(name: str) -> str:
+    """Sanitize tool name for LLM compatibility (alphanumeric, underscore, hyphen only, max 64 chars)."""
+    trim_whitespaces = "_".join(name.split())
+    sanitized_tool_name = re.sub(r"[^a-zA-Z0-9_-]", "", trim_whitespaces)
+    sanitized_tool_name = sanitized_tool_name[:64]
+    return sanitized_tool_name
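For reference, a few examples of what the sanitizer produces, with outputs worked out from the regex above (whitespace runs become single underscores, disallowed characters are dropped, then the result is truncated):

```python
from uipath_langchain.agent.tools.utils import sanitize_tool_name

print(sanitize_tool_name("Send Invoice (v2)"))  # Send_Invoice_v2
print(sanitize_tool_name("lookup: customer!"))  # lookup_customer
print(len(sanitize_tool_name("x" * 100)))       # 64 -- truncated to the max length
```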
uipath_langchain/chat/bedrock.py
@@ -0,0 +1,187 @@
+import logging
+import os
+from typing import Optional
+
+from uipath.utils import EndpointManager
+
+from .supported_models import BedrockModels
+
+logger = logging.getLogger(__name__)
+
+
+def _check_bedrock_dependencies() -> None:
+    """Check if required dependencies for UiPathChatBedrock are installed."""
+    import importlib.util
+
+    missing_packages = []
+
+    if importlib.util.find_spec("langchain_aws") is None:
+        missing_packages.append("langchain-aws")
+
+    if importlib.util.find_spec("boto3") is None:
+        missing_packages.append("boto3")
+
+    if missing_packages:
+        packages_str = ", ".join(missing_packages)
+        raise ImportError(
+            f"The following packages are required to use UiPathChatBedrock: {packages_str}\n"
+            "Please install them using one of the following methods:\n\n"
+            "  # Using pip:\n"
+            f"  pip install uipath-langchain[bedrock]\n\n"
+            "  # Using uv:\n"
+            f"  uv add 'uipath-langchain[bedrock]'\n\n"
+        )
+
+
+_check_bedrock_dependencies()
+
+import boto3
+from langchain_aws import (
+    ChatBedrock,
+    ChatBedrockConverse,
+)
+
+
+class AwsBedrockCompletionsPassthroughClient:
+    def __init__(
+        self,
+        model: str,
+        token: str,
+        api_flavor: str,
+    ):
+        self.model = model
+        self.token = token
+        self.api_flavor = api_flavor
+        self._vendor = "awsbedrock"
+        self._url: Optional[str] = None
+
+    @property
+    def endpoint(self) -> str:
+        vendor_endpoint = EndpointManager.get_vendor_endpoint()
+        formatted_endpoint = vendor_endpoint.format(
+            vendor=self._vendor,
+            model=self.model,
+        )
+        return formatted_endpoint
+
+    def _build_base_url(self) -> str:
+        if not self._url:
+            env_uipath_url = os.getenv("UIPATH_URL")
+
+            if env_uipath_url:
+                self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}"
+            else:
+                raise ValueError("UIPATH_URL environment variable is required")
+
+        return self._url
+
+    def get_client(self):
+        client = boto3.client(
+            "bedrock-runtime",
+            region_name="none",
+            aws_access_key_id="none",
+            aws_secret_access_key="none",
+        )
+        client.meta.events.register(
+            "before-send.bedrock-runtime.*", self._modify_request
+        )
+        return client
+
+    def _modify_request(self, request, **kwargs):
+        """Intercept boto3 request and redirect to LLM Gateway"""
+        # Detect streaming based on URL suffix:
+        # - converse-stream / invoke-with-response-stream -> streaming
+        # - converse / invoke -> non-streaming
+        streaming = "true" if request.url.endswith("-stream") else "false"
+        request.url = self._build_base_url()
+
+        headers = {
+            "Authorization": f"Bearer {self.token}",
+            "X-UiPath-LlmGateway-ApiFlavor": self.api_flavor,
+            "X-UiPath-Streaming-Enabled": streaming,
+        }
+
+        job_key = os.getenv("UIPATH_JOB_KEY")
+        process_key = os.getenv("UIPATH_PROCESS_KEY")
+        if job_key:
+            headers["X-UiPath-JobKey"] = job_key
+        if process_key:
+            headers["X-UiPath-ProcessKey"] = process_key
+
+        request.headers.update(headers)
+
+
+class UiPathChatBedrockConverse(ChatBedrockConverse):
+    def __init__(
+        self,
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        token: Optional[str] = None,
+        model_name: str = BedrockModels.anthropic_claude_haiku_4_5,
+        **kwargs,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        passthrough_client = AwsBedrockCompletionsPassthroughClient(
+            model=model_name,
+            token=token,
+            api_flavor="converse",
+        )
+
+        client = passthrough_client.get_client()
+        kwargs["client"] = client
+        kwargs["model"] = model_name
+        super().__init__(**kwargs)
+
+
+class UiPathChatBedrock(ChatBedrock):
+    def __init__(
+        self,
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        token: Optional[str] = None,
+        model_name: str = BedrockModels.anthropic_claude_haiku_4_5,
+        **kwargs,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        passthrough_client = AwsBedrockCompletionsPassthroughClient(
+            model=model_name,
+            token=token,
+            api_flavor="invoke",
+        )
+
+        client = passthrough_client.get_client()
+        kwargs["client"] = client
+        kwargs["model"] = model_name
+        super().__init__(**kwargs)
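Based on the constructor above, instantiation needs only the UiPath environment variables (or the equivalent explicit parameters); the boto3 event hook then rewrites every Bedrock request toward the LLM Gateway. A minimal sketch with placeholder values; the URL, ids, and token below are illustrative, not real:

```python
import os

from uipath_langchain.chat.bedrock import UiPathChatBedrockConverse

# Placeholder values; in a real UiPath job the runtime injects these.
os.environ.setdefault("UIPATH_URL", "https://cloud.uipath.com/acme/DefaultTenant")
os.environ.setdefault("UIPATH_ORGANIZATION_ID", "org-id")
os.environ.setdefault("UIPATH_TENANT_ID", "tenant-id")
os.environ.setdefault("UIPATH_ACCESS_TOKEN", "token")

# Defaults to BedrockModels.anthropic_claude_haiku_4_5 per the signature above.
llm = UiPathChatBedrockConverse()
print(llm.invoke("Hello").content)
```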
uipath_langchain/chat/gemini.py
@@ -0,0 +1,330 @@
+import logging
+import os
+from typing import Optional, Union
+
+import aiohttp
+from pydantic import Field
+from uipath.utils import EndpointManager
+
+from .supported_models import GeminiModels
+
+logger = logging.getLogger(__name__)
+
+
+def _check_vertex_dependencies() -> None:
+    """Check if required dependencies for UiPathChatVertex are installed."""
+    import importlib.util
+
+    missing_packages = []
+
+    if importlib.util.find_spec("langchain_google_vertexai") is None:
+        missing_packages.append("langchain-google-vertexai")
+
+    if importlib.util.find_spec("langchain_community") is None:
+        missing_packages.append("langchain-community")
+
+    if missing_packages:
+        packages_str = ", ".join(missing_packages)
+        raise ImportError(
+            f"The following packages are required to use UiPathChatVertex: {packages_str}\n"
+            "Please install them using one of the following methods:\n\n"
+            "  # Using pip:\n"
+            f"  pip install uipath-langchain[vertex]\n\n"
+            "  # Using uv:\n"
+            f"  uv add 'uipath-langchain[vertex]'\n\n"
+        )
+
+
+_check_vertex_dependencies()
+
+from google.auth.credentials import AnonymousCredentials
+from google.cloud.aiplatform_v1.services.prediction_service import (
+    PredictionServiceAsyncClient as v1PredictionServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1.services.prediction_service import (
+    PredictionServiceClient as v1PredictionServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+    PredictionServiceAsyncClient as v1beta1PredictionServiceAsyncClient,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+    PredictionServiceClient as v1beta1PredictionServiceClient,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.base import (
+    PredictionServiceTransport,
+)
+from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.rest import (
+    PredictionServiceRestTransport,
+)
+from langchain_community.utilities.vertexai import (
+    get_client_info,
+)
+from langchain_google_vertexai import ChatVertexAI
+
+
+class CustomPredictionServiceRestTransport(PredictionServiceRestTransport):
+    def __init__(self, llmgw_url: str, custom_headers: dict[str, str], **kwargs):
+        self.llmgw_url = llmgw_url
+        self.custom_headers = custom_headers or {}
+
+        kwargs.setdefault("credentials", AnonymousCredentials())
+        super().__init__(**kwargs)
+
+        original_request = self._session.request
+
+        def redirected_request(method, url, **kwargs_inner):
+            headers = kwargs_inner.pop("headers", {})
+            headers.update(self.custom_headers)
+
+            is_streaming = kwargs_inner.get("stream", False)
+            headers["X-UiPath-Streaming-Enabled"] = "true" if is_streaming else "false"
+
+            return original_request(
+                method, self.llmgw_url, headers=headers, **kwargs_inner
+            )
+
+        self._session.request = redirected_request  # type: ignore[method-assign, assignment]
+
+
+class CustomPredictionServiceRestAsyncTransport:
+    """
+    Custom async transport for calling UiPath LLM Gateway.
+
+    Uses aiohttp for REST/HTTP communication instead of gRPC.
+    Handles both regular and streaming responses from the gateway.
+    """
+
+    def __init__(self, llmgw_url: str, custom_headers: dict[str, str], **kwargs):
+        self.llmgw_url = llmgw_url
+        self.custom_headers = custom_headers or {}
+
+    def _serialize_request(self, request) -> str:
+        """Convert proto-plus request to JSON string."""
+        import json
+
+        from proto import (  # type: ignore[import-untyped]
+            Message as ProtoMessage,
+        )
+
+        if isinstance(request, ProtoMessage):
+            request_dict = type(request).to_dict(
+                request, preserving_proto_field_name=False
+            )
+            return json.dumps(request_dict)
+        else:
+            from google.protobuf.json_format import MessageToJson
+
+            return MessageToJson(request, preserving_proto_field_name=False)
+
+    def _get_response_class(self, request):
+        """Get the response class corresponding to the request class."""
+        import importlib
+
+        response_class_name = request.__class__.__name__.replace("Request", "Response")
+        response_class = getattr(
+            request.__class__.__module__, response_class_name, None
+        )
+
+        if response_class is None:
+            module = importlib.import_module(request.__class__.__module__)
+            response_class = getattr(module, response_class_name, None)
+
+        return response_class
+
+    def _deserialize_response(self, response_json: str, request):
+        """Convert JSON string to proto-plus response object."""
+        import json
+
+        from proto import Message as ProtoMessage
+
+        response_class = self._get_response_class(request)
+
+        if response_class and isinstance(request, ProtoMessage):
+            return response_class.from_json(response_json, ignore_unknown_fields=True)
+        elif response_class:
+            from google.protobuf.json_format import Parse
+
+            return Parse(response_json, response_class(), ignore_unknown_fields=True)
+        else:
+            return json.loads(response_json)
+
+    async def _make_request(self, request_json: str, streaming: bool = False):
+        """Make HTTP POST request to UiPath gateway."""
+        headers = self.custom_headers.copy()
+        headers["Content-Type"] = "application/json"
+
+        if streaming:
+            headers["X-UiPath-Streaming-Enabled"] = "true"
+
+        connector = aiohttp.TCPConnector(ssl=True)
+        async with aiohttp.ClientSession(connector=connector) as session:
+            async with session.post(
+                self.llmgw_url, headers=headers, data=request_json
+            ) as response:
+                if response.status != 200:
+                    error_text = await response.text()
+                    raise Exception(f"HTTP {response.status}: {error_text}")
+
+                return await response.text()
+
+    async def generate_content(self, request, **kwargs):
+        """Handle non-streaming generate_content calls."""
+        request_json = self._serialize_request(request)
+        response_text = await self._make_request(request_json, streaming=False)
+        return self._deserialize_response(response_text, request)
+
+    def stream_generate_content(self, request, **kwargs):
+        """
+        Handle streaming generate_content calls.
+
+        Returns a coroutine that yields an async iterator.
+        """
+        return self._create_stream_awaitable(request)
+
+    async def _create_stream_awaitable(self, request):
+        """Awaitable wrapper that returns the async generator."""
+        return self._stream_implementation(request)
+
+    async def _stream_implementation(self, request):
+        """
+        Async generator that yields streaming response chunks.
+
+        Parses the array and yields each chunk individually.
+        """
+        import json
+
+        request_json = self._serialize_request(request)
+        response_text = await self._make_request(request_json, streaming=True)
+
+        try:
+            chunks_array = json.loads(response_text)
+            if isinstance(chunks_array, list):
+                logger.info(f"Streaming: yielding {len(chunks_array)} chunks")
+                for chunk_data in chunks_array:
+                    chunk_json = json.dumps(chunk_data)
+                    yield self._deserialize_response(chunk_json, request)
+                return
+        except Exception as e:
+            logger.info(f"Not a JSON array, trying single response: {e}")
+
+        try:
+            yield self._deserialize_response(response_text, request)
+        except Exception as e:
+            logger.error(f"Failed to parse streaming response: {e}")
+
+
+class UiPathChatVertex(ChatVertexAI):
+    transport: Optional[PredictionServiceTransport] = Field(default=None)
+    async_transport: Optional[CustomPredictionServiceRestAsyncTransport] = Field(
+        default=None
+    )
+    async_client: Optional[
+        Union[v1beta1PredictionServiceAsyncClient, v1PredictionServiceAsyncClient]
+    ] = Field(default=None)
+
+    def __init__(
+        self,
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        token: Optional[str] = None,
+        model_name: str = GeminiModels.gemini_2_5_flash,
+        **kwargs,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        self._vendor = "vertexai"
+        self._model_name = model_name
+        self._url: Optional[str] = None
+
+        llmgw_url = self._build_base_url()
+
+        headers = self._build_headers(token)
+
+        super().__init__(
+            model=model_name,
+            project=os.getenv("VERTEXAI_PROJECT", "none"),
+            location=os.getenv("VERTEXAI_LOCATION", "us-central1"),
+            **kwargs,
+        )
+
+        self.transport = CustomPredictionServiceRestTransport(
+            llmgw_url=llmgw_url, custom_headers=headers
+        )
+
+        self.async_transport = CustomPredictionServiceRestAsyncTransport(
+            llmgw_url=llmgw_url, custom_headers=headers
+        )
+
+    @property
+    def prediction_client(
+        self,
+    ) -> Union[v1beta1PredictionServiceClient, v1PredictionServiceClient]:
+        if self.client is None:
+            if self.endpoint_version == "v1":
+                self.client = v1PredictionServiceClient(
+                    client_options=self.client_options,
+                    client_info=get_client_info(module=self._user_agent),
+                    transport=self.transport,  # type: ignore[arg-type]
+                )
+            else:
+                self.client = v1beta1PredictionServiceClient(
+                    client_options=self.client_options,
+                    client_info=get_client_info(module=self._user_agent),
+                    transport=self.transport,
+                )
+        return self.client
+
+    @property
+    def async_prediction_client(
+        self,
+    ) -> Union[
+        v1beta1PredictionServiceAsyncClient,
+        v1PredictionServiceAsyncClient,
+    ]:
+        return self.async_transport  # type: ignore[return-value]
+
+    @property
+    def endpoint(self) -> str:
+        vendor_endpoint = EndpointManager.get_vendor_endpoint()
+        formatted_endpoint = vendor_endpoint.format(
+            vendor=self._vendor,
+            model=self._model_name,
+        )
+        return formatted_endpoint
+
+    def _build_headers(self, token: str) -> dict[str, str]:
+        headers = {
+            # "X-UiPath-LlmGateway-ApiFlavor": "auto",
+            "Authorization": f"Bearer {token}",
+        }
+        if job_key := os.getenv("UIPATH_JOB_KEY"):
+            headers["X-UiPath-JobKey"] = job_key
+        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
+            headers["X-UiPath-ProcessKey"] = process_key
+        return headers
+
+    def _build_base_url(self) -> str:
+        if not self._url:
+            env_uipath_url = os.getenv("UIPATH_URL")
+
+            if env_uipath_url:
+                self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}"
+            else:
+                raise ValueError("UIPATH_URL environment variable is required")
+
+        return self._url
+
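As with the Bedrock wrapper, `UiPathChatVertex` swaps the Vertex transports for ones that redirect every prediction call to the LLM Gateway. A hedged instantiation sketch under the assumption that the UiPath environment variables are already set (construction fails fast with a `ValueError` otherwise, per the checks above):

```python
from uipath_langchain.chat.gemini import UiPathChatVertex
from uipath_langchain.chat.supported_models import GeminiModels

# Assumes UIPATH_URL, UIPATH_ORGANIZATION_ID, UIPATH_TENANT_ID and
# UIPATH_ACCESS_TOKEN are present in the environment.
llm = UiPathChatVertex(model_name=GeminiModels.gemini_2_5_flash)
print(llm.invoke("Hello").content)
```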