uipath-langchain 0.0.112__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- uipath_langchain/_cli/_templates/main.py.template +12 -13
- uipath_langchain/_cli/cli_init.py +127 -156
- uipath_langchain/_cli/cli_new.py +2 -6
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/{tracers → _tracing}/__init__.py +0 -2
- uipath_langchain/_tracing/_instrument_traceable.py +134 -0
- uipath_langchain/_utils/__init__.py +1 -2
- uipath_langchain/_utils/_request_mixin.py +351 -54
- uipath_langchain/_utils/_settings.py +2 -11
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +113 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/gemini.py +330 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +261 -38
- uipath_langchain/chat/openai.py +132 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/embeddings/embeddings.py +136 -36
- uipath_langchain/middlewares.py +0 -2
- uipath_langchain/py.typed +0 -0
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +349 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -20
- uipath_langchain-0.1.24.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -136
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -371
- uipath_langchain/_cli/_utils/_graph.py +0 -202
- uipath_langchain/_cli/cli_run.py +0 -80
- uipath_langchain/tracers/AsyncUiPathTracer.py +0 -274
- uipath_langchain/tracers/_events.py +0 -33
- uipath_langchain/tracers/_instrument_traceable.py +0 -416
- uipath_langchain/tracers/_utils.py +0 -52
- uipath_langchain-0.0.112.dist-info/RECORD +0 -36
- uipath_langchain-0.0.112.dist-info/entry_points.txt +0 -2
- {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/_utils/_request_mixin.py

@@ -3,13 +3,16 @@ import json
 import logging
 import os
 import time
-from typing import Any, Dict,
+from typing import Any, AsyncIterator, Dict, Iterator, Mapping
 
 import httpx
 import openai
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models.chat_models import _cleanup_llm_representation
-from
+from langchain_core.messages import AIMessageChunk
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.outputs import ChatGenerationChunk
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, ValidationError
 from tenacity import (
     AsyncRetrying,
     Retrying,
@@ -17,6 +20,12 @@ from tenacity import (
     stop_after_attempt,
     wait_exponential_jitter,
 )
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.runtime.errors import (
+    UiPathErrorCategory,
+    UiPathErrorCode,
+    UiPathRuntimeError,
+)
 
 from uipath_langchain._utils._settings import (
     UiPathClientFactorySettings,
@@ -24,6 +33,10 @@ from uipath_langchain._utils._settings import (
     get_uipath_token_header,
 )
 from uipath_langchain._utils._sleep_policy import before_sleep_log
+from uipath_langchain.runtime.errors import (
+    LangGraphErrorCode,
+    LangGraphRuntimeError,
+)
 
 
 def get_from_uipath_url():
@@ -33,44 +46,60 @@ def get_from_uipath_url():
     return None
 
 
+def _get_access_token(data):
+    """Get access token from settings, environment variables, or UiPath client factory."""
+    token = (
+        getattr(data["settings"], "access_token", None)
+        or os.getenv("UIPATH_ACCESS_TOKEN")
+        or os.getenv("UIPATH_SERVICE_TOKEN")
+    )
+
+    if token:
+        return token
+
+    try:
+        settings = UiPathClientFactorySettings(
+            UIPATH_BASE_URL=data["base_url"],
+            UIPATH_CLIENT_ID=data["client_id"],
+            UIPATH_CLIENT_SECRET=data["client_secret"],
+        )
+        return get_uipath_token_header(settings)
+    except ValidationError:
+        raise UiPathRuntimeError(
+            UiPathErrorCode.EXECUTION_ERROR,
+            title="Authorization required",
+            detail="Authorization required. Please run uipath auth",
+            category=UiPathErrorCategory.USER,
+        ) from None
+
+
 class UiPathRequestMixin(BaseModel):
-
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
-    default_headers:
+    default_headers: Mapping[str, str] | None = {
         "X-UiPath-Streaming-Enabled": "false",
+        "X-UiPath-JobKey": os.getenv("UIPATH_JOB_KEY", ""),
+        "X-UiPath-ProcessKey": os.getenv("UIPATH_PROCESS_KEY", ""),
     }
-    model_name:
+    model_name: str | None = Field(
         default_factory=lambda: os.getenv("UIPATH_MODEL_NAME", "gpt-4o-2024-08-06"),
         alias="model",
     )
-    settings:
-    client_id:
-
-    )
-    client_secret: Optional[str] = Field(
+    settings: UiPathClientSettings | None = None
+    client_id: str | None = Field(default_factory=lambda: os.getenv("UIPATH_CLIENT_ID"))
+    client_secret: str | None = Field(
         default_factory=lambda: os.getenv("UIPATH_CLIENT_SECRET")
     )
-    base_url:
+    base_url: str | None = Field(
         default_factory=lambda data: getattr(data["settings"], "base_url", None)
         or os.getenv("UIPATH_BASE_URL")
         or get_from_uipath_url(),
         alias="azure_endpoint",
     )
-    access_token:
-        default_factory=lambda data: (
-            getattr(data["settings"], "access_token", None)
-            or os.getenv("UIPATH_ACCESS_TOKEN")  # Environment variable
-            or os.getenv("UIPATH_SERVICE_TOKEN")  # Environment variable
-            or get_uipath_token_header(
-                UiPathClientFactorySettings(
-                    UIPATH_BASE_URL=data["base_url"],
-                    UIPATH_CLIENT_ID=data["client_id"],
-                    UIPATH_CLIENT_SECRET=data["client_secret"],
-                )
-            )  # Get service token from UiPath
-        )
+    access_token: str | None = Field(
+        default_factory=lambda data: _get_access_token(data)
     )
+
     org_id: Any = Field(
         default_factory=lambda data: getattr(data["settings"], "org_id", None)
         or os.getenv("UIPATH_ORGANIZATION_ID", "")
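The field defaults above rely on pydantic v2's data-aware `default_factory`, where a one-argument factory receives the already-validated fields as a dict; that is why `_get_access_token(data)` can read `data["settings"]`, `data["base_url"]`, and the client credentials. A minimal sketch of the pattern with hypothetical field names, assuming a pydantic 2 release that supports one-argument default factories (2.6 or later):

```python
# Illustrative sketch only (hypothetical model). Requires pydantic >= 2.6,
# where a one-argument default_factory receives validated fields as a dict.
import os

from pydantic import BaseModel, Field


class Example(BaseModel):
    # Zero-argument factory: plain environment lookup.
    base_url: str | None = Field(default_factory=lambda: os.getenv("UIPATH_BASE_URL"))
    # One-argument factory: `data` holds fields validated before this one,
    # so declaration order matters (as it does for access_token above).
    token: str | None = Field(
        default_factory=lambda data: f"token-for-{data['base_url']}"
        if data["base_url"]
        else None
    )


print(Example(base_url="https://cloud.uipath.com").token)  # token-for-https://...
```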
@@ -83,13 +112,13 @@ class UiPathRequestMixin(BaseModel):
         default_factory=lambda data: getattr(
             data["settings"], "requesting_product", None
         )
-        or os.getenv("UIPATH_REQUESTING_PRODUCT", "")
+        or os.getenv("UIPATH_REQUESTING_PRODUCT", "uipath-python-sdk")
     )
     requesting_feature: Any = Field(
         default_factory=lambda data: getattr(
             data["settings"], "requesting_feature", None
         )
-        or os.getenv("UIPATH_REQUESTING_FEATURE", "")
+        or os.getenv("UIPATH_REQUESTING_FEATURE", "langgraph-agent")
     )
     default_request_timeout: Any = Field(
         default_factory=lambda data: float(
@@ -99,47 +128,49 @@ class UiPathRequestMixin(BaseModel):
         alias="timeout",
     )
 
-    openai_api_version:
+    openai_api_version: str | None = Field(
         default_factory=lambda: os.getenv("OPENAI_API_VERSION", "2024-08-01-preview"),
         alias="api_version",
     )
     include_account_id: bool = False
-    temperature:
-    max_tokens:
-    frequency_penalty:
-    presence_penalty:
+    temperature: float | None = 0.0
+    max_tokens: int | None = 1000
+    frequency_penalty: float | None = None
+    presence_penalty: float | None = None
 
-    logger:
-    max_retries:
+    logger: logging.Logger | None = None
+    max_retries: int | None = 5
     base_delay: float = 5.0
     max_delay: float = 60.0
 
-    _url:
-    _auth_headers:
+    _url: str | None = None
+    _auth_headers: dict[str, str] | None = None
 
     # required to instantiate AzureChatOpenAI subclasses
-    azure_endpoint:
+    azure_endpoint: str | None = Field(
         default="placeholder", description="Bypassed Azure endpoint"
     )
-    openai_api_key:
+    openai_api_key: SecretStr | None = Field(
         default=SecretStr("placeholder"), description="Bypassed API key"
     )
     # required to instatiate ChatAnthropic subclasses (will be needed when passthrough is implemented for Anthropic models)
-    stop_sequences:
+    stop_sequences: list[str] | None = Field(
         default=None, description="Bypassed stop sequence"
     )
 
     def _request(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run an asynchronous call to the LLM."""
         # if self.logger:
         #     self.logger.info(f"Completion request: {request_body['messages'][:2]}")
+        client_kwargs = get_httpx_client_kwargs()
         with httpx.Client(
+            **client_kwargs,  # Apply SSL configuration
             event_hooks={
                 "request": [self._log_request_duration],
                 "response": [self._log_response_duration],
-            }
+            },
         ) as client:
             response = client.post(
                 url,
@@ -150,6 +181,8 @@ class UiPathRequestMixin(BaseModel):
 
         # Handle HTTP errors and map them to OpenAI exceptions
         try:
+            content = response.content  # Read content to avoid closed stream issues
+            print(f"Response content: {content.decode('utf-8')}")
             response.raise_for_status()
         except httpx.HTTPStatusError as err:
             if self.logger:
@@ -169,8 +202,8 @@ class UiPathRequestMixin(BaseModel):
         return response.json()
 
     def _call(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run a synchronous call with retries to LLM"""
         if self.max_retries is None:
             return self._request(url, request_body, headers)
@@ -193,6 +226,7 @@ class UiPathRequestMixin(BaseModel):
 
         try:
             return retryer(self._request, url, request_body, headers)
+            # return self._request(url, request_body, headers)
         except openai.APIStatusError as err:
             if self.logger:
                 self.logger.error(
@@ -208,15 +242,17 @@ class UiPathRequestMixin(BaseModel):
         raise err
 
     async def _arequest(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         # if self.logger:
         #     self.logger.info(f"Completion request: {request_body['messages'][:2]}")
+        client_kwargs = get_httpx_client_kwargs()
         async with httpx.AsyncClient(
+            **client_kwargs,  # Apply SSL configuration
             event_hooks={
                 "request": [self._alog_request_duration],
                 "response": [self._alog_response_duration],
-            }
+            },
         ) as client:
             response = await client.post(
                 url,
@@ -245,8 +281,8 @@ class UiPathRequestMixin(BaseModel):
         return response.json()
 
     async def _acall(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run an asynchronous call with retries to the LLM."""
         if self.max_retries is None:
             return await self._arequest(url, request_body, headers)
@@ -302,6 +338,256 @@ class UiPathRequestMixin(BaseModel):
         )
         raise err
 
+    def _convert_chunk(
+        self,
+        chunk: Dict[str, Any],
+        default_chunk_class: type,
+        include_tool_calls: bool = False,
+    ) -> ChatGenerationChunk | None:
+        """Convert a streaming chunk to a ChatGenerationChunk.
+
+        Args:
+            chunk: The raw SSE chunk dictionary
+            default_chunk_class: The default message chunk class to use
+            include_tool_calls: Whether to parse and include tool call chunks
+
+        Returns:
+            A ChatGenerationChunk or None if the chunk should be skipped
+        """
+
+        token_usage = chunk.get("usage")
+        choices = chunk.get("choices", [])
+
+        usage_metadata: UsageMetadata | None = None
+        if token_usage:
+            usage_metadata = UsageMetadata(
+                input_tokens=token_usage.get("prompt_tokens", 0),
+                output_tokens=token_usage.get("completion_tokens", 0),
+                total_tokens=token_usage.get("total_tokens", 0),
+            )
+
+        if len(choices) == 0:
+            return ChatGenerationChunk(
+                message=default_chunk_class(content="", usage_metadata=usage_metadata),
+                generation_info={},
+            )
+
+        choice = choices[0]
+        delta = choice.get("delta")
+        if delta is None:
+            return None
+
+        # Extract content from delta
+        content = delta.get("content", "")
+
+        # Build the message chunk
+        message_kwargs = {
+            "content": content or "",
+            "usage_metadata": usage_metadata,
+        }
+
+        # Handle tool calls if requested (for normalized API)
+        if include_tool_calls:
+            tool_calls = delta.get("tool_calls", [])
+            tool_call_chunks = []
+            if tool_calls:
+                for tc in tool_calls:
+                    # Tool call structure: {'function': {'name': '...', 'arguments': '...'}, 'id': '...', 'index': 0}
+                    function = tc.get("function", {})
+                    tool_call_chunks.append(
+                        {
+                            "id": tc.get("id"),
+                            "name": function.get("name"),
+                            "args": function.get("arguments", ""),
+                            "index": tc.get("index", 0),
+                        }
+                    )
+            if tool_call_chunks:
+                message_kwargs["tool_call_chunks"] = tool_call_chunks
+
+        message_chunk = AIMessageChunk(**message_kwargs)
+
+        generation_info = {}
+        if finish_reason := choice.get("finish_reason"):
+            generation_info["finish_reason"] = finish_reason
+        if model_name := chunk.get("model"):
+            generation_info["model_name"] = model_name
+
+        return ChatGenerationChunk(
+            message=message_chunk,
+            generation_info=generation_info or None,
+        )
+
+    def _stream_request(
+        self, url: str, request_body: Dict[str, Any], headers: Dict[str, str]
+    ) -> Iterator[Dict[str, Any]]:
+        """Stream SSE responses from the LLM."""
+        client_kwargs = get_httpx_client_kwargs()
+        with httpx.Client(
+            **client_kwargs,
+            event_hooks={
+                "request": [self._log_request_duration],
+                "response": [self._log_response_duration],
+            },
+        ) as client:
+            with client.stream(
+                "POST",
+                url,
+                headers=headers,
+                json=request_body,
+                timeout=self.default_request_timeout,
+            ) as response:
+                try:
+                    response.raise_for_status()
+                except httpx.HTTPStatusError as err:
+                    if self.logger:
+                        self.logger.error(
+                            "Error querying UiPath: %s (%s)",
+                            err.response.reason_phrase,
+                            err.response.status_code,
+                            extra={
+                                "ActionName": self.settings.action_name,
+                                "ActionId": self.settings.action_id,
+                            }
+                            if self.settings
+                            else None,
+                        )
+                    # Read the response body for streaming responses
+                    err.response.read()
+                    raise self._make_status_error_from_response(err.response) from err
+
+                for line in response.iter_lines():
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    if self.logger:
+                        self.logger.debug(f"[SSE] Raw line: {line}")
+
+                    if line.startswith("data:"):
+                        data = line[
+                            5:
+                        ].strip()  # Remove "data:" prefix and strip whitespace
+                        if data == "[DONE]":
+                            break
+                        if not data:  # Skip empty data lines
+                            continue
+                        try:
+                            parsed = json.loads(data)
+                            # Skip empty chunks (some APIs send them as keepalive)
+                            # Check for truly empty: empty id AND (no choices or empty choices list)
+                            if (not parsed.get("id") or parsed.get("id") == "") and (
+                                not parsed.get("choices")
+                                or len(parsed.get("choices", [])) == 0
+                            ):
+                                if self.logger:
+                                    self.logger.debug(
+                                        "[SSE] Skipping empty keepalive chunk"
+                                    )
+                                continue
+                            yield parsed
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(
+                                    f"Failed to parse SSE chunk: {data}, error: {e}"
+                                )
+                            continue
+                    else:
+                        # Handle lines without "data: " prefix (some APIs send raw JSON)
+                        try:
+                            parsed = json.loads(line)
+                            if self.logger:
+                                self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}")
+                            yield parsed
+                        except json.JSONDecodeError:
+                            # Not JSON, skip
+                            pass
+
+    async def _astream_request(
+        self, url: str, request_body: Dict[str, Any], headers: Dict[str, str]
+    ) -> AsyncIterator[Dict[str, Any]]:
+        """Async stream SSE responses from the LLM."""
+        client_kwargs = get_httpx_client_kwargs()
+        async with httpx.AsyncClient(
+            **client_kwargs,
+            event_hooks={
+                "request": [self._alog_request_duration],
+                "response": [self._alog_response_duration],
+            },
+        ) as client:
+            async with client.stream(
+                "POST",
+                url,
+                headers=headers,
+                json=request_body,
+                timeout=self.default_request_timeout,
+            ) as response:
+                try:
+                    response.raise_for_status()
+                except httpx.HTTPStatusError as err:
+                    if self.logger:
+                        self.logger.error(
+                            "Error querying LLM: %s (%s)",
+                            err.response.reason_phrase,
+                            err.response.status_code,
+                            extra={
+                                "ActionName": self.settings.action_name,
+                                "ActionId": self.settings.action_id,
+                            }
+                            if self.settings
+                            else None,
+                        )
+                    # Read the response body for streaming responses
+                    await err.response.aread()
+                    raise self._make_status_error_from_response(err.response) from err
+
+                async for line in response.aiter_lines():
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    if self.logger:
+                        self.logger.debug(f"[SSE] Raw line: {line}")
+
+                    if line.startswith("data:"):
+                        data = line[
+                            5:
+                        ].strip()  # Remove "data:" prefix and strip whitespace
+                        if data == "[DONE]":
+                            break
+                        if not data:  # Skip empty data lines
+                            continue
+                        try:
+                            parsed = json.loads(data)
+                            # Skip empty chunks (some APIs send them as keepalive)
+                            # Check for truly empty: empty id AND (no choices or empty choices list)
+                            if (not parsed.get("id") or parsed.get("id") == "") and (
+                                not parsed.get("choices")
+                                or len(parsed.get("choices", [])) == 0
+                            ):
+                                if self.logger:
+                                    self.logger.debug(
+                                        "[SSE] Skipping empty keepalive chunk"
+                                    )
+                                continue
+                            yield parsed
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(
+                                    f"Failed to parse SSE chunk: {data}, error: {e}"
+                                )
+                            continue
+                    else:
+                        # Handle lines without "data: " prefix (some APIs send raw JSON)
+                        try:
+                            parsed = json.loads(line)
+                            if self.logger:
+                                self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}")
+                            yield parsed
+                        except json.JSONDecodeError:
+                            # Not JSON, skip
+                            pass
+
     def _make_status_error_from_response(
         self,
         response: httpx.Response,
@@ -341,6 +627,19 @@ class UiPathRequestMixin(BaseModel):
             return openai.AuthenticationError(err_msg, response=response, body=data)
 
         if response.status_code == 403:
+            # Check if this is a license-specific error
+            if isinstance(body, dict):
+                title = body.get("title", "").lower()
+                if title == "license not available":
+                    raise LangGraphRuntimeError(
+                        code=LangGraphErrorCode.LICENSE_NOT_AVAILABLE,
+                        title=body.get("title", "License Not Available"),
+                        detail=body.get(
+                            "detail", "License not available for this service"
+                        ),
+                        category=UiPathErrorCategory.DEPLOYMENT,
+                    )
+
             return openai.PermissionDeniedError(err_msg, response=response, body=data)
 
         if response.status_code == 404:
@@ -406,7 +705,7 @@ class UiPathRequestMixin(BaseModel):
         return "uipath"
 
     @property
-    def _identifying_params(self) ->
+    def _identifying_params(self) -> dict[str, Any]:
         return {
             "url": self.url,
             "model": self.model_name,
@@ -442,13 +741,11 @@ class UiPathRequestMixin(BaseModel):
         )
 
     @property
-    def auth_headers(self) ->
+    def auth_headers(self) -> dict[str, str]:
         if not self._auth_headers:
             self._auth_headers = {
                 **self.default_headers,  # type: ignore
                 "Authorization": f"Bearer {self.access_token}",
-                "X-UiPath-LlmGateway-RequestingProduct": self.requesting_product,
-                "X-UiPath-LlmGateway-RequestingFeature": self.requesting_feature,
                 "X-UiPath-LlmGateway-TimeoutSeconds": str(self.default_request_timeout),
             }
         if self.is_normalized and self.model_name:
@@ -460,7 +757,7 @@ class UiPathRequestMixin(BaseModel):
             self._auth_headers["x-uipath-internal-tenantid"] = self.tenant_id
         return self._auth_headers
 
-    def _get_llm_string(self, stop:
+    def _get_llm_string(self, stop: list[str] | None = None, **kwargs: Any) -> str:
         serialized_repr = getattr(self, "_serialized", self.model_dump())
         _cleanup_llm_representation(serialized_repr, 1)
         kwargs = serialized_repr.get("kwargs", serialized_repr)
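Taken together, the new `_stream_request` yields parsed SSE dictionaries and `_convert_chunk` lifts each one into a LangChain `ChatGenerationChunk`. A rough sketch of how a chat-model subclass might glue the two together; this loop is illustrative only, not the package's actual `_stream` implementation:

```python
# Illustrative glue only -- not the package's actual streaming code.
from langchain_core.messages import AIMessageChunk


def iter_generation_chunks(mixin, url, request_body, headers):
    """Pair _stream_request with _convert_chunk, skipping delta-less chunks."""
    for raw in mixin._stream_request(url, request_body, headers):
        chunk = mixin._convert_chunk(raw, AIMessageChunk, include_tool_calls=True)
        if chunk is None:  # choice present but no delta: nothing to emit
            continue
        yield chunk  # ChatGenerationChunk carrying text/tool-call deltas and usage
```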
uipath_langchain/_utils/_settings.py

@@ -1,7 +1,6 @@
 # mypy: disable-error-code="syntax"
 import os
-from
-from typing import Any, Optional
+from typing import Any
 
 import httpx
 from pydantic import Field
@@ -22,7 +21,7 @@ class UiPathCachedPathsSettings(BaseSettings):
 
 
 uipath_cached_paths_settings = UiPathCachedPathsSettings()
-uipath_token_header:
+uipath_token_header: str | None = None
 
 
 class UiPathClientFactorySettings(BaseSettings):
@@ -47,14 +46,6 @@ class UiPathClientSettings(BaseSettings):
     action_id: str = Field(default="DefaultActionId", alias="UIPATH_ACTION_ID")
 
 
-class UiPathEndpoints(Enum):
-    NORMALIZED_COMPLETION_ENDPOINT = "llmgateway_/api/chat/completions"
-    PASSTHROUGH_COMPLETION_ENDPOINT = "llmgateway_/openai/deployments/{model}/chat/completions?api-version={api_version}"
-    EMBEDDING_ENDPOINT = (
-        "llmgateway_/openai/deployments/{model}/embeddings?api-version={api_version}"
-    )
-
-
 def get_uipath_token_header(
     settings: Any = None,
 ) -> str:
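The settings classes in this file are `pydantic-settings` `BaseSettings` subclasses whose fields bind to environment variables through `alias`, as `action_id` does with `UIPATH_ACTION_ID` above. A minimal sketch of that pattern with a hypothetical settings class:

```python
# Hypothetical settings class illustrating the Field(alias=...) environment
# binding used by UiPathClientSettings; requires the pydantic-settings package.
import os

from pydantic import Field
from pydantic_settings import BaseSettings


class DemoSettings(BaseSettings):
    # The alias doubles as the environment variable name to read.
    action_id: str = Field(default="DefaultActionId", alias="UIPATH_ACTION_ID")


os.environ["UIPATH_ACTION_ID"] = "action-42"
print(DemoSettings().action_id)  # -> action-42
```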
uipath_langchain/agent/guardrails/__init__.py (new file)

@@ -0,0 +1,21 @@
+from .guardrail_nodes import (
+    create_agent_guardrail_node,
+    create_llm_guardrail_node,
+    create_tool_guardrail_node,
+)
+from .guardrails_factory import build_guardrails_with_actions
+from .guardrails_subgraph import (
+    create_agent_guardrails_subgraph,
+    create_llm_guardrails_subgraph,
+    create_tool_guardrails_subgraph,
+)
+
+__all__ = [
+    "create_llm_guardrails_subgraph",
+    "create_agent_guardrails_subgraph",
+    "create_tool_guardrails_subgraph",
+    "create_llm_guardrail_node",
+    "create_agent_guardrail_node",
+    "create_tool_guardrail_node",
+    "build_guardrails_with_actions",
+]
uipath_langchain/agent/guardrails/actions/base_action.py (new file)

@@ -0,0 +1,23 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+GuardrailActionNode = tuple[str, Any]
+
+
+class GuardrailAction(ABC):
+    """Extensible action interface producing a node to enforce the action on guardrail validation failure."""
+
+    @abstractmethod
+    def action_node(
+        self,
+        *,
+        guardrail: BaseGuardrail,
+        scope: GuardrailScope,
+        execution_stage: ExecutionStage,
+    ) -> GuardrailActionNode:
+        """Create and return the Action node to execute on validation failure."""
+        ...