uipath-langchain 0.0.133__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, exactly as they appear in their respective public registries. It is provided for informational purposes only.
- uipath_langchain/_cli/cli_init.py +130 -191
- uipath_langchain/_cli/cli_new.py +2 -3
- uipath_langchain/_resources/AGENTS.md +21 -0
- uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
- uipath_langchain/_tracing/__init__.py +3 -2
- uipath_langchain/_tracing/_instrument_traceable.py +11 -12
- uipath_langchain/_utils/_request_mixin.py +327 -51
- uipath_langchain/_utils/_settings.py +2 -2
- uipath_langchain/agent/exceptions/__init__.py +6 -0
- uipath_langchain/agent/exceptions/exceptions.py +11 -0
- uipath_langchain/agent/guardrails/__init__.py +21 -0
- uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
- uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
- uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
- uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
- uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
- uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
- uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
- uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
- uipath_langchain/agent/guardrails/types.py +20 -0
- uipath_langchain/agent/react/__init__.py +14 -0
- uipath_langchain/agent/react/agent.py +113 -0
- uipath_langchain/agent/react/constants.py +2 -0
- uipath_langchain/agent/react/init_node.py +20 -0
- uipath_langchain/agent/react/llm_node.py +43 -0
- uipath_langchain/agent/react/router.py +97 -0
- uipath_langchain/agent/react/terminate_node.py +82 -0
- uipath_langchain/agent/react/tools/__init__.py +7 -0
- uipath_langchain/agent/react/tools/tools.py +50 -0
- uipath_langchain/agent/react/types.py +39 -0
- uipath_langchain/agent/react/utils.py +49 -0
- uipath_langchain/agent/tools/__init__.py +17 -0
- uipath_langchain/agent/tools/context_tool.py +53 -0
- uipath_langchain/agent/tools/escalation_tool.py +111 -0
- uipath_langchain/agent/tools/integration_tool.py +181 -0
- uipath_langchain/agent/tools/process_tool.py +49 -0
- uipath_langchain/agent/tools/static_args.py +138 -0
- uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
- uipath_langchain/agent/tools/tool_factory.py +45 -0
- uipath_langchain/agent/tools/tool_node.py +22 -0
- uipath_langchain/agent/tools/utils.py +11 -0
- uipath_langchain/chat/__init__.py +4 -0
- uipath_langchain/chat/bedrock.py +187 -0
- uipath_langchain/chat/gemini.py +330 -0
- uipath_langchain/chat/mapper.py +309 -0
- uipath_langchain/chat/models.py +248 -35
- uipath_langchain/chat/openai.py +132 -0
- uipath_langchain/chat/supported_models.py +42 -0
- uipath_langchain/embeddings/embeddings.py +131 -34
- uipath_langchain/middlewares.py +0 -6
- uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
- uipath_langchain/runtime/__init__.py +36 -0
- uipath_langchain/runtime/_serialize.py +46 -0
- uipath_langchain/runtime/config.py +61 -0
- uipath_langchain/runtime/errors.py +43 -0
- uipath_langchain/runtime/factory.py +315 -0
- uipath_langchain/runtime/graph.py +159 -0
- uipath_langchain/runtime/runtime.py +453 -0
- uipath_langchain/runtime/schema.py +349 -0
- uipath_langchain/runtime/storage.py +115 -0
- uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -22
- uipath_langchain-0.1.24.dist-info/RECORD +76 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
- uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
- uipath_langchain/_cli/_runtime/_context.py +0 -21
- uipath_langchain/_cli/_runtime/_conversation.py +0 -298
- uipath_langchain/_cli/_runtime/_exception.py +0 -17
- uipath_langchain/_cli/_runtime/_input.py +0 -139
- uipath_langchain/_cli/_runtime/_output.py +0 -234
- uipath_langchain/_cli/_runtime/_runtime.py +0 -379
- uipath_langchain/_cli/_utils/_graph.py +0 -199
- uipath_langchain/_cli/cli_dev.py +0 -44
- uipath_langchain/_cli/cli_eval.py +0 -78
- uipath_langchain/_cli/cli_run.py +0 -82
- uipath_langchain/_tracing/_oteladapter.py +0 -222
- uipath_langchain/_tracing/_utils.py +0 -28
- uipath_langchain/builder/agent_config.py +0 -191
- uipath_langchain/tools/preconfigured.py +0 -191
- uipath_langchain-0.0.133.dist-info/RECORD +0 -41
- uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
- /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
- {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/_utils/_request_mixin.py

```diff
@@ -3,13 +3,16 @@ import json
 import logging
 import os
 import time
-from typing import Any, Dict,
+from typing import Any, AsyncIterator, Dict, Iterator, Mapping
 
 import httpx
 import openai
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models.chat_models import _cleanup_llm_representation
-from
+from langchain_core.messages import AIMessageChunk
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.outputs import ChatGenerationChunk
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, ValidationError
 from tenacity import (
     AsyncRetrying,
     Retrying,
```
```diff
@@ -17,16 +20,23 @@ from tenacity import (
     stop_after_attempt,
     wait_exponential_jitter,
 )
-from uipath._cli._runtime._contracts import UiPathErrorCategory
 from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.runtime.errors import (
+    UiPathErrorCategory,
+    UiPathErrorCode,
+    UiPathRuntimeError,
+)
 
-from uipath_langchain._cli._runtime._exception import LangGraphRuntimeError
 from uipath_langchain._utils._settings import (
     UiPathClientFactorySettings,
     UiPathClientSettings,
     get_uipath_token_header,
 )
 from uipath_langchain._utils._sleep_policy import before_sleep_log
+from uipath_langchain.runtime.errors import (
+    LangGraphErrorCode,
+    LangGraphRuntimeError,
+)
 
 
 def get_from_uipath_url():
```
```diff
@@ -36,45 +46,60 @@ def get_from_uipath_url():
     return None
 
 
+def _get_access_token(data):
+    """Get access token from settings, environment variables, or UiPath client factory."""
+    token = (
+        getattr(data["settings"], "access_token", None)
+        or os.getenv("UIPATH_ACCESS_TOKEN")
+        or os.getenv("UIPATH_SERVICE_TOKEN")
+    )
+
+    if token:
+        return token
+
+    try:
+        settings = UiPathClientFactorySettings(
+            UIPATH_BASE_URL=data["base_url"],
+            UIPATH_CLIENT_ID=data["client_id"],
+            UIPATH_CLIENT_SECRET=data["client_secret"],
+        )
+        return get_uipath_token_header(settings)
+    except ValidationError:
+        raise UiPathRuntimeError(
+            UiPathErrorCode.EXECUTION_ERROR,
+            title="Authorization required",
+            detail="Authorization required. Please run uipath auth",
+            category=UiPathErrorCategory.USER,
+        ) from None
+
+
 class UiPathRequestMixin(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
-    default_headers:
+    default_headers: Mapping[str, str] | None = {
         "X-UiPath-Streaming-Enabled": "false",
         "X-UiPath-JobKey": os.getenv("UIPATH_JOB_KEY", ""),
         "X-UiPath-ProcessKey": os.getenv("UIPATH_PROCESS_KEY", ""),
     }
-    model_name:
+    model_name: str | None = Field(
         default_factory=lambda: os.getenv("UIPATH_MODEL_NAME", "gpt-4o-2024-08-06"),
         alias="model",
     )
-    settings:
-    client_id:
-
-    )
-    client_secret: Optional[str] = Field(
+    settings: UiPathClientSettings | None = None
+    client_id: str | None = Field(default_factory=lambda: os.getenv("UIPATH_CLIENT_ID"))
+    client_secret: str | None = Field(
         default_factory=lambda: os.getenv("UIPATH_CLIENT_SECRET")
     )
-    base_url:
+    base_url: str | None = Field(
         default_factory=lambda data: getattr(data["settings"], "base_url", None)
         or os.getenv("UIPATH_BASE_URL")
         or get_from_uipath_url(),
         alias="azure_endpoint",
     )
-    access_token:
-        default_factory=lambda data: (
-            getattr(data["settings"], "access_token", None)
-            or os.getenv("UIPATH_ACCESS_TOKEN")  # Environment variable
-            or os.getenv("UIPATH_SERVICE_TOKEN")  # Environment variable
-            or get_uipath_token_header(
-                UiPathClientFactorySettings(
-                    UIPATH_BASE_URL=data["base_url"],
-                    UIPATH_CLIENT_ID=data["client_id"],
-                    UIPATH_CLIENT_SECRET=data["client_secret"],
-                )
-            )  # Get service token from UiPath
-        )
+    access_token: str | None = Field(
+        default_factory=lambda data: _get_access_token(data)
     )
+
     org_id: Any = Field(
         default_factory=lambda data: getattr(data["settings"], "org_id", None)
         or os.getenv("UIPATH_ORGANIZATION_ID", "")
```
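The `access_token` default previously inlined the whole fallback chain; 0.1.24 extracts it into `_get_access_token`, which tries explicit settings first, then the `UIPATH_ACCESS_TOKEN` and `UIPATH_SERVICE_TOKEN` environment variables, and only then the client-credentials factory. A minimal, self-contained sketch of that resolution order (the settings object here is a stand-in, not `UiPathClientSettings`):

```python
# Sketch of _get_access_token's resolution order with a placeholder
# settings object; the real fallback would finally try the
# client-credentials token factory.
import os

class FakeSettings:
    access_token: str | None = None

def resolve_token(settings: FakeSettings) -> str | None:
    return (
        getattr(settings, "access_token", None)  # 1. explicit settings
        or os.getenv("UIPATH_ACCESS_TOKEN")      # 2. user token
        or os.getenv("UIPATH_SERVICE_TOKEN")     # 3. service token
    )

os.environ["UIPATH_SERVICE_TOKEN"] = "svc-token-123"
print(resolve_token(FakeSettings()))  # -> svc-token-123
```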
```diff
@@ -103,39 +128,39 @@ class UiPathRequestMixin(BaseModel):
         alias="timeout",
     )
 
-    openai_api_version:
+    openai_api_version: str | None = Field(
         default_factory=lambda: os.getenv("OPENAI_API_VERSION", "2024-08-01-preview"),
         alias="api_version",
     )
     include_account_id: bool = False
-    temperature:
-    max_tokens:
-    frequency_penalty:
-    presence_penalty:
+    temperature: float | None = 0.0
+    max_tokens: int | None = 1000
+    frequency_penalty: float | None = None
+    presence_penalty: float | None = None
 
-    logger:
-    max_retries:
+    logger: logging.Logger | None = None
+    max_retries: int | None = 5
     base_delay: float = 5.0
     max_delay: float = 60.0
 
-    _url:
-    _auth_headers:
+    _url: str | None = None
+    _auth_headers: dict[str, str] | None = None
 
     # required to instantiate AzureChatOpenAI subclasses
-    azure_endpoint:
+    azure_endpoint: str | None = Field(
         default="placeholder", description="Bypassed Azure endpoint"
     )
-    openai_api_key:
+    openai_api_key: SecretStr | None = Field(
         default=SecretStr("placeholder"), description="Bypassed API key"
     )
     # required to instatiate ChatAnthropic subclasses (will be needed when passthrough is implemented for Anthropic models)
-    stop_sequences:
+    stop_sequences: list[str] | None = Field(
         default=None, description="Bypassed stop sequence"
     )
 
     def _request(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run an asynchronous call to the LLM."""
         # if self.logger:
         #     self.logger.info(f"Completion request: {request_body['messages'][:2]}")
```
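Several fields in these hunks (`base_url`, `access_token`, and `org_id` above) use `default_factory=lambda data: ...`, Pydantic v2's data-aware default factory: the lambda receives the fields validated so far as a dict, which is how `base_url` can fall back through `settings`, the environment, and `get_from_uipath_url()`. A small sketch of the mechanism (the `Endpoint` model is invented; this form of `default_factory` requires a recent Pydantic v2 release):

```python
# Sketch of Pydantic v2's data-aware default_factory: the factory's single
# argument receives the already-validated fields, in declaration order.
from pydantic import BaseModel, Field

class Endpoint(BaseModel):
    host: str
    url: str = Field(default_factory=lambda data: f"https://{data['host']}/api")

print(Endpoint(host="cloud.uipath.com").url)
# -> https://cloud.uipath.com/api
```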
```diff
@@ -156,6 +181,8 @@ class UiPathRequestMixin(BaseModel):
 
         # Handle HTTP errors and map them to OpenAI exceptions
         try:
+            content = response.content  # Read content to avoid closed stream issues
+            print(f"Response content: {content.decode('utf-8')}")
             response.raise_for_status()
         except httpx.HTTPStatusError as err:
             if self.logger:
```
```diff
@@ -175,8 +202,8 @@ class UiPathRequestMixin(BaseModel):
         return response.json()
 
     def _call(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run a synchronous call with retries to LLM"""
         if self.max_retries is None:
             return self._request(url, request_body, headers)
```
```diff
@@ -199,6 +226,7 @@ class UiPathRequestMixin(BaseModel):
 
         try:
             return retryer(self._request, url, request_body, headers)
+            # return self._request(url, request_body, headers)
         except openai.APIStatusError as err:
             if self.logger:
                 self.logger.error(
```
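`_call` builds a tenacity `Retrying` object and invokes it as `retryer(self._request, url, request_body, headers)`; `_acall` below does the same with `AsyncRetrying`. A runnable sketch of that call pattern, with illustrative retry settings rather than the package's actual configuration:

```python
# Tenacity's Retrying object is callable: pass the function plus its
# arguments and it retries on exceptions until the stop condition hits.
from tenacity import Retrying, stop_after_attempt, wait_exponential_jitter

attempts = {"n": 0}

def flaky() -> str:
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("transient failure")
    return "ok"

retryer = Retrying(
    stop=stop_after_attempt(5),
    wait=wait_exponential_jitter(initial=0.1, max=1.0),
    reraise=True,  # surface the last exception instead of RetryError
)
print(retryer(flaky))  # retries twice, then prints "ok"
```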
```diff
@@ -214,8 +242,8 @@ class UiPathRequestMixin(BaseModel):
             raise err
 
     async def _arequest(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         # if self.logger:
         #     self.logger.info(f"Completion request: {request_body['messages'][:2]}")
         client_kwargs = get_httpx_client_kwargs()
```
```diff
@@ -253,8 +281,8 @@ class UiPathRequestMixin(BaseModel):
         return response.json()
 
     async def _acall(
-        self, url: str, request_body:
-    ) ->
+        self, url: str, request_body: dict[str, Any], headers: dict[str, str]
+    ) -> dict[str, Any]:
         """Run an asynchronous call with retries to the LLM."""
         if self.max_retries is None:
             return await self._arequest(url, request_body, headers)
```
```diff
@@ -310,6 +338,256 @@ class UiPathRequestMixin(BaseModel):
             )
             raise err
 
+    def _convert_chunk(
+        self,
+        chunk: Dict[str, Any],
+        default_chunk_class: type,
+        include_tool_calls: bool = False,
+    ) -> ChatGenerationChunk | None:
+        """Convert a streaming chunk to a ChatGenerationChunk.
+
+        Args:
+            chunk: The raw SSE chunk dictionary
+            default_chunk_class: The default message chunk class to use
+            include_tool_calls: Whether to parse and include tool call chunks
+
+        Returns:
+            A ChatGenerationChunk or None if the chunk should be skipped
+        """
+
+        token_usage = chunk.get("usage")
+        choices = chunk.get("choices", [])
+
+        usage_metadata: UsageMetadata | None = None
+        if token_usage:
+            usage_metadata = UsageMetadata(
+                input_tokens=token_usage.get("prompt_tokens", 0),
+                output_tokens=token_usage.get("completion_tokens", 0),
+                total_tokens=token_usage.get("total_tokens", 0),
+            )
+
+        if len(choices) == 0:
+            return ChatGenerationChunk(
+                message=default_chunk_class(content="", usage_metadata=usage_metadata),
+                generation_info={},
+            )
+
+        choice = choices[0]
+        delta = choice.get("delta")
+        if delta is None:
+            return None
+
+        # Extract content from delta
+        content = delta.get("content", "")
+
+        # Build the message chunk
+        message_kwargs = {
+            "content": content or "",
+            "usage_metadata": usage_metadata,
+        }
+
+        # Handle tool calls if requested (for normalized API)
+        if include_tool_calls:
+            tool_calls = delta.get("tool_calls", [])
+            tool_call_chunks = []
+            if tool_calls:
+                for tc in tool_calls:
+                    # Tool call structure: {'function': {'name': '...', 'arguments': '...'}, 'id': '...', 'index': 0}
+                    function = tc.get("function", {})
+                    tool_call_chunks.append(
+                        {
+                            "id": tc.get("id"),
+                            "name": function.get("name"),
+                            "args": function.get("arguments", ""),
+                            "index": tc.get("index", 0),
+                        }
+                    )
+            if tool_call_chunks:
+                message_kwargs["tool_call_chunks"] = tool_call_chunks
+
+        message_chunk = AIMessageChunk(**message_kwargs)
+
+        generation_info = {}
+        if finish_reason := choice.get("finish_reason"):
+            generation_info["finish_reason"] = finish_reason
+        if model_name := chunk.get("model"):
+            generation_info["model_name"] = model_name
+
+        return ChatGenerationChunk(
+            message=message_chunk,
+            generation_info=generation_info or None,
+        )
+
+    def _stream_request(
+        self, url: str, request_body: Dict[str, Any], headers: Dict[str, str]
+    ) -> Iterator[Dict[str, Any]]:
+        """Stream SSE responses from the LLM."""
+        client_kwargs = get_httpx_client_kwargs()
+        with httpx.Client(
+            **client_kwargs,
+            event_hooks={
+                "request": [self._log_request_duration],
+                "response": [self._log_response_duration],
+            },
+        ) as client:
+            with client.stream(
+                "POST",
+                url,
+                headers=headers,
+                json=request_body,
+                timeout=self.default_request_timeout,
+            ) as response:
+                try:
+                    response.raise_for_status()
+                except httpx.HTTPStatusError as err:
+                    if self.logger:
+                        self.logger.error(
+                            "Error querying UiPath: %s (%s)",
+                            err.response.reason_phrase,
+                            err.response.status_code,
+                            extra={
+                                "ActionName": self.settings.action_name,
+                                "ActionId": self.settings.action_id,
+                            }
+                            if self.settings
+                            else None,
+                        )
+                    # Read the response body for streaming responses
+                    err.response.read()
+                    raise self._make_status_error_from_response(err.response) from err
+
+                for line in response.iter_lines():
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    if self.logger:
+                        self.logger.debug(f"[SSE] Raw line: {line}")
+
+                    if line.startswith("data:"):
+                        data = line[
+                            5:
+                        ].strip()  # Remove "data:" prefix and strip whitespace
+                        if data == "[DONE]":
+                            break
+                        if not data:  # Skip empty data lines
+                            continue
+                        try:
+                            parsed = json.loads(data)
+                            # Skip empty chunks (some APIs send them as keepalive)
+                            # Check for truly empty: empty id AND (no choices or empty choices list)
+                            if (not parsed.get("id") or parsed.get("id") == "") and (
+                                not parsed.get("choices")
+                                or len(parsed.get("choices", [])) == 0
+                            ):
+                                if self.logger:
+                                    self.logger.debug(
+                                        "[SSE] Skipping empty keepalive chunk"
+                                    )
+                                continue
+                            yield parsed
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(
+                                    f"Failed to parse SSE chunk: {data}, error: {e}"
+                                )
+                            continue
+                    else:
+                        # Handle lines without "data: " prefix (some APIs send raw JSON)
+                        try:
+                            parsed = json.loads(line)
+                            if self.logger:
+                                self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}")
+                            yield parsed
+                        except json.JSONDecodeError:
+                            # Not JSON, skip
+                            pass
+
+    async def _astream_request(
+        self, url: str, request_body: Dict[str, Any], headers: Dict[str, str]
+    ) -> AsyncIterator[Dict[str, Any]]:
+        """Async stream SSE responses from the LLM."""
+        client_kwargs = get_httpx_client_kwargs()
+        async with httpx.AsyncClient(
+            **client_kwargs,
+            event_hooks={
+                "request": [self._alog_request_duration],
+                "response": [self._alog_response_duration],
+            },
+        ) as client:
+            async with client.stream(
+                "POST",
+                url,
+                headers=headers,
+                json=request_body,
+                timeout=self.default_request_timeout,
+            ) as response:
+                try:
+                    response.raise_for_status()
+                except httpx.HTTPStatusError as err:
+                    if self.logger:
+                        self.logger.error(
+                            "Error querying LLM: %s (%s)",
+                            err.response.reason_phrase,
+                            err.response.status_code,
+                            extra={
+                                "ActionName": self.settings.action_name,
+                                "ActionId": self.settings.action_id,
+                            }
+                            if self.settings
+                            else None,
+                        )
+                    # Read the response body for streaming responses
+                    await err.response.aread()
+                    raise self._make_status_error_from_response(err.response) from err
+
+                async for line in response.aiter_lines():
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    if self.logger:
+                        self.logger.debug(f"[SSE] Raw line: {line}")
+
+                    if line.startswith("data:"):
+                        data = line[
+                            5:
+                        ].strip()  # Remove "data:" prefix and strip whitespace
+                        if data == "[DONE]":
+                            break
+                        if not data:  # Skip empty data lines
+                            continue
+                        try:
+                            parsed = json.loads(data)
+                            # Skip empty chunks (some APIs send them as keepalive)
+                            # Check for truly empty: empty id AND (no choices or empty choices list)
+                            if (not parsed.get("id") or parsed.get("id") == "") and (
+                                not parsed.get("choices")
+                                or len(parsed.get("choices", [])) == 0
+                            ):
+                                if self.logger:
+                                    self.logger.debug(
+                                        "[SSE] Skipping empty keepalive chunk"
+                                    )
+                                continue
+                            yield parsed
+                        except json.JSONDecodeError as e:
+                            if self.logger:
+                                self.logger.warning(
+                                    f"Failed to parse SSE chunk: {data}, error: {e}"
+                                )
+                            continue
+                    else:
+                        # Handle lines without "data: " prefix (some APIs send raw JSON)
+                        try:
+                            parsed = json.loads(line)
+                            if self.logger:
+                                self.logger.debug(f"[SSE] Parsed raw JSON: {parsed}")
+                            yield parsed
+                        except json.JSONDecodeError:
+                            # Not JSON, skip
+                            pass
+
     def _make_status_error_from_response(
         self,
         response: httpx.Response,
```
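The new streaming helpers consume Server-Sent Events: each `data:` line carries an OpenAI-style JSON chunk, empty-id/empty-choices chunks are treated as keepalives, and `[DONE]` terminates the stream; `_convert_chunk` then wraps each delta in an `AIMessageChunk` so LangChain can merge the pieces. A standalone sketch of that framing, with invented payloads:

```python
# Sketch of the SSE framing handled above: "data:"-prefixed JSON chunks,
# a keepalive-style empty chunk, and a [DONE] terminator. The last lines
# show how LangChain merges AIMessageChunk deltas with "+".
import json

from langchain_core.messages import AIMessageChunk

sse_lines = [
    'data: {"id": "c1", "choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"id": "", "choices": []}',  # keepalive: empty id, no choices
    'data: {"id": "c1", "choices": [{"delta": {"content": "lo"}}]}',
    "data: [DONE]",
]

pieces = []
for line in sse_lines:
    data = line[5:].strip()  # drop the "data:" prefix
    if data == "[DONE]":
        break
    chunk = json.loads(data)
    if not chunk.get("id") and not chunk.get("choices"):
        continue  # skip keepalive chunks, as _stream_request does
    delta = chunk["choices"][0].get("delta", {})
    pieces.append(AIMessageChunk(content=delta.get("content", "")))

merged = pieces[0] + pieces[1]
print(merged.content)  # -> Hello
```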
```diff
@@ -354,7 +632,7 @@ class UiPathRequestMixin(BaseModel):
             title = body.get("title", "").lower()
             if title == "license not available":
                 raise LangGraphRuntimeError(
-                    code=
+                    code=LangGraphErrorCode.LICENSE_NOT_AVAILABLE,
                     title=body.get("title", "License Not Available"),
                     detail=body.get(
                         "detail", "License not available for this service"
```
```diff
@@ -427,7 +705,7 @@ class UiPathRequestMixin(BaseModel):
         return "uipath"
 
     @property
-    def _identifying_params(self) ->
+    def _identifying_params(self) -> dict[str, Any]:
         return {
             "url": self.url,
             "model": self.model_name,
```
```diff
@@ -463,13 +741,11 @@ class UiPathRequestMixin(BaseModel):
         )
 
     @property
-    def auth_headers(self) ->
+    def auth_headers(self) -> dict[str, str]:
         if not self._auth_headers:
             self._auth_headers = {
                 **self.default_headers,  # type: ignore
                 "Authorization": f"Bearer {self.access_token}",
-                "X-UiPath-LlmGateway-RequestingProduct": self.requesting_product,
-                "X-UiPath-LlmGateway-RequestingFeature": self.requesting_feature,
                 "X-UiPath-LlmGateway-TimeoutSeconds": str(self.default_request_timeout),
             }
         if self.is_normalized and self.model_name:
```
```diff
@@ -481,7 +757,7 @@ class UiPathRequestMixin(BaseModel):
             self._auth_headers["x-uipath-internal-tenantid"] = self.tenant_id
         return self._auth_headers
 
-    def _get_llm_string(self, stop:
+    def _get_llm_string(self, stop: list[str] | None = None, **kwargs: Any) -> str:
         serialized_repr = getattr(self, "_serialized", self.model_dump())
         _cleanup_llm_representation(serialized_repr, 1)
         kwargs = serialized_repr.get("kwargs", serialized_repr)
```
uipath_langchain/_utils/_settings.py

```diff
@@ -1,6 +1,6 @@
 # mypy: disable-error-code="syntax"
 import os
-from typing import Any
+from typing import Any
 
 import httpx
 from pydantic import Field
```
```diff
@@ -21,7 +21,7 @@ class UiPathCachedPathsSettings(BaseSettings):
 
 
 uipath_cached_paths_settings = UiPathCachedPathsSettings()
-uipath_token_header:
+uipath_token_header: str | None = None
 
 
 class UiPathClientFactorySettings(BaseSettings):
```
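`uipath_token_header` is a module-level global defaulting to `None`, which suggests `get_uipath_token_header` caches the fetched header for the life of the process. A sketch of that memoization pattern (the fetch function is a stand-in, not the package's actual logic):

```python
# Module-level cache pattern: fetch once, then serve the cached value.
_cached_header: str | None = None

def fetch_fresh_token() -> str:
    return "token-from-identity-server"  # placeholder for the real fetch

def get_token_header() -> str:
    global _cached_header
    if _cached_header is None:
        _cached_header = "Bearer " + fetch_fresh_token()
    return _cached_header

print(get_token_header())  # fetches once
print(get_token_header())  # served from the module-level cache
```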
uipath_langchain/agent/guardrails/__init__.py (new file)

```diff
@@ -0,0 +1,21 @@
+from .guardrail_nodes import (
+    create_agent_guardrail_node,
+    create_llm_guardrail_node,
+    create_tool_guardrail_node,
+)
+from .guardrails_factory import build_guardrails_with_actions
+from .guardrails_subgraph import (
+    create_agent_guardrails_subgraph,
+    create_llm_guardrails_subgraph,
+    create_tool_guardrails_subgraph,
+)
+
+__all__ = [
+    "create_llm_guardrails_subgraph",
+    "create_agent_guardrails_subgraph",
+    "create_tool_guardrails_subgraph",
+    "create_llm_guardrail_node",
+    "create_agent_guardrail_node",
+    "create_tool_guardrail_node",
+    "build_guardrails_with_actions",
+]
```
uipath_langchain/agent/guardrails/actions/base_action.py (new file)

```diff
@@ -0,0 +1,23 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+GuardrailActionNode = tuple[str, Any]
+
+
+class GuardrailAction(ABC):
+    """Extensible action interface producing a node to enforce the action on guardrail validation failure."""
+
+    @abstractmethod
+    def action_node(
+        self,
+        *,
+        guardrail: BaseGuardrail,
+        scope: GuardrailScope,
+        execution_stage: ExecutionStage,
+    ) -> GuardrailActionNode:
+        """Create and return the Action node to execute on validation failure."""
+        ...
```
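`GuardrailAction.action_node` returns a `(node_name, callable)` pair, so new enforcement behaviors plug in as subclasses. A hypothetical subclass that logs instead of blocking, reusing only names visible in this diff (the warn behavior, naming scheme, and empty state update are invented):

```python
# Hypothetical GuardrailAction subclass following the ABC above: its node
# merely logs the violation and lets the graph continue.
import logging

from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
from uipath_langchain.agent.guardrails.actions.base_action import (
    GuardrailAction,
    GuardrailActionNode,
)
from uipath_langchain.agent.guardrails.types import ExecutionStage

logger = logging.getLogger(__name__)

class WarnAction(GuardrailAction):
    """Log the guardrail violation and continue execution."""

    def action_node(
        self,
        *,
        guardrail: BaseGuardrail,
        scope: GuardrailScope,
        execution_stage: ExecutionStage,
    ) -> GuardrailActionNode:
        node_name = f"{guardrail.name}_warn"

        async def _node(state):
            logger.warning(
                "Guardrail %s flagged at %s/%s",
                guardrail.name, scope.name, execution_stage.name,
            )
            return {}  # no state updates

        return node_name, _node
```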
uipath_langchain/agent/guardrails/actions/block_action.py (new file)

```diff
@@ -0,0 +1,41 @@
+import re
+
+from uipath.platform.guardrails import BaseGuardrail, GuardrailScope
+from uipath.runtime.errors import UiPathErrorCategory, UiPathErrorCode
+
+from uipath_langchain.agent.guardrails.types import ExecutionStage
+
+from ...exceptions import AgentTerminationException
+from ..types import AgentGuardrailsGraphState
+from .base_action import GuardrailAction, GuardrailActionNode
+
+
+class BlockAction(GuardrailAction):
+    """Action that terminates execution when a guardrail fails.
+
+    Args:
+        reason: Reason string to include in the raised exception title.
+    """
+
+    def __init__(self, reason: str) -> None:
+        self.reason = reason
+
+    def action_node(
+        self,
+        *,
+        guardrail: BaseGuardrail,
+        scope: GuardrailScope,
+        execution_stage: ExecutionStage,
+    ) -> GuardrailActionNode:
+        raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}_block"
+        node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
+
+        async def _node(_state: AgentGuardrailsGraphState):
+            raise AgentTerminationException(
+                code=UiPathErrorCode.EXECUTION_ERROR,
+                title="Guardrail violation",
+                detail=self.reason,
+                category=UiPathErrorCategory.USER,
+            )
+
+        return node_name, _node
```
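`BlockAction` derives a graph-safe node name by lowercasing the `scope_stage_guardrail_block` string and collapsing runs of non-word characters into underscores. A quick demonstration with made-up scope, stage, and guardrail names:

```python
# Demo of BlockAction's node-name sanitization; the names are invented.
import re

raw = "LLM_PRE_EXECUTION_PII Check_block"
print(re.sub(r"\W+", "_", raw.lower()).strip("_"))
# -> llm_pre_execution_pii_check_block
```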