uipath-langchain 0.0.133__py3-none-any.whl → 0.1.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. uipath_langchain/_cli/cli_init.py +130 -191
  2. uipath_langchain/_cli/cli_new.py +2 -3
  3. uipath_langchain/_resources/AGENTS.md +21 -0
  4. uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
  5. uipath_langchain/_tracing/__init__.py +3 -2
  6. uipath_langchain/_tracing/_instrument_traceable.py +11 -12
  7. uipath_langchain/_utils/_request_mixin.py +327 -51
  8. uipath_langchain/_utils/_settings.py +2 -2
  9. uipath_langchain/agent/exceptions/__init__.py +6 -0
  10. uipath_langchain/agent/exceptions/exceptions.py +11 -0
  11. uipath_langchain/agent/guardrails/__init__.py +21 -0
  12. uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
  13. uipath_langchain/agent/guardrails/actions/base_action.py +24 -0
  14. uipath_langchain/agent/guardrails/actions/block_action.py +42 -0
  15. uipath_langchain/agent/guardrails/actions/escalate_action.py +499 -0
  16. uipath_langchain/agent/guardrails/actions/log_action.py +58 -0
  17. uipath_langchain/agent/guardrails/guardrail_nodes.py +173 -0
  18. uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
  19. uipath_langchain/agent/guardrails/guardrails_subgraph.py +283 -0
  20. uipath_langchain/agent/guardrails/types.py +20 -0
  21. uipath_langchain/agent/react/__init__.py +14 -0
  22. uipath_langchain/agent/react/agent.py +117 -0
  23. uipath_langchain/agent/react/constants.py +2 -0
  24. uipath_langchain/agent/react/init_node.py +20 -0
  25. uipath_langchain/agent/react/llm_node.py +43 -0
  26. uipath_langchain/agent/react/router.py +97 -0
  27. uipath_langchain/agent/react/terminate_node.py +82 -0
  28. uipath_langchain/agent/react/tools/__init__.py +7 -0
  29. uipath_langchain/agent/react/tools/tools.py +50 -0
  30. uipath_langchain/agent/react/types.py +39 -0
  31. uipath_langchain/agent/react/utils.py +49 -0
  32. uipath_langchain/agent/tools/__init__.py +17 -0
  33. uipath_langchain/agent/tools/context_tool.py +53 -0
  34. uipath_langchain/agent/tools/escalation_tool.py +111 -0
  35. uipath_langchain/agent/tools/integration_tool.py +181 -0
  36. uipath_langchain/agent/tools/process_tool.py +49 -0
  37. uipath_langchain/agent/tools/static_args.py +138 -0
  38. uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
  39. uipath_langchain/agent/tools/tool_factory.py +45 -0
  40. uipath_langchain/agent/tools/tool_node.py +22 -0
  41. uipath_langchain/agent/tools/utils.py +11 -0
  42. uipath_langchain/chat/__init__.py +4 -0
  43. uipath_langchain/chat/bedrock.py +187 -0
  44. uipath_langchain/chat/mapper.py +309 -0
  45. uipath_langchain/chat/models.py +248 -35
  46. uipath_langchain/chat/openai.py +133 -0
  47. uipath_langchain/chat/supported_models.py +42 -0
  48. uipath_langchain/chat/vertex.py +255 -0
  49. uipath_langchain/embeddings/embeddings.py +131 -34
  50. uipath_langchain/middlewares.py +0 -6
  51. uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
  52. uipath_langchain/runtime/__init__.py +36 -0
  53. uipath_langchain/runtime/_serialize.py +46 -0
  54. uipath_langchain/runtime/config.py +61 -0
  55. uipath_langchain/runtime/errors.py +43 -0
  56. uipath_langchain/runtime/factory.py +315 -0
  57. uipath_langchain/runtime/graph.py +159 -0
  58. uipath_langchain/runtime/runtime.py +453 -0
  59. uipath_langchain/runtime/schema.py +386 -0
  60. uipath_langchain/runtime/storage.py +115 -0
  61. uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
  62. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/METADATA +44 -23
  63. uipath_langchain-0.1.28.dist-info/RECORD +76 -0
  64. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/WHEEL +1 -1
  65. uipath_langchain-0.1.28.dist-info/entry_points.txt +5 -0
  66. uipath_langchain/_cli/_runtime/_context.py +0 -21
  67. uipath_langchain/_cli/_runtime/_conversation.py +0 -298
  68. uipath_langchain/_cli/_runtime/_exception.py +0 -17
  69. uipath_langchain/_cli/_runtime/_input.py +0 -139
  70. uipath_langchain/_cli/_runtime/_output.py +0 -234
  71. uipath_langchain/_cli/_runtime/_runtime.py +0 -379
  72. uipath_langchain/_cli/_utils/_graph.py +0 -199
  73. uipath_langchain/_cli/cli_dev.py +0 -44
  74. uipath_langchain/_cli/cli_eval.py +0 -78
  75. uipath_langchain/_cli/cli_run.py +0 -82
  76. uipath_langchain/_tracing/_oteladapter.py +0 -222
  77. uipath_langchain/_tracing/_utils.py +0 -28
  78. uipath_langchain/builder/agent_config.py +0 -191
  79. uipath_langchain/tools/preconfigured.py +0 -191
  80. uipath_langchain-0.0.133.dist-info/RECORD +0 -41
  81. uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
  82. /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
  83. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.28.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/chat/openai.py (new file)
@@ -0,0 +1,133 @@
+import logging
+import os
+from typing import Optional
+
+import httpx
+from langchain_openai import AzureChatOpenAI
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.utils import EndpointManager
+
+from .supported_models import OpenAIModels
+
+logger = logging.getLogger(__name__)
+
+
+class UiPathURLRewriteTransport(httpx.AsyncHTTPTransport):
+    def __init__(self, verify: bool = True, **kwargs):
+        super().__init__(verify=verify, **kwargs)
+
+    async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
+        original_url = str(request.url)
+
+        if "/openai/deployments/" in original_url:
+            base_url = original_url.split("/openai/deployments/")[0]
+            query_string = request.url.params
+            new_url_str = f"{base_url}/completions"
+            if query_string:
+                request.url = httpx.URL(new_url_str, params=query_string)
+            else:
+                request.url = httpx.URL(new_url_str)
+
+        return await super().handle_async_request(request)
+
+
+class UiPathSyncURLRewriteTransport(httpx.HTTPTransport):
+    def __init__(self, verify: bool = True, **kwargs):
+        super().__init__(verify=verify, **kwargs)
+
+    def handle_request(self, request: httpx.Request) -> httpx.Response:
+        original_url = str(request.url)
+
+        if "/openai/deployments/" in original_url:
+            base_url = original_url.split("/openai/deployments/")[0]
+            query_string = request.url.params
+            new_url_str = f"{base_url}/completions"
+            if query_string:
+                request.url = httpx.URL(new_url_str, params=query_string)
+            else:
+                request.url = httpx.URL(new_url_str)
+
+        return super().handle_request(request)
+
+
+class UiPathChatOpenAI(AzureChatOpenAI):
+    def __init__(
+        self,
+        token: Optional[str] = None,
+        model_name: str = OpenAIModels.gpt_5_mini_2025_08_07,
+        api_version: str = "2024-12-01-preview",
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        **kwargs,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        self._openai_api_version = api_version
+        self._vendor = "openai"
+        self._model_name = model_name
+        self._url: Optional[str] = None
+
+        super().__init__(
+            azure_endpoint=self._build_base_url(),
+            model_name=model_name,
+            default_headers=self._build_headers(token),
+            http_async_client=httpx.AsyncClient(
+                transport=UiPathURLRewriteTransport(verify=True),
+                **get_httpx_client_kwargs(),
+            ),
+            http_client=httpx.Client(
+                transport=UiPathSyncURLRewriteTransport(verify=True),
+                **get_httpx_client_kwargs(),
+            ),
+            api_key=token,
+            api_version=api_version,
+            validate_base_url=False,
+            **kwargs,
+        )
+
+    def _build_headers(self, token: str) -> dict[str, str]:
+        headers = {
+            "X-UiPath-LlmGateway-ApiFlavor": "auto",
+            "Authorization": f"Bearer {token}",
+        }
+        if job_key := os.getenv("UIPATH_JOB_KEY"):
+            headers["X-UiPath-JobKey"] = job_key
+        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
+            headers["X-UiPath-ProcessKey"] = process_key
+        return headers
+
+    @property
+    def endpoint(self) -> str:
+        vendor_endpoint = EndpointManager.get_vendor_endpoint()
+        formatted_endpoint = vendor_endpoint.format(
+            vendor=self._vendor,
+            model=self._model_name,
+            api_version=self._openai_api_version,
+        )
+        return formatted_endpoint.replace("/completions", "")
+
+    def _build_base_url(self) -> str:
+        if not self._url:
+            env_uipath_url = os.getenv("UIPATH_URL")
+
+            if env_uipath_url:
+                self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}"
+            else:
+                raise ValueError("UIPATH_URL environment variable is required")
+
+        return self._url
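
For orientation, a minimal usage sketch of the new connector (not part of the diff). It assumes the UIPATH_URL, UIPATH_ORGANIZATION_ID, UIPATH_TENANT_ID, and UIPATH_ACCESS_TOKEN environment variables are set; the import path follows the module layout above, though the package may also re-export the class from uipath_langchain.chat.

    # Hypothetical usage sketch; UIPATH_URL, UIPATH_ORGANIZATION_ID,
    # UIPATH_TENANT_ID and UIPATH_ACCESS_TOKEN must be set beforehand.
    from uipath_langchain.chat.openai import UiPathChatOpenAI
    from uipath_langchain.chat.supported_models import OpenAIModels

    # Defaults to OpenAIModels.gpt_5_mini_2025_08_07 if model_name is omitted.
    llm = UiPathChatOpenAI(model_name=OpenAIModels.gpt_4_1_2025_04_14)

    # Standard LangChain chat-model interface; requests are routed through
    # the UiPath LLM Gateway by the URL-rewrite transports defined above.
    response = llm.invoke("Summarize this invoice in one sentence.")
    print(response.content)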
uipath_langchain/chat/supported_models.py (new file)
@@ -0,0 +1,42 @@
+class OpenAIModels:
+    """Supported OpenAI model identifiers."""
+
+    # GPT-4o models
+    gpt_4o_2024_05_13 = "gpt-4o-2024-05-13"
+    gpt_4o_2024_08_06 = "gpt-4o-2024-08-06"
+    gpt_4o_2024_11_20 = "gpt-4o-2024-11-20"
+    gpt_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
+
+    # GPT-4.1 models
+    gpt_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
+    gpt_4_1_mini_2025_04_14 = "gpt-4.1-mini-2025-04-14"
+    gpt_4_1_nano_2025_04_14 = "gpt-4.1-nano-2025-04-14"
+
+    # GPT-5 models
+    gpt_5_2025_08_07 = "gpt-5-2025-08-07"
+    gpt_5_chat_2025_08_07 = "gpt-5-chat-2025-08-07"
+    gpt_5_mini_2025_08_07 = "gpt-5-mini-2025-08-07"
+    gpt_5_nano_2025_08_07 = "gpt-5-nano-2025-08-07"
+
+    # GPT-5.1 models
+    gpt_5_1_2025_11_13 = "gpt-5.1-2025-11-13"
+
+
+class GeminiModels:
+    """Supported Google Gemini model identifiers."""
+
+    gemini_2_5_pro = "gemini-2.5-pro"
+    gemini_2_5_flash = "gemini-2.5-flash"
+    gemini_2_0_flash_001 = "gemini-2.0-flash-001"
+
+
+class BedrockModels:
+    """Supported AWS Bedrock model identifiers."""
+
+    # Claude 3.7 models
+    anthropic_claude_3_7_sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0"
+
+    # Claude 4 models
+    anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0"
+    anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0"
+    anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0"
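
These model classes are plain namespaces of string constants, so each attribute can be passed anywhere a model identifier string is expected:

    from uipath_langchain.chat.supported_models import (
        BedrockModels,
        GeminiModels,
        OpenAIModels,
    )

    # Each attribute is just the gateway model identifier string.
    assert OpenAIModels.gpt_5_mini_2025_08_07 == "gpt-5-mini-2025-08-07"
    assert GeminiModels.gemini_2_5_flash == "gemini-2.5-flash"
    assert BedrockModels.anthropic_claude_sonnet_4.startswith("anthropic.")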
uipath_langchain/chat/vertex.py (new file)
@@ -0,0 +1,255 @@
+import os
+from typing import Any, Optional
+
+import httpx
+from uipath._utils._ssl_context import get_httpx_client_kwargs
+from uipath.utils import EndpointManager
+
+from .supported_models import GeminiModels
+
+
+def _check_genai_dependencies() -> None:
+    """Check if required dependencies for UiPathChatVertex are installed."""
+    import importlib.util
+
+    missing_packages = []
+
+    if importlib.util.find_spec("langchain_google_genai") is None:
+        missing_packages.append("langchain-google-genai")
+
+    if importlib.util.find_spec("google.genai") is None:
+        missing_packages.append("google-genai")
+
+    if missing_packages:
+        packages_str = ", ".join(missing_packages)
+        raise ImportError(
+            f"The following packages are required to use UiPathChatVertex: {packages_str}\n"
+            "Please install them using one of the following methods:\n\n"
+            "  # Using pip:\n"
+            f"  pip install uipath-langchain[vertex]\n\n"
+            "  # Using uv:\n"
+            f"  uv add 'uipath-langchain[vertex]'\n\n"
+        )
+
+
+_check_genai_dependencies()
+
+import google.genai
+from google.genai import types as genai_types
+from langchain_google_genai import ChatGoogleGenerativeAI
+from pydantic import PrivateAttr
+
+
+def _rewrite_request_for_gateway(
+    request: httpx.Request, gateway_url: str
+) -> httpx.Request:
+    """Rewrite a request to redirect to the UiPath gateway."""
+    url_str = str(request.url)
+    if "generateContent" in url_str or "streamGenerateContent" in url_str:
+        is_streaming = "alt=sse" in url_str
+
+        headers = dict(request.headers)
+
+        headers["X-UiPath-Streaming-Enabled"] = "true" if is_streaming else "false"
+
+        gateway_url_parsed = httpx.URL(gateway_url)
+        if gateway_url_parsed.host:
+            headers["host"] = gateway_url_parsed.host
+
+        return httpx.Request(
+            method=request.method,
+            url=gateway_url,
+            headers=headers,
+            content=request.content,
+            extensions=request.extensions,
+        )
+    return request
+
+
+class _UrlRewriteTransport(httpx.BaseTransport):
+    """Transport that rewrites URLs to redirect to UiPath gateway."""
+
+    def __init__(self, gateway_url: str):
+        self.gateway_url = gateway_url
+        self._transport = httpx.HTTPTransport()
+
+    def handle_request(self, request: httpx.Request) -> httpx.Response:
+        request = _rewrite_request_for_gateway(request, self.gateway_url)
+        return self._transport.handle_request(request)
+
+    def close(self) -> None:
+        self._transport.close()
+
+
+class _AsyncUrlRewriteTransport(httpx.AsyncBaseTransport):
+    """Async transport that rewrites URLs to redirect to UiPath gateway."""
+
+    def __init__(self, gateway_url: str):
+        self.gateway_url = gateway_url
+        self._transport = httpx.AsyncHTTPTransport()
+
+    async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
+        request = _rewrite_request_for_gateway(request, self.gateway_url)
+        return await self._transport.handle_async_request(request)
+
+    async def aclose(self) -> None:
+        await self._transport.aclose()
+
+
+class UiPathChatVertex(ChatGoogleGenerativeAI):
+    """UiPath Vertex AI Chat model that routes requests through UiPath's LLM Gateway."""
+
+    _vendor: str = PrivateAttr(default="vertexai")
+    _model_name: str = PrivateAttr()
+    _uipath_token: str = PrivateAttr()
+    _uipath_llmgw_url: Optional[str] = PrivateAttr(default=None)
+
+    def __init__(
+        self,
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        token: Optional[str] = None,
+        model_name: str = GeminiModels.gemini_2_5_flash,
+        temperature: Optional[float] = None,
+        **kwargs: Any,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        uipath_url = self._build_base_url(model_name)
+        headers = self._build_headers(token)
+
+        http_options = genai_types.HttpOptions(
+            httpx_client=httpx.Client(
+                transport=_UrlRewriteTransport(uipath_url),
+                headers=headers,
+                **get_httpx_client_kwargs(),
+            ),
+            httpx_async_client=httpx.AsyncClient(
+                transport=_AsyncUrlRewriteTransport(uipath_url),
+                headers=headers,
+                **get_httpx_client_kwargs(),
+            ),
+        )
+
+        if temperature is None and (
+            "gemini-3" in model_name or "gemini-2" in model_name
+        ):
+            temperature = 1.0
+
+        super().__init__(
+            model=model_name,
+            google_api_key="uipath-gateway",
+            temperature=temperature,
+            **kwargs,
+        )
+
+        custom_client = google.genai.Client(
+            api_key="uipath-gateway",
+            http_options=http_options,
+        )
+
+        object.__setattr__(self, "client", custom_client)
+
+        self._model_name = model_name
+        self._uipath_token = token
+        self._uipath_llmgw_url = uipath_url
+
+        if self.temperature is not None and not 0 <= self.temperature <= 2.0:
+            raise ValueError("temperature must be in the range [0.0, 2.0]")
+
+        if self.top_p is not None and not 0 <= self.top_p <= 1:
+            raise ValueError("top_p must be in the range [0.0, 1.0]")
+
+        if self.top_k is not None and self.top_k <= 0:
+            raise ValueError("top_k must be positive")
+
+        additional_headers = self.additional_headers or {}
+        self.default_metadata = tuple(additional_headers.items())
+
+    @staticmethod
+    def _build_headers(token: str) -> dict[str, str]:
+        """Build HTTP headers for UiPath Gateway requests."""
+        headers = {
+            "Authorization": f"Bearer {token}",
+        }
+        if job_key := os.getenv("UIPATH_JOB_KEY"):
+            headers["X-UiPath-JobKey"] = job_key
+        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
+            headers["X-UiPath-ProcessKey"] = process_key
+        return headers
+
+    @staticmethod
+    def _build_base_url(model_name: str) -> str:
+        """Build the full URL for the UiPath LLM Gateway."""
+        env_uipath_url = os.getenv("UIPATH_URL")
+
+        if not env_uipath_url:
+            raise ValueError("UIPATH_URL environment variable is required")
+
+        vendor_endpoint = EndpointManager.get_vendor_endpoint()
+        formatted_endpoint = vendor_endpoint.format(
+            vendor="vertexai",
+            model=model_name,
+        )
+        return f"{env_uipath_url.rstrip('/')}/{formatted_endpoint}"
+
+    def _stream(self, messages, stop=None, run_manager=None, **kwargs):
+        """Streaming fallback - calls _generate and yields single response."""
+        from langchain_core.messages import AIMessageChunk
+        from langchain_core.outputs import ChatGenerationChunk
+
+        result = self._generate(messages, stop=stop, run_manager=run_manager, **kwargs)
+
+        if result.generations:
+            message = result.generations[0].message
+            chunk = AIMessageChunk(
+                content=message.content,
+                additional_kwargs=message.additional_kwargs,
+                response_metadata=getattr(message, "response_metadata", {}),
+                id=message.id,
+                tool_calls=getattr(message, "tool_calls", []),
+                tool_call_chunks=getattr(message, "tool_call_chunks", []),
+            )
+            if hasattr(message, "usage_metadata") and message.usage_metadata:
+                chunk.usage_metadata = message.usage_metadata
+
+            yield ChatGenerationChunk(message=chunk)
+
+    async def _astream(self, messages, stop=None, run_manager=None, **kwargs):
+        """Async streaming fallback - calls _agenerate and yields single response."""
+        from langchain_core.messages import AIMessageChunk
+        from langchain_core.outputs import ChatGenerationChunk
+
+        result = await self._agenerate(
+            messages, stop=stop, run_manager=run_manager, **kwargs
+        )
+
+        if result.generations:
+            message = result.generations[0].message
+            chunk = AIMessageChunk(
+                content=message.content,
+                additional_kwargs=message.additional_kwargs,
+                response_metadata=getattr(message, "response_metadata", {}),
+                id=message.id,
+                tool_calls=getattr(message, "tool_calls", []),
+                tool_call_chunks=getattr(message, "tool_call_chunks", []),
+            )
+            if hasattr(message, "usage_metadata") and message.usage_metadata:
+                chunk.usage_metadata = message.usage_metadata
+
+            yield ChatGenerationChunk(message=chunk)
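
A matching usage sketch (again, not part of the diff), assuming the optional vertex extra is installed and the same UIPATH_* environment variables are set:

    # Hypothetical usage sketch; requires `pip install 'uipath-langchain[vertex]'`
    # (or `uv add 'uipath-langchain[vertex]'`) per the ImportError message above.
    from uipath_langchain.chat.supported_models import GeminiModels
    from uipath_langchain.chat.vertex import UiPathChatVertex

    # Gemini 2.x models default to temperature=1.0 when none is given.
    llm = UiPathChatVertex(model_name=GeminiModels.gemini_2_5_pro)
    print(llm.invoke("Hello through the UiPath LLM Gateway").content)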
uipath_langchain/embeddings/embeddings.py
@@ -1,8 +1,7 @@
 import os
-from typing import List, Optional
+from typing import Any
 
 import httpx
-from langchain_community.callbacks.manager import openai_callback_var
 from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
 from pydantic import Field
 from uipath.utils import EndpointManager
@@ -11,9 +10,15 @@ from uipath_langchain._utils._request_mixin import UiPathRequestMixin
 
 
 class UiPathAzureOpenAIEmbeddings(UiPathRequestMixin, AzureOpenAIEmbeddings):
-    """Custom Embeddings connector for LangChain integration with UiPath, with minimal changes compared to AzureOpenAIEmbeddings."""
+    """Custom Embeddings connector for LangChain integration with UiPath.
 
-    model_name: Optional[str] = Field(
+    This class modifies the OpenAI client to:
+    - Use UiPath endpoints
+    - Log request/response durations
+    - Apply custom URL preparation and header building
+    """
+
+    model_name: str | None = Field(
         default_factory=lambda: os.getenv(
             "UIPATH_MODEL_NAME", "text-embedding-3-large"
         ),
@@ -36,6 +41,7 @@ class UiPathAzureOpenAIEmbeddings(UiPathRequestMixin, AzureOpenAIEmbeddings):
             ),
             **kwargs,
         )
+        # Monkey-patch the OpenAI client to use your custom methods
        self.client._client._prepare_url = self._prepare_url
         self.client._client._build_headers = self._build_headers
         self.async_client._client._prepare_url = self._prepare_url
@@ -50,63 +56,154 @@ class UiPathAzureOpenAIEmbeddings(UiPathRequestMixin, AzureOpenAIEmbeddings):
 
 
 class UiPathOpenAIEmbeddings(UiPathRequestMixin, OpenAIEmbeddings):
-    """Custom Embeddings connector for LangChain integration with UiPath, with full control over the embedding call."""
+    """Custom Embeddings connector for LangChain integration with UiPath.
+
+    This implementation uses custom _call and _acall methods for full control
+    over the API request/response cycle.
+    """
 
-    model_name: Optional[str] = Field(
+    model_name: str | None = Field(
         default_factory=lambda: os.getenv(
             "UIPATH_MODEL_NAME", "text-embedding-3-large"
         ),
         alias="model",
     )
 
+    # Add instance variables for tracking if needed
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._total_tokens = 0
+        self._total_requests = 0
+
     def embed_documents(
-        self, texts: List[str], chunk_size: Optional[int] = None, **kwargs
-    ) -> List[List[float]]:
-        """Embed a list of documents using the UiPath."""
-        embeddings = []
-        total_tokens = 0
-        # Process in chunks if specified
-        chunk_size_ = chunk_size or self.chunk_size or len(texts)
+        self, texts: list[str], chunk_size: int | None = None, **kwargs: Any
+    ) -> list[list[float]]:
+        """Embed a list of documents using UiPath endpoint.
+
+        Args:
+            texts: List of texts to embed
+            chunk_size: Number of texts to process in each batch
+            **kwargs: Additional arguments passed to the API
+
+        Returns:
+            List of embeddings for each text
+        """
+        chunk_size_ = chunk_size or self.chunk_size
+        embeddings: list[list[float]] = []
+
         for i in range(0, len(texts), chunk_size_):
             chunk = texts[i : i + chunk_size_]
-            payload = {"input": chunk}
+
+            # Build payload matching OpenAI API format
+            payload: dict[str, Any] = {
+                "input": chunk,
+                "model": self.model,
+            }
+
+            # Add optional parameters
+            if self.dimensions is not None:
+                payload["dimensions"] = self.dimensions
+
+            # Add model_kwargs and any additional kwargs
+            payload.update(self.model_kwargs)
+            payload.update(kwargs)
+
+            # Make the API call using custom _call method
             response = self._call(self.url, payload, self.auth_headers)
+
+            # Extract embeddings
             chunk_embeddings = [r["embedding"] for r in response["data"]]
-            total_tokens += response["usage"]["prompt_tokens"]
             embeddings.extend(chunk_embeddings)
-        if contextvar := openai_callback_var.get():
-            contextvar.prompt_tokens += total_tokens
-            contextvar.total_tokens += total_tokens
-            contextvar.successful_requests += 1
+
+            # Track usage internally (optional)
+            if "usage" in response:
+                self._total_tokens += response["usage"].get("total_tokens", 0)
+                self._total_requests += 1
+
         return embeddings
 
     async def aembed_documents(
         self,
-        texts: List[str],
-        chunk_size: Optional[int] = None,
-        **kwargs,
-    ) -> List[List[float]]:
-        """Async version of embed_documents."""
-        embeddings = []
-        total_tokens = 0
-        # Process in chunks if specified
-        chunk_size_ = chunk_size or self.chunk_size or len(texts)
+        texts: list[str],
+        chunk_size: int | None = None,
+        **kwargs: Any,
+    ) -> list[list[float]]:
+        """Async version of embed_documents.
+
+        Args:
+            texts: List of texts to embed
+            chunk_size: Number of texts to process in each batch
+            **kwargs: Additional arguments passed to the API
+
+        Returns:
+            List of embeddings for each text
+        """
+        chunk_size_ = chunk_size or self.chunk_size
+        embeddings: list[list[float]] = []
+
         for i in range(0, len(texts), chunk_size_):
             chunk = texts[i : i + chunk_size_]
-            payload = {"input": chunk}
+
+            # Build payload matching OpenAI API format
+            payload: dict[str, Any] = {
+                "input": chunk,
+                "model": self.model,
+            }
+
+            # Add optional parameters
+            if self.dimensions is not None:
+                payload["dimensions"] = self.dimensions
+
+            # Add model_kwargs and any additional kwargs
+            payload.update(self.model_kwargs)
+            payload.update(kwargs)
+
+            # Make the async API call using custom _acall method
             response = await self._acall(self.url, payload, self.auth_headers)
+
+            # Extract embeddings
             chunk_embeddings = [r["embedding"] for r in response["data"]]
-            total_tokens += response["usage"]["prompt_tokens"]
             embeddings.extend(chunk_embeddings)
-        if contextvar := openai_callback_var.get():
-            contextvar.prompt_tokens += total_tokens
-            contextvar.total_tokens += total_tokens
-            contextvar.successful_requests += 1
+
+            # Track usage internally (optional)
+            if "usage" in response:
+                self._total_tokens += response["usage"].get("total_tokens", 0)
+                self._total_requests += 1
+
         return embeddings
 
     @property
     def endpoint(self) -> str:
+        """Get the UiPath endpoint for embeddings."""
         endpoint = EndpointManager.get_embeddings_endpoint()
         return endpoint.format(
             model=self.model_name, api_version=self.openai_api_version
         )
+
+    @property
+    def url(self) -> str:
+        """Get the full URL for API requests."""
+        return self.endpoint
+
+    @property
+    def auth_headers(self) -> dict[str, str]:
+        """Get authentication headers for API requests."""
+        headers = {}
+        if self.openai_api_key:
+            headers["Authorization"] = (
+                f"Bearer {self.openai_api_key.get_secret_value()}"
+            )
+        if self.default_headers:
+            headers.update(self.default_headers)
+        return headers
+
+    def get_usage_stats(self) -> dict[str, int]:
+        """Get token usage statistics.
+
+        Returns:
+            Dictionary with total_tokens and total_requests
+        """
+        return {
+            "total_tokens": self._total_tokens,
+            "total_requests": self._total_requests,
+        }
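
A short sketch of the reworked embeddings connector (not part of the diff); credential resolution comes from UiPathRequestMixin and the same UIPATH_* environment variables as the chat connectors:

    # Hypothetical usage sketch; model defaults to text-embedding-3-large
    # (or the UIPATH_MODEL_NAME environment variable if set).
    from uipath_langchain.embeddings.embeddings import UiPathOpenAIEmbeddings

    embedder = UiPathOpenAIEmbeddings()
    vectors = embedder.embed_documents(["contract A", "contract B"], chunk_size=16)
    print(len(vectors), len(vectors[0]))

    # New in this version: internal usage tracking replaces the removed
    # openai_callback_var integration.
    print(embedder.get_usage_stats())  # {'total_tokens': ..., 'total_requests': ...}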
uipath_langchain/middlewares.py
@@ -1,16 +1,10 @@
 from uipath._cli.middlewares import Middlewares
 
-from ._cli.cli_dev import langgraph_dev_middleware
-from ._cli.cli_eval import langgraph_eval_middleware
 from ._cli.cli_init import langgraph_init_middleware
 from ._cli.cli_new import langgraph_new_middleware
-from ._cli.cli_run import langgraph_run_middleware
 
 
 def register_middleware():
     """This function will be called by the entry point system when uipath_langchain is installed"""
     Middlewares.register("init", langgraph_init_middleware)
-    Middlewares.register("run", langgraph_run_middleware)
     Middlewares.register("new", langgraph_new_middleware)
-    Middlewares.register("dev", langgraph_dev_middleware)
-    Middlewares.register("eval", langgraph_eval_middleware)