uipath-langchain 0.0.112__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (82)
  1. uipath_langchain/_cli/_templates/main.py.template +12 -13
  2. uipath_langchain/_cli/cli_init.py +127 -156
  3. uipath_langchain/_cli/cli_new.py +2 -6
  4. uipath_langchain/_resources/AGENTS.md +21 -0
  5. uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
  6. uipath_langchain/{tracers → _tracing}/__init__.py +0 -2
  7. uipath_langchain/_tracing/_instrument_traceable.py +134 -0
  8. uipath_langchain/_utils/__init__.py +1 -2
  9. uipath_langchain/_utils/_request_mixin.py +351 -54
  10. uipath_langchain/_utils/_settings.py +2 -11
  11. uipath_langchain/agent/exceptions/__init__.py +6 -0
  12. uipath_langchain/agent/exceptions/exceptions.py +11 -0
  13. uipath_langchain/agent/guardrails/__init__.py +21 -0
  14. uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
  15. uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
  16. uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
  17. uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
  18. uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
  19. uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
  20. uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
  21. uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
  22. uipath_langchain/agent/guardrails/types.py +20 -0
  23. uipath_langchain/agent/react/__init__.py +14 -0
  24. uipath_langchain/agent/react/agent.py +113 -0
  25. uipath_langchain/agent/react/constants.py +2 -0
  26. uipath_langchain/agent/react/init_node.py +20 -0
  27. uipath_langchain/agent/react/llm_node.py +43 -0
  28. uipath_langchain/agent/react/router.py +97 -0
  29. uipath_langchain/agent/react/terminate_node.py +82 -0
  30. uipath_langchain/agent/react/tools/__init__.py +7 -0
  31. uipath_langchain/agent/react/tools/tools.py +50 -0
  32. uipath_langchain/agent/react/types.py +39 -0
  33. uipath_langchain/agent/react/utils.py +49 -0
  34. uipath_langchain/agent/tools/__init__.py +17 -0
  35. uipath_langchain/agent/tools/context_tool.py +53 -0
  36. uipath_langchain/agent/tools/escalation_tool.py +111 -0
  37. uipath_langchain/agent/tools/integration_tool.py +181 -0
  38. uipath_langchain/agent/tools/process_tool.py +49 -0
  39. uipath_langchain/agent/tools/static_args.py +138 -0
  40. uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
  41. uipath_langchain/agent/tools/tool_factory.py +45 -0
  42. uipath_langchain/agent/tools/tool_node.py +22 -0
  43. uipath_langchain/agent/tools/utils.py +11 -0
  44. uipath_langchain/chat/__init__.py +4 -0
  45. uipath_langchain/chat/bedrock.py +187 -0
  46. uipath_langchain/chat/gemini.py +330 -0
  47. uipath_langchain/chat/mapper.py +309 -0
  48. uipath_langchain/chat/models.py +261 -38
  49. uipath_langchain/chat/openai.py +132 -0
  50. uipath_langchain/chat/supported_models.py +42 -0
  51. uipath_langchain/embeddings/embeddings.py +136 -36
  52. uipath_langchain/middlewares.py +0 -2
  53. uipath_langchain/py.typed +0 -0
  54. uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
  55. uipath_langchain/runtime/__init__.py +36 -0
  56. uipath_langchain/runtime/_serialize.py +46 -0
  57. uipath_langchain/runtime/config.py +61 -0
  58. uipath_langchain/runtime/errors.py +43 -0
  59. uipath_langchain/runtime/factory.py +315 -0
  60. uipath_langchain/runtime/graph.py +159 -0
  61. uipath_langchain/runtime/runtime.py +453 -0
  62. uipath_langchain/runtime/schema.py +349 -0
  63. uipath_langchain/runtime/storage.py +115 -0
  64. uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
  65. {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -20
  66. uipath_langchain-0.1.24.dist-info/RECORD +76 -0
  67. {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
  68. uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
  69. uipath_langchain/_cli/_runtime/_context.py +0 -21
  70. uipath_langchain/_cli/_runtime/_exception.py +0 -17
  71. uipath_langchain/_cli/_runtime/_input.py +0 -136
  72. uipath_langchain/_cli/_runtime/_output.py +0 -234
  73. uipath_langchain/_cli/_runtime/_runtime.py +0 -371
  74. uipath_langchain/_cli/_utils/_graph.py +0 -202
  75. uipath_langchain/_cli/cli_run.py +0 -80
  76. uipath_langchain/tracers/AsyncUiPathTracer.py +0 -274
  77. uipath_langchain/tracers/_events.py +0 -33
  78. uipath_langchain/tracers/_instrument_traceable.py +0 -416
  79. uipath_langchain/tracers/_utils.py +0 -52
  80. uipath_langchain-0.0.112.dist-info/RECORD +0 -36
  81. uipath_langchain-0.0.112.dist-info/entry_points.txt +0 -2
  82. {uipath_langchain-0.0.112.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/chat/models.py
@@ -1,20 +1,27 @@
 import json
-from typing import Any, Dict, List, Literal, Optional, Union
+import logging
+from typing import Any, AsyncIterator, Iterator, Literal, Union
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import AIMessage, BaseMessage
+from langchain_core.language_models.chat_models import (
+    agenerate_from_stream,
+    generate_from_stream,
+)
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
 from langchain_core.messages.ai import UsageMetadata
-from langchain_core.outputs import ChatGeneration, ChatResult
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_openai.chat_models import AzureChatOpenAI
 from pydantic import BaseModel
+from uipath.utils import EndpointManager
 
 from uipath_langchain._utils._request_mixin import UiPathRequestMixin
-from uipath_langchain._utils._settings import UiPathEndpoints
+
+logger = logging.getLogger(__name__)
 
 
 class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):
@@ -22,37 +29,118 @@ class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):
 
     def _generate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         if "tools" in kwargs and not kwargs["tools"]:
             del kwargs["tools"]
+
+        if self.streaming:
+            stream_iter = self._stream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+            return generate_from_stream(stream_iter)
+
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
         response = self._call(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
     async def _agenerate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         if "tools" in kwargs and not kwargs["tools"]:
             del kwargs["tools"]
+
+        if self.streaming:
+            stream_iter = self._astream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+            return await agenerate_from_stream(stream_iter)
+
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        kwargs["stream"] = True
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+        default_chunk_class = AIMessageChunk
+
+        for chunk in self._stream_request(self.url, payload, self.auth_headers):
+            if self.logger:
+                self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}")
+            generation_chunk = self._convert_chunk(
+                chunk, default_chunk_class, include_tool_calls=True
+            )
+            if generation_chunk is None:
+                if self.logger:
+                    self.logger.debug("[Stream] Skipping None generation_chunk")
+                continue
+
+            if self.logger:
+                self.logger.debug(
+                    f"[Stream] Yielding generation_chunk: {generation_chunk}"
+                )
+
+            if run_manager:
+                run_manager.on_llm_new_token(
+                    generation_chunk.text,
+                    chunk=generation_chunk,
+                )
+
+            yield generation_chunk
+
+    async def _astream(
+        self,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        kwargs["stream"] = True
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+        default_chunk_class = AIMessageChunk
+
+        async for chunk in self._astream_request(self.url, payload, self.auth_headers):
+            generation_chunk = self._convert_chunk(
+                chunk, default_chunk_class, include_tool_calls=True
+            )
+            if generation_chunk is None:
+                continue
+
+            if run_manager:
+                await run_manager.on_llm_new_token(
+                    generation_chunk.text,
+                    chunk=generation_chunk,
+                )
+
+            yield generation_chunk
+
     def with_structured_output(
         self,
-        schema: Optional[Any] = None,
+        schema: Any = None,
         *,
         method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
-        strict: Optional[bool] = None,
+        strict: bool | None = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, Any]:
         """Model wrapper that returns outputs formatted to match the given schema."""
@@ -71,7 +159,9 @@ class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):
 
     @property
     def endpoint(self) -> str:
-        return UiPathEndpoints.PASSTHROUGH_COMPLETION_ENDPOINT.value.format(
+        endpoint = EndpointManager.get_passthrough_endpoint()
+        logger.debug("Using endpoint: %s", endpoint)
+        return endpoint.format(
             model=self.model_name, api_version=self.openai_api_version
         )
 
@@ -81,8 +171,8 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
 
     def _create_chat_result(
         self,
-        response: Union[Dict[str, Any], BaseModel],
-        generation_info: Optional[Dict[Any, Any]] = None,
+        response: Union[dict[str, Any], BaseModel],
+        generation_info: dict[Any, Any] | None = None,
     ) -> ChatResult:
         if not isinstance(response, dict):
             response = response.model_dump()
@@ -112,6 +202,7 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
                     "id": tool["id"],
                     "name": tool["name"],
                     "args": tool["arguments"],
+                    "type": "tool_call",
                 }
                 for tool in message["tool_calls"]
             ]
@@ -122,9 +213,9 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
         self,
         input_: LanguageModelInput,
         *,
-        stop: Optional[List[str]] = None,
+        stop: list[str] | None = None,
         **kwargs: Any,
-    ) -> Dict[Any, Any]:
+    ) -> dict[Any, Any]:
         payload = super()._get_request_payload(input_, stop=stop, **kwargs)
         # hacks to make the request work with uipath normalized
         for message in payload["messages"]:
@@ -143,11 +234,40 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
             }
         return payload
 
+    def _normalize_tool_choice(self, kwargs: dict[str, Any]) -> None:
+        """Normalize tool_choice for UiPath Gateway compatibility.
+
+        Converts LangChain tool_choice formats to UiPath Gateway format:
+        - String "required" -> {"type": "required"}
+        - String "auto" -> {"type": "auto"}
+        - Dict with function -> {"type": "tool", "name": "function_name"}
+        """
+        if "tool_choice" in kwargs:
+            tool_choice = kwargs["tool_choice"]
+
+            if isinstance(tool_choice, str):
+                if tool_choice in ("required", "auto", "none"):
+                    logger.debug(
+                        f"Converting tool_choice from '{tool_choice}' to {{'type': '{tool_choice}'}}"
+                    )
+                    kwargs["tool_choice"] = {"type": tool_choice}
+            elif (
+                isinstance(tool_choice, dict) and tool_choice.get("type") == "function"
+            ):
+                function_name = tool_choice["function"]["name"]
+                logger.debug(
+                    f"Converting tool_choice from function '{function_name}' to tool format"
+                )
+                kwargs["tool_choice"] = {
+                    "type": "tool",
+                    "name": function_name,
+                }
+
     def _generate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         """Override the _generate method to implement the chat model logic.
@@ -167,21 +287,23 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
         """
         if kwargs.get("tools"):
             kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-            kwargs["tool_choice"] = {
-                "type": "tool",
-                "name": kwargs["tool_choice"]["function"]["name"],
-            }
-        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        self._normalize_tool_choice(kwargs)
+
+        if self.streaming:
+            stream_iter = self._stream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+            return generate_from_stream(stream_iter)
 
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
         response = self._call(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
     async def _agenerate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
         **kwargs: Any,
     ) -> ChatResult:
         """Override the _generate method to implement the chat model logic.
@@ -201,25 +323,122 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
         """
         if kwargs.get("tools"):
             kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-            kwargs["tool_choice"] = {
-                "type": "tool",
-                "name": kwargs["tool_choice"]["function"]["name"],
-            }
-        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        self._normalize_tool_choice(kwargs)
+
+        if self.streaming:
+            stream_iter = self._astream(
+                messages, stop=stop, run_manager=run_manager, **kwargs
+            )
+            return await agenerate_from_stream(stream_iter)
 
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: CallbackManagerForLLMRun | None = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        """Stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        self._normalize_tool_choice(kwargs)
+        kwargs["stream"] = True
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+        default_chunk_class = AIMessageChunk
+
+        for chunk in self._stream_request(self.url, payload, self.auth_headers):
+            if self.logger:
+                self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}")
+            generation_chunk = self._convert_chunk(
+                chunk, default_chunk_class, include_tool_calls=True
+            )
+            if generation_chunk is None:
+                if self.logger:
+                    self.logger.debug("[Stream] Skipping None generation_chunk")
+                continue
+
+            if self.logger:
+                self.logger.debug(
+                    f"[Stream] Yielding generation_chunk: {generation_chunk}"
+                )
+
+            if run_manager:
+                run_manager.on_llm_new_token(
+                    generation_chunk.text,
+                    chunk=generation_chunk,
+                )
+
+            yield generation_chunk
+
+    async def _astream(
+        self,
+        messages: list[BaseMessage],
+        stop: list[str] | None = None,
+        run_manager: AsyncCallbackManagerForLLMRun | None = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        """Async stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An async iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        self._normalize_tool_choice(kwargs)
+        kwargs["stream"] = True
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+        # Update headers to enable streaming
+        headers = {**self.auth_headers}
+        headers["X-UiPath-Streaming-Enabled"] = "true"
+
+        default_chunk_class = AIMessageChunk
+
+        async for chunk in self._astream_request(self.url, payload, headers):
+            generation_chunk = self._convert_chunk(
+                chunk, default_chunk_class, include_tool_calls=True
+            )
+            if generation_chunk is None:
+                continue
+
+            if run_manager:
+                await run_manager.on_llm_new_token(
+                    generation_chunk.text,
+                    chunk=generation_chunk,
+                )
+
+            yield generation_chunk
+
     def with_structured_output(
         self,
-        schema: Optional[Any] = None,
+        schema: Any = None,
         *,
         method: Literal[
             "function_calling", "json_mode", "json_schema"
         ] = "function_calling",
         include_raw: bool = False,
-        strict: Optional[bool] = None,
+        strict: bool | None = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, Any]:
         """Model wrapper that returns outputs formatted to match the given schema."""
@@ -252,7 +471,11 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
 
     @property
    def endpoint(self) -> str:
-        return UiPathEndpoints.NORMALIZED_COMPLETION_ENDPOINT.value
+        endpoint = EndpointManager.get_normalized_endpoint()
+        logger.debug("Using endpoint: %s", endpoint)
+        return endpoint.format(
+            model=self.model_name, api_version=self.openai_api_version
+        )
 
     @property
     def is_normalized(self) -> bool:
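Net effect of the models.py changes: when streaming=True, both _generate and _agenerate now delegate to the chunk-based _stream/_astream paths via generate_from_stream/agenerate_from_stream instead of issuing a single blocking call. A minimal usage sketch, assuming UiPathRequestMixin resolves the gateway URL and auth headers from the ambient UiPath environment (the prompts and the constructor arguments beyond model_name are ours):

from uipath_langchain.chat.models import UiPathChat

llm = UiPathChat(model_name="gpt-4o-2024-11-20", streaming=True)

# invoke() enters _generate, which now assembles the final ChatResult
# from the ChatGenerationChunk objects yielded by _stream.
print(llm.invoke("Say hello in one word.").content)

# stream() surfaces the chunks directly.
for chunk in llm.stream("Count to three."):
    print(chunk.content, end="", flush=True)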
uipath_langchain/chat/openai.py
@@ -0,0 +1,132 @@
+import logging
+import os
+from typing import Optional
+
+import httpx
+from langchain_openai import AzureChatOpenAI
+from uipath.utils import EndpointManager
+
+from .supported_models import OpenAIModels
+
+logger = logging.getLogger(__name__)
+
+
+class UiPathURLRewriteTransport(httpx.AsyncHTTPTransport):
+    def __init__(self, verify: bool = True, **kwargs):
+        super().__init__(verify=verify, **kwargs)
+
+    async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
+        original_url = str(request.url)
+
+        if "/openai/deployments/" in original_url:
+            base_url = original_url.split("/openai/deployments/")[0]
+            query_string = request.url.params
+            new_url_str = f"{base_url}/completions"
+            if query_string:
+                request.url = httpx.URL(new_url_str, params=query_string)
+            else:
+                request.url = httpx.URL(new_url_str)
+
+        return await super().handle_async_request(request)
+
+
+class UiPathSyncURLRewriteTransport(httpx.HTTPTransport):
+    def __init__(self, verify: bool = True, **kwargs):
+        super().__init__(verify=verify, **kwargs)
+
+    def handle_request(self, request: httpx.Request) -> httpx.Response:
+        original_url = str(request.url)
+
+        if "/openai/deployments/" in original_url:
+            base_url = original_url.split("/openai/deployments/")[0]
+            query_string = request.url.params
+            new_url_str = f"{base_url}/completions"
+            if query_string:
+                request.url = httpx.URL(new_url_str, params=query_string)
+            else:
+                request.url = httpx.URL(new_url_str)
+
+        return super().handle_request(request)
+
+
+class UiPathChatOpenAI(AzureChatOpenAI):
+    def __init__(
+        self,
+        token: Optional[str] = None,
+        model_name: str = OpenAIModels.gpt_5_mini_2025_08_07,
+        api_version: str = "2024-12-01-preview",
+        org_id: Optional[str] = None,
+        tenant_id: Optional[str] = None,
+        **kwargs,
+    ):
+        org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
+        tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
+        token = token or os.getenv("UIPATH_ACCESS_TOKEN")
+
+        if not org_id:
+            raise ValueError(
+                "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
+            )
+        if not tenant_id:
+            raise ValueError(
+                "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
+            )
+        if not token:
+            raise ValueError(
+                "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
+            )
+
+        self._openai_api_version = api_version
+        self._vendor = "openai"
+        self._model_name = model_name
+        self._url: Optional[str] = None
+
+        super().__init__(
+            azure_endpoint=self._build_base_url(),
+            model_name=model_name,
+            default_headers=self._build_headers(token),
+            http_async_client=httpx.AsyncClient(
+                transport=UiPathURLRewriteTransport(verify=True),
+                verify=True,
+            ),
+            http_client=httpx.Client(
+                transport=UiPathSyncURLRewriteTransport(verify=True),
+                verify=True,
+            ),
+            api_key=token,
+            api_version=api_version,
+            validate_base_url=False,
+            **kwargs,
+        )
+
+    def _build_headers(self, token: str) -> dict[str, str]:
+        headers = {
+            "X-UiPath-LlmGateway-ApiFlavor": "auto",
+            "Authorization": f"Bearer {token}",
+        }
+        if job_key := os.getenv("UIPATH_JOB_KEY"):
+            headers["X-UiPath-JobKey"] = job_key
+        if process_key := os.getenv("UIPATH_PROCESS_KEY"):
+            headers["X-UiPath-ProcessKey"] = process_key
+        return headers
+
+    @property
+    def endpoint(self) -> str:
+        vendor_endpoint = EndpointManager.get_vendor_endpoint()
+        formatted_endpoint = vendor_endpoint.format(
+            vendor=self._vendor,
+            model=self._model_name,
+            api_version=self._openai_api_version,
+        )
+        return formatted_endpoint.replace("/completions", "")
+
+    def _build_base_url(self) -> str:
+        if not self._url:
+            env_uipath_url = os.getenv("UIPATH_URL")
+
+            if env_uipath_url:
+                self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}"
+            else:
+                raise ValueError("UIPATH_URL environment variable is required")
+
+        return self._url
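The two transports exist because AzureChatOpenAI always targets /openai/deployments/<model>/... paths, while the UiPath LLM Gateway expects a flat /completions route; the transports rewrite the URL in flight and preserve the query string. A self-contained illustration of that rewrite rule (the host and path segments are made up; only the split on "/openai/deployments/" comes from the code above):

import httpx

original = httpx.URL(
    "https://cloud.uipath.com/acme/DefaultTenant/llmgateway_"
    "/openai/deployments/gpt-5-mini-2025-08-07/chat/completions",
    params={"api-version": "2024-12-01-preview"},
)

# Same rule as handle_request/handle_async_request above.
base_url = str(original).split("/openai/deployments/")[0]
rewritten = httpx.URL(f"{base_url}/completions", params=original.params)

print(rewritten)
# https://cloud.uipath.com/acme/DefaultTenant/llmgateway_/completions?api-version=2024-12-01-preview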
uipath_langchain/chat/supported_models.py
@@ -0,0 +1,42 @@
+class OpenAIModels:
+    """Supported OpenAI model identifiers."""
+
+    # GPT-4o models
+    gpt_4o_2024_05_13 = "gpt-4o-2024-05-13"
+    gpt_4o_2024_08_06 = "gpt-4o-2024-08-06"
+    gpt_4o_2024_11_20 = "gpt-4o-2024-11-20"
+    gpt_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
+
+    # GPT-4.1 models
+    gpt_4_1_2025_04_14 = "gpt-4.1-2025-04-14"
+    gpt_4_1_mini_2025_04_14 = "gpt-4.1-mini-2025-04-14"
+    gpt_4_1_nano_2025_04_14 = "gpt-4.1-nano-2025-04-14"
+
+    # GPT-5 models
+    gpt_5_2025_08_07 = "gpt-5-2025-08-07"
+    gpt_5_chat_2025_08_07 = "gpt-5-chat-2025-08-07"
+    gpt_5_mini_2025_08_07 = "gpt-5-mini-2025-08-07"
+    gpt_5_nano_2025_08_07 = "gpt-5-nano-2025-08-07"
+
+    # GPT-5.1 models
+    gpt_5_1_2025_11_13 = "gpt-5.1-2025-11-13"
+
+
+class GeminiModels:
+    """Supported Google Gemini model identifiers."""
+
+    gemini_2_5_pro = "gemini-2.5-pro"
+    gemini_2_5_flash = "gemini-2.5-flash"
+    gemini_2_0_flash_001 = "gemini-2.0-flash-001"
+
+
+class BedrockModels:
+    """Supported AWS Bedrock model identifiers."""
+
+    # Claude 3.7 models
+    anthropic_claude_3_7_sonnet = "anthropic.claude-3-7-sonnet-20250219-v1:0"
+
+    # Claude 4 models
+    anthropic_claude_sonnet_4 = "anthropic.claude-sonnet-4-20250514-v1:0"
+    anthropic_claude_sonnet_4_5 = "anthropic.claude-sonnet-4-5-20250929-v1:0"
+    anthropic_claude_haiku_4_5 = "anthropic.claude-haiku-4-5-20251001-v1:0"
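These classes are plain namespaces of string constants rather than enums, so any attribute can be passed wherever a model name string is expected, for example as the model_name of the UiPathChatOpenAI class added in openai.py above. A short hedged sketch (assumes the UIPATH_* environment variables required by that constructor are set):

from uipath_langchain.chat.openai import UiPathChatOpenAI
from uipath_langchain.chat.supported_models import OpenAIModels

# Requires UIPATH_URL, UIPATH_ORGANIZATION_ID, UIPATH_TENANT_ID and
# UIPATH_ACCESS_TOKEN, as enforced by the constructor above.
llm = UiPathChatOpenAI(model_name=OpenAIModels.gpt_4_1_mini_2025_04_14)
print(llm.invoke("ping").content)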