uipath_langchain_client-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. uipath_langchain_client/__init__.py +50 -0
  2. uipath_langchain_client/__version__.py +3 -0
  3. uipath_langchain_client/base_client.py +277 -0
  4. uipath_langchain_client/clients/anthropic/__init__.py +3 -0
  5. uipath_langchain_client/clients/anthropic/chat_models.py +157 -0
  6. uipath_langchain_client/clients/azure/__init__.py +4 -0
  7. uipath_langchain_client/clients/azure/chat_models.py +46 -0
  8. uipath_langchain_client/clients/azure/embeddings.py +46 -0
  9. uipath_langchain_client/clients/bedrock/__init__.py +7 -0
  10. uipath_langchain_client/clients/bedrock/chat_models.py +63 -0
  11. uipath_langchain_client/clients/bedrock/embeddings.py +33 -0
  12. uipath_langchain_client/clients/bedrock/utils.py +90 -0
  13. uipath_langchain_client/clients/google/__init__.py +4 -0
  14. uipath_langchain_client/clients/google/chat_models.py +203 -0
  15. uipath_langchain_client/clients/google/embeddings.py +45 -0
  16. uipath_langchain_client/clients/normalized/__init__.py +4 -0
  17. uipath_langchain_client/clients/normalized/chat_models.py +419 -0
  18. uipath_langchain_client/clients/normalized/embeddings.py +31 -0
  19. uipath_langchain_client/clients/openai/__init__.py +15 -0
  20. uipath_langchain_client/clients/openai/chat_models.py +102 -0
  21. uipath_langchain_client/clients/openai/embeddings.py +82 -0
  22. uipath_langchain_client/clients/vertexai/__init__.py +3 -0
  23. uipath_langchain_client/clients/vertexai/chat_models.py +48 -0
  24. uipath_langchain_client/factory.py +217 -0
  25. uipath_langchain_client/settings.py +32 -0
  26. uipath_langchain_client-1.0.0.dist-info/METADATA +276 -0
  27. uipath_langchain_client-1.0.0.dist-info/RECORD +28 -0
  28. uipath_langchain_client-1.0.0.dist-info/WHEEL +4 -0
+++ uipath_langchain_client/clients/normalized/chat_models.py
@@ -0,0 +1,419 @@
+ """
+ Normalized Chat Model for UiPath LangChain Client
+
+ This module provides a provider-agnostic chat model that uses UiPath's normalized API.
+ The normalized API provides a consistent interface across all LLM providers (OpenAI,
+ Google, Anthropic, etc.), making it easy to switch providers without code changes.
+
+ The normalized API supports:
+ - Standard chat completions with messages
+ - Tool/function calling with automatic format conversion
+ - Streaming responses (sync and async)
+ - Extended thinking/reasoning parameters for supported models
+
+ Example:
+     >>> from uipath_langchain_client.clients.normalized.chat_models import UiPathNormalizedChatModel
+     >>> from uipath_langchain_client.settings import get_default_client_settings
+     >>>
+     >>> settings = get_default_client_settings()
+     >>> chat = UiPathNormalizedChatModel(
+     ...     model="gpt-4o-2024-11-20",
+     ...     client_settings=settings,
+     ... )
+     >>> response = chat.invoke("Hello!")
+ """
+
+ import json
+ from collections.abc import AsyncIterator, Callable, Iterator, Sequence
+ from typing import Any
+
+ from langchain_core.callbacks import (
+     AsyncCallbackManagerForLLMRun,
+     CallbackManagerForLLMRun,
+ )
+ from langchain_core.language_models.base import (
+     LanguageModelInput,
+ )
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import (
+     AIMessage,
+     AIMessageChunk,
+     BaseMessage,
+     InputTokenDetails,
+     OutputTokenDetails,
+     ToolCallChunk,
+     UsageMetadata,
+ )
+ from langchain_core.messages.utils import convert_to_openai_messages
+ from langchain_core.outputs import (
+     ChatGeneration,
+     ChatGenerationChunk,
+     ChatResult,
+ )
+ from langchain_core.runnables import Runnable
+ from langchain_core.tools import BaseTool
+ from langchain_core.utils.function_calling import (
+     convert_to_openai_function,
+ )
+ from pydantic import Field
+ from uipath_langchain_client.base_client import UiPathBaseLLMClient
+ from uipath_langchain_client.settings import UiPathAPIConfig
+
+
+ class UiPathNormalizedChatModel(UiPathBaseLLMClient, BaseChatModel):
+     """LangChain chat model using UiPath's normalized (provider-agnostic) API.
+
+     This model provides a consistent interface across all LLM providers supported
+     by UiPath AgentHub and LLM Gateway. It automatically handles message format
+     conversion, tool calling, and streaming for any supported provider.
+
+     Attributes:
+         model_name: The model identifier (e.g., "gpt-4o-2024-11-20", "gemini-2.5-flash").
+         max_tokens: Maximum tokens in the response.
+         temperature: Sampling temperature (0.0 to 2.0).
+         stop: Stop sequences to end generation.
+         n: Number of completions to generate.
+         top_p: Nucleus sampling probability mass.
+         presence_penalty: Penalty for repeated tokens (-2.0 to 2.0).
+         frequency_penalty: Penalty based on token frequency (-2.0 to 2.0).
+
+     Extended Thinking (model-specific):
+         reasoning: OpenAI o1/o3 reasoning config {"effort": "low"|"medium"|"high"}.
+         reasoning_effort: OpenAI reasoning effort level.
+         thinking: Anthropic Claude thinking config {"type": "enabled", "budget_tokens": N}.
+         thinking_level: Gemini thinking level.
+         thinking_budget: Gemini thinking token budget.
+         include_thoughts: Whether to include thinking in Gemini responses.
+
+     Example:
+         >>> chat = UiPathNormalizedChatModel(
+         ...     model="gpt-4o-2024-11-20",
+         ...     client_settings=settings,
+         ...     temperature=0.7,
+         ...     max_tokens=1000,
+         ... )
+         >>> response = chat.invoke("Explain machine learning.")
+     """
+
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="completions",
+         client_type="normalized",
+         freeze_base_url=True,
+     )
+
+     # Standard LLM parameters
+     max_tokens: int | None = None
+     temperature: float | None = None
+     stop: list[str] | str | None = Field(default=None, alias="stop_sequences")
+
+     n: int | None = None  # Number of completions to generate
+     top_p: float | None = None  # Nucleus sampling probability mass
+     presence_penalty: float | None = None  # Penalty for repeated tokens
+     frequency_penalty: float | None = None  # Frequency-based repetition penalty
+     verbosity: str | None = None  # Response verbosity: "low", "medium", or "high"
+
+     model_kwargs: dict[str, Any] = Field(
+         default_factory=dict
+     )  # Additional model-specific parameters
+     disabled_params: dict[str, Any] | None = None  # Parameters to exclude from requests
+
+     # OpenAI o1/o3 reasoning parameters
+     reasoning: dict[str, Any] | None = None  # {"effort": "low"|"medium"|"high", "summary": ...}
+     reasoning_effort: str | None = None  # "minimal", "low", "medium", or "high"
+
+     # Anthropic Claude extended thinking parameters
+     thinking: dict[str, Any] | None = None  # {"type": "enabled"|"disabled", "budget_tokens": N}
+
+     # Google Gemini thinking parameters
+     thinking_level: str | None = None  # Thinking depth level
+     thinking_budget: int | None = None  # Token budget for thinking
+     include_thoughts: bool | None = None  # Include thinking in response
+
+     @property
+     def _llm_type(self) -> str:
+         """Return type of chat model."""
+         return "UiPath-Normalized"
+
+     @property
+     def _default_params(self) -> dict[str, Any]:
+         """Get the default parameters for calling the normalized API."""
+         exclude_if_none = {
+             "frequency_penalty": self.frequency_penalty,
+             "presence_penalty": self.presence_penalty,
+             "top_p": self.top_p,
+             "stop": self.stop or None,  # Also exclude empty list for this
+             "n": self.n,
+             "max_tokens": self.max_tokens,
+             "temperature": self.temperature,
+             "verbosity": self.verbosity,
+             "reasoning": self.reasoning,
+             "reasoning_effort": self.reasoning_effort,
+             "thinking": self.thinking,
+             "thinking_level": self.thinking_level,
+             "thinking_budget": self.thinking_budget,
+             "include_thoughts": self.include_thoughts,
+         }
+
+         return {
+             "model": self.model_name,
+             **{k: v for k, v in exclude_if_none.items() if v is not None},
+             **self.model_kwargs,
+         }
+
+     def _get_usage_metadata(self, json_data: dict[str, Any]) -> UsageMetadata:
+         return UsageMetadata(
+             input_tokens=json_data.get("prompt_tokens", 0),
+             output_tokens=json_data.get("completion_tokens", 0),
+             total_tokens=json_data.get("total_tokens", 0),
+             input_token_details=InputTokenDetails(
+                 audio=json_data.get("audio_tokens", 0),
+                 cache_read=json_data.get("cache_read_input_tokens", 0),
+                 cache_creation=json_data.get("cache_creation_input_tokens", 0),
+             ),
+             output_token_details=OutputTokenDetails(
+                 audio=json_data.get("audio_tokens", 0),
+                 reasoning=json_data.get("thoughts_tokens", 0),
+             ),
+         )
+
+     def bind_tools(
+         self,
+         tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
+         *,
+         tool_choice: str | None = None,
+         strict: bool | None = None,
+         **kwargs: Any,
+     ) -> Runnable[LanguageModelInput, AIMessage]:
+         """Bind tools to the model with automatic tool choice detection."""
+         formatted_tools = [convert_to_openai_function(t, strict=strict) for t in tools]
+         tool_names = [tool["name"] for tool in formatted_tools]
+
+         if tool_choice is None:
+             tool_choice = "auto"
+         elif tool_choice in ["required", "any"]:
+             tool_choice = "required"
+         elif tool_choice in tool_names:
+             pass
+         else:
+             tool_choice = "auto"
+
+         if tool_choice in ["required", "auto"]:
+             tool_choice_object = {
+                 "type": tool_choice,
+             }
+         else:
+             tool_choice_object = {
+                 "type": "tool",
+                 "name": tool_choice,
+             }
+
+         return super().bind(
+             tools=formatted_tools,
+             tool_choice=tool_choice_object,
+             **kwargs,
+         )
+
+     def _preprocess_request(
+         self, messages: list[BaseMessage], stop: list[str] | None = None, **kwargs: Any
+     ) -> dict[str, Any]:
+         """Convert LangChain messages to normalized API request format."""
+         converted_messages = convert_to_openai_messages(messages)
+         for message, converted_message in zip(messages, converted_messages):
+             if isinstance(message, AIMessage):
+                 if isinstance(converted_message["content"], list):
+                     converted_message["content"] = [
+                         item for item in converted_message["content"] if item["type"] != "tool_call"
+                     ]
+                     if len(converted_message["content"]) == 0:
+                         converted_message["content"] = ""
+                 if (
+                     self.model_name
+                     and "claude" in self.model_name.lower()
+                     and not converted_message["content"]
+                 ):
+                     converted_message["content"] = "tool_call"
+                 if "tool_calls" in converted_message:
+                     converted_message["tool_calls"] = [
+                         {
+                             "id": tool_call["id"],
+                             "name": tool_call["function"]["name"],
+                             "arguments": json.loads(tool_call["function"]["arguments"]),
+                         }
+                         for tool_call in converted_message["tool_calls"]
+                     ]
+                 if "signature" in message.additional_kwargs:  # required for Gemini models
+                     converted_message["signature"] = message.additional_kwargs["signature"]
+             elif converted_message["role"] == "tool":
+                 converted_message["content"] = {
+                     "result": converted_message["content"],
+                     "call_id": converted_message.pop("tool_call_id"),
+                 }
+
+         request_body = {
+             "messages": converted_messages,
+             **self._default_params,
+             **kwargs,
+         }
+         if stop is not None:
+             request_body["stop"] = stop
+
+         return request_body
+
+     def _postprocess_response(self, response: dict[str, Any]) -> ChatResult:
+         """Convert normalized API response to LangChain ChatResult format."""
+         generations = []
+         llm_output = {
+             "id": response.get("id"),
+             "created": response.get("created"),
+             "model_name": response.get("model"),
+         }
+         usage = response.get("usage", {})
+         usage_metadata = self._get_usage_metadata(usage)
+         for choice in response["choices"]:
+             generation_info = {
+                 "finish_reason": choice.get("finish_reason", ""),
+             }
+             message = choice["message"]
+             generation = ChatGeneration(
+                 message=AIMessage(
+                     content=message.get("content", ""),
+                     tool_calls=[
+                         {
+                             "id": tool_call["id"],
+                             "name": tool_call["name"],
+                             "args": tool_call["arguments"],
+                         }
+                         for tool_call in message.get("tool_calls", [])
+                     ],
+                     additional_kwargs={},
+                     response_metadata={},
+                     usage_metadata=usage_metadata,
+                 ),
+                 generation_info=generation_info,
+             )
+             if "signature" in message:  # required for Gemini models
+                 generation.message.additional_kwargs["signature"] = message["signature"]
+             generations.append(generation)
+         return ChatResult(
+             generations=generations,
+             llm_output=llm_output,
+         )
+
+     def _generate(
+         self,
+         messages: list[BaseMessage],
+         *args: Any,
+         run_manager: CallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         request_body = self._preprocess_request(messages, **kwargs)
+         response = self.uipath_request(request_body=request_body)
+         return self._postprocess_response(response.json())
+
+     async def _agenerate(
+         self,
+         messages: list[BaseMessage],
+         *args: Any,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         request_body = self._preprocess_request(messages, **kwargs)
+         response = await self.uipath_arequest(request_body=request_body)
+         return self._postprocess_response(response.json())
+
+     def _generate_chunk(
+         self, original_message: str, json_data: dict[str, Any]
+     ) -> ChatGenerationChunk:
+         generation_info = {
+             "id": json_data.get("id"),
+             "created": json_data.get("created", ""),
+             "model_name": json_data.get("model", ""),
+         }
+         content = ""
+         usage_metadata = None
+         tool_call_chunks = []
+         if usage := json_data.get("usage", {}):
+             usage_metadata = self._get_usage_metadata(usage)
+         if choices := json_data.get("choices", []):
+             if "finish_reason" in choices[0]:
+                 generation_info["finish_reason"] = choices[0]["finish_reason"]
+
+             if "delta" in choices[0]:
+                 content = choices[0]["delta"].get("content", "")
+                 tool_calls = choices[0]["delta"].get("tool_calls", [])
+             elif "message" in choices[0]:
+                 content = choices[0]["message"].get("content", "")
+                 tool_calls = choices[0]["message"].get("tool_calls", [])
+             else:
+                 content = choices[0].get("content", "")
+                 tool_calls = choices[0].get("tool_calls", [])
+
+             for tool_call in tool_calls:
+                 if "function" in tool_call:
+                     name = tool_call["function"].get("name", "")
+                     args = tool_call["function"].get("arguments", "")
+                 else:
+                     name = tool_call.get("name", "")
+                     args = tool_call.get("arguments", "")
+                 if args == {}:
+                     args = ""
+                 if isinstance(args, dict):
+                     args = json.dumps(args)
+                 tool_call_chunks.append(
+                     ToolCallChunk(
+                         id=tool_call.get("id", ""),
+                         name=name,
+                         args=args,
+                         index=tool_call.get("index", 0),
+                     )
+                 )
+
+         return ChatGenerationChunk(
+             text=original_message,
+             generation_info=generation_info,
+             message=AIMessageChunk(
+                 content=content,
+                 usage_metadata=usage_metadata,
+                 tool_call_chunks=tool_call_chunks,
+             ),
+         )
+
+     def _stream(
+         self,
+         messages: list[BaseMessage],
+         *args: Any,
+         run_manager: CallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> Iterator[ChatGenerationChunk]:
+         request_body = self._preprocess_request(messages, **kwargs)
+         for chunk in self.uipath_stream(request_body=request_body, stream_type="lines"):
+             chunk = str(chunk)
+             if chunk.startswith("data:"):
+                 chunk = chunk.split("data:")[1].strip()
+             try:
+                 json_data = json.loads(chunk)
+             except json.JSONDecodeError:
+                 continue
+             if "id" in json_data and not json_data["id"]:
+                 continue
+             yield self._generate_chunk(chunk, json_data)
+
+     async def _astream(
+         self,
+         messages: list[BaseMessage],
+         *args: Any,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[ChatGenerationChunk]:
+         request_body = self._preprocess_request(messages, **kwargs)
+         async for chunk in self.uipath_astream(request_body=request_body, stream_type="lines"):
+             chunk = str(chunk)
+             if chunk.startswith("data:"):
+                 chunk = chunk.split("data:")[1].strip()
+             try:
+                 json_data = json.loads(chunk)
+             except json.JSONDecodeError:
+                 continue
+             if "id" in json_data and not json_data["id"]:
+                 continue
+             yield self._generate_chunk(chunk, json_data)
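
For orientation, here is a minimal usage sketch of the normalized chat model above, combining the module's docstring example with bind_tools and streaming. It assumes the get_default_client_settings helper referenced in that docstring; the get_weather tool is purely illustrative and not part of the package.

    from langchain_core.tools import tool

    from uipath_langchain_client.clients.normalized.chat_models import UiPathNormalizedChatModel
    from uipath_langchain_client.settings import get_default_client_settings


    @tool
    def get_weather(city: str) -> str:
        """Toy tool for illustration; any LangChain tool works here."""
        return f"Sunny in {city}"


    chat = UiPathNormalizedChatModel(
        model="gpt-4o-2024-11-20",
        client_settings=get_default_client_settings(),
        temperature=0.7,
    )

    # bind_tools converts the tool via convert_to_openai_function and, since no
    # tool_choice is given, sends tool_choice={"type": "auto"}.
    chat_with_tools = chat.bind_tools([get_weather])
    response = chat_with_tools.invoke("What is the weather in Berlin?")
    print(response.tool_calls)  # parsed from the normalized response in _postprocess_response

    # Streaming goes through _stream, which strips the "data:" SSE prefix and
    # skips keep-alive lines that fail to parse as JSON.
    for chunk in chat.stream("Tell me a short joke."):
        print(chunk.content, end="", flush=True)
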
+++ uipath_langchain_client/clients/normalized/embeddings.py
@@ -0,0 +1,31 @@
+ from langchain_core.embeddings import Embeddings
+ from uipath_langchain_client.base_client import UiPathBaseLLMClient
+ from uipath_langchain_client.settings import UiPathAPIConfig
+
+
+ class UiPathNormalizedEmbeddings(UiPathBaseLLMClient, Embeddings):
+     """LangChain embeddings using UiPath's normalized embeddings API.
+
+     Provides a consistent interface for generating text embeddings across all
+     embedding providers supported by UiPath AgentHub and LLM Gateway.
+     """
+
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="embeddings",
+         client_type="normalized",
+         freeze_base_url=True,
+     )
+
+     def embed_documents(self, texts: list[str]) -> list[list[float]]:
+         response = self.uipath_request(request_body={"input": texts})
+         return [r["embedding"] for r in response.json()["data"]]
+
+     def embed_query(self, text: str) -> list[float]:
+         return self.embed_documents([text])[0]
+
+     async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
+         response = await self.uipath_arequest(request_body={"input": texts})
+         return [r["embedding"] for r in response.json()["data"]]
+
+     async def aembed_query(self, text: str) -> list[float]:
+         return (await self.aembed_documents([text]))[0]
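
A similarly minimal sketch for the normalized embeddings client follows. The model keyword is an assumption about a field defined on the shared UiPathBaseLLMClient base (only api_config is declared in this file), and get_default_client_settings is again taken from the chat model's docstring.

    from uipath_langchain_client.clients.normalized.embeddings import UiPathNormalizedEmbeddings
    from uipath_langchain_client.settings import get_default_client_settings

    embeddings = UiPathNormalizedEmbeddings(
        model="text-embedding-3-large",  # assumed base-client field, not declared above
        client_settings=get_default_client_settings(),
    )

    # embed_documents posts {"input": texts} and reads data[i]["embedding"] back.
    vectors = embeddings.embed_documents(["first document", "second document"])
    query_vector = embeddings.embed_query("first document")
    print(len(vectors), len(query_vector))  # 2 documents, one embedding dimension
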
+++ uipath_langchain_client/clients/openai/__init__.py
@@ -0,0 +1,15 @@
+ from uipath_langchain_client.clients.openai.chat_models import (
+     UiPathAzureChatOpenAI,
+     UiPathChatOpenAI,
+ )
+ from uipath_langchain_client.clients.openai.embeddings import (
+     UiPathAzureOpenAIEmbeddings,
+     UiPathOpenAIEmbeddings,
+ )
+
+ __all__ = [
+     "UiPathChatOpenAI",
+     "UiPathOpenAIEmbeddings",
+     "UiPathAzureChatOpenAI",
+     "UiPathAzureOpenAIEmbeddings",
+ ]
+++ uipath_langchain_client/clients/openai/chat_models.py
@@ -0,0 +1,102 @@
+ from collections.abc import Awaitable, Callable
+ from typing import Self
+
+ from pydantic import Field, SecretStr, model_validator
+ from uipath_langchain_client.base_client import UiPathBaseLLMClient
+ from uipath_langchain_client.settings import UiPathAPIConfig
+
+ try:
+     from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI
+
+     from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+ except ImportError as e:
+     raise ImportError(
+         "The 'openai' extra is required to use UiPathChatOpenAI and UiPathAzureChatOpenAI. "
+         "Install it with: uv add uipath-langchain-client[openai]"
+     ) from e
+
+
+ class UiPathChatOpenAI(UiPathBaseLLMClient, ChatOpenAI):  # type: ignore[override]
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="completions",
+         client_type="passthrough",
+         vendor_type="openai",
+         freeze_base_url=True,
+     )
+
+     # Override fields to avoid errors when instantiating the class
+     openai_api_key: SecretStr | None | Callable[[], str] | Callable[[], Awaitable[str]] = Field(
+         alias="api_key", default=SecretStr("PLACEHOLDER")
+     )
+
+     @model_validator(mode="after")
+     def setup_uipath_api_flavor_and_version(self) -> Self:
+         self.api_config.api_version = "2025-03-01-preview"
+         if self._use_responses_api({}):
+             self.api_config.api_flavor = "responses"
+         else:
+             self.api_config.api_flavor = "chat-completions"
+         return self
+
+     @model_validator(mode="after")
+     def setup_uipath_client(self) -> Self:
+         self.root_client = OpenAI(
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_sync_client,
+         )
+         self.root_async_client = AsyncOpenAI(
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_async_client,
+         )
+         self.client = self.root_client.chat.completions
+         self.async_client = self.root_async_client.chat.completions
+         return self
+
+
+ class UiPathAzureChatOpenAI(UiPathBaseLLMClient, AzureChatOpenAI):  # type: ignore[override]
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="completions",
+         client_type="passthrough",
+         vendor_type="openai",
+         freeze_base_url=True,
+     )
+
+     # Override fields to avoid errors when instantiating the class
+     azure_endpoint: str | None = Field(default="PLACEHOLDER")
+     openai_api_version: str | None = Field(default="PLACEHOLDER", alias="api_version")
+     openai_api_key: SecretStr | None = Field(default=SecretStr("PLACEHOLDER"), alias="api_key")
+
+     @model_validator(mode="after")
+     def setup_uipath_api_flavor_and_version(self) -> Self:
+         self.api_config.api_version = "2025-03-01-preview"
+         if self._use_responses_api({}):
+             self.api_config.api_flavor = "responses"
+         else:
+             self.api_config.api_flavor = "chat-completions"
+         return self
+
+     @model_validator(mode="after")
+     def setup_uipath_client(self) -> Self:
+         self.root_client = AzureOpenAI(
+             azure_endpoint="PLACEHOLDER",
+             api_version="PLACEHOLDER",
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_sync_client,
+         )
+         self.root_async_client = AsyncAzureOpenAI(
+             azure_endpoint="PLACEHOLDER",
+             api_version="PLACEHOLDER",
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_async_client,
+         )
+         self.client = self.root_client.chat.completions
+         self.async_client = self.root_async_client.chat.completions
+         return self
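
The passthrough classes above keep the full langchain_openai feature set while routing traffic through UiPath: the OpenAI SDK clients are constructed with "PLACEHOLDER" credentials because authentication and base-URL selection happen inside the injected uipath_sync_client/uipath_async_client HTTP clients. A hedged usage sketch, again assuming the get_default_client_settings helper:

    from uipath_langchain_client.clients.openai import UiPathChatOpenAI
    from uipath_langchain_client.settings import get_default_client_settings

    # Behaves like ChatOpenAI; the placeholder API key is never used for auth
    # because the UiPath HTTP client injected above handles it.
    chat = UiPathChatOpenAI(
        model="gpt-4o-2024-11-20",
        client_settings=get_default_client_settings(),
    )
    print(chat.invoke("Hello!").content)
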
+++ uipath_langchain_client/clients/openai/embeddings.py
@@ -0,0 +1,82 @@
+ from collections.abc import Awaitable, Callable
+ from typing import Self
+
+ from pydantic import Field, SecretStr, model_validator
+ from uipath_langchain_client.base_client import UiPathBaseLLMClient
+ from uipath_langchain_client.settings import UiPathAPIConfig
+
+ try:
+     from langchain_openai.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings
+
+     from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
+ except ImportError as e:
+     raise ImportError(
+         "The 'openai' extra is required to use UiPathOpenAIEmbeddings and UiPathAzureOpenAIEmbeddings. "
+         "Install it with: uv add uipath-langchain-client[openai]"
+     ) from e
+
+
+ class UiPathOpenAIEmbeddings(UiPathBaseLLMClient, OpenAIEmbeddings):
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="embeddings",
+         client_type="passthrough",
+         vendor_type="openai",
+         freeze_base_url=True,
+     )
+
+     # Override fields to avoid errors when instantiating the class
+     model: str = Field(default="", alias="model_name")
+     openai_api_key: SecretStr | None | Callable[[], str] | Callable[[], Awaitable[str]] = Field(
+         alias="api_key", default=SecretStr("PLACEHOLDER")
+     )
+
+     @model_validator(mode="after")
+     def setup_uipath_client(self) -> Self:
+         self.client = OpenAI(
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_sync_client,
+         ).embeddings
+         self.async_client = AsyncOpenAI(
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_async_client,
+         ).embeddings
+         return self
+
+
+ class UiPathAzureOpenAIEmbeddings(UiPathBaseLLMClient, AzureOpenAIEmbeddings):
+     api_config: UiPathAPIConfig = UiPathAPIConfig(
+         api_type="embeddings",
+         client_type="passthrough",
+         vendor_type="openai",
+         freeze_base_url=True,
+     )
+
+     # Override fields to avoid errors when instantiating the class
+     model: str = Field(default="", alias="model_name")
+     azure_endpoint: str | None = Field(default="PLACEHOLDER")
+     openai_api_version: str | None = Field(default="PLACEHOLDER", alias="api_version")
+     openai_api_key: SecretStr | None = Field(default=SecretStr("PLACEHOLDER"), alias="api_key")
+
+     @model_validator(mode="after")
+     def setup_uipath_client(self) -> Self:
+         self.client = AzureOpenAI(
+             azure_endpoint="PLACEHOLDER",
+             api_version="PLACEHOLDER",
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_sync_client,
+         ).embeddings
+         self.async_client = AsyncAzureOpenAI(
+             azure_endpoint="PLACEHOLDER",
+             api_version="PLACEHOLDER",
+             api_key="PLACEHOLDER",
+             timeout=None,  # handled by the UiPath client
+             max_retries=1,  # handled by the UiPath client
+             http_client=self.uipath_async_client,
+         ).embeddings
+         return self
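
The embeddings passthroughs follow the same placeholder-credential pattern. A minimal sketch under the same assumptions as the chat example above:

    from uipath_langchain_client.clients.openai import UiPathOpenAIEmbeddings
    from uipath_langchain_client.settings import get_default_client_settings

    embeddings = UiPathOpenAIEmbeddings(
        model="text-embedding-3-large",
        client_settings=get_default_client_settings(),
    )
    print(len(embeddings.embed_query("hello")))  # embedding dimension
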
+++ uipath_langchain_client/clients/vertexai/__init__.py
@@ -0,0 +1,3 @@
+ from uipath_langchain_client.clients.vertexai.chat_models import UiPathChatAnthropicVertex
+
+ __all__ = ["UiPathChatAnthropicVertex"]