uipath-langchain 0.1.24__py3-none-any.whl → 0.1.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,330 +0,0 @@
- import logging
- import os
- from typing import Optional, Union
-
- import aiohttp
- from pydantic import Field
- from uipath.utils import EndpointManager
-
- from .supported_models import GeminiModels
-
- logger = logging.getLogger(__name__)
-
-
- def _check_vertex_dependencies() -> None:
-     """Check if required dependencies for UiPathChatVertex are installed."""
-     import importlib.util
-
-     missing_packages = []
-
-     if importlib.util.find_spec("langchain_google_vertexai") is None:
-         missing_packages.append("langchain-google-vertexai")
-
-     if importlib.util.find_spec("langchain_community") is None:
-         missing_packages.append("langchain-community")
-
-     if missing_packages:
-         packages_str = ", ".join(missing_packages)
-         raise ImportError(
-             f"The following packages are required to use UiPathChatVertex: {packages_str}\n"
-             "Please install them using one of the following methods:\n\n"
-             "  # Using pip:\n"
-             f"  pip install uipath-langchain[vertex]\n\n"
-             "  # Using uv:\n"
-             f"  uv add 'uipath-langchain[vertex]'\n\n"
-         )
-
-
- _check_vertex_dependencies()
-
- from google.auth.credentials import AnonymousCredentials
- from google.cloud.aiplatform_v1.services.prediction_service import (
-     PredictionServiceAsyncClient as v1PredictionServiceAsyncClient,
- )
- from google.cloud.aiplatform_v1.services.prediction_service import (
-     PredictionServiceClient as v1PredictionServiceClient,
- )
- from google.cloud.aiplatform_v1beta1.services.prediction_service import (
-     PredictionServiceAsyncClient as v1beta1PredictionServiceAsyncClient,
- )
- from google.cloud.aiplatform_v1beta1.services.prediction_service import (
-     PredictionServiceClient as v1beta1PredictionServiceClient,
- )
- from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.base import (
-     PredictionServiceTransport,
- )
- from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.rest import (
-     PredictionServiceRestTransport,
- )
- from langchain_community.utilities.vertexai import (
-     get_client_info,
- )
- from langchain_google_vertexai import ChatVertexAI
-
-
- class CustomPredictionServiceRestTransport(PredictionServiceRestTransport):
-     def __init__(self, llmgw_url: str, custom_headers: dict[str, str], **kwargs):
-         self.llmgw_url = llmgw_url
-         self.custom_headers = custom_headers or {}
-
-         kwargs.setdefault("credentials", AnonymousCredentials())
-         super().__init__(**kwargs)
-
-         original_request = self._session.request
-
-         def redirected_request(method, url, **kwargs_inner):
-             headers = kwargs_inner.pop("headers", {})
-             headers.update(self.custom_headers)
-
-             is_streaming = kwargs_inner.get("stream", False)
-             headers["X-UiPath-Streaming-Enabled"] = "true" if is_streaming else "false"
-
-             return original_request(
-                 method, self.llmgw_url, headers=headers, **kwargs_inner
-             )
-
-         self._session.request = redirected_request  # type: ignore[method-assign, assignment]
-
-
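
The removed sync transport above reroutes traffic by monkeypatching the requests session inside the generated Google client: every outgoing call keeps its method, headers, and body, but the SDK-chosen URL is replaced with the gateway URL and the UiPath headers are merged in. A minimal standalone sketch of that pattern follows; the URL and token are placeholders, not real endpoints:

import requests

session = requests.Session()
original_request = session.request

def redirected_request(method, url, **kwargs):
    # Ignore the caller-supplied URL; send everything to the gateway instead.
    headers = kwargs.pop("headers", {}) or {}
    headers["Authorization"] = "Bearer <token>"  # placeholder credential
    return original_request(
        method, "https://gateway.example/llm", headers=headers, **kwargs
    )

session.request = redirected_request
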
- class CustomPredictionServiceRestAsyncTransport:
-     """
-     Custom async transport for calling UiPath LLM Gateway.
-
-     Uses aiohttp for REST/HTTP communication instead of gRPC.
-     Handles both regular and streaming responses from the gateway.
-     """
-
-     def __init__(self, llmgw_url: str, custom_headers: dict[str, str], **kwargs):
-         self.llmgw_url = llmgw_url
-         self.custom_headers = custom_headers or {}
-
-     def _serialize_request(self, request) -> str:
-         """Convert proto-plus request to JSON string."""
-         import json
-
-         from proto import (  # type: ignore[import-untyped]
-             Message as ProtoMessage,
-         )
-
-         if isinstance(request, ProtoMessage):
-             request_dict = type(request).to_dict(
-                 request, preserving_proto_field_name=False
-             )
-             return json.dumps(request_dict)
-         else:
-             from google.protobuf.json_format import MessageToJson
-
-             return MessageToJson(request, preserving_proto_field_name=False)
-
-     def _get_response_class(self, request):
-         """Get the response class corresponding to the request class."""
-         import importlib
-
-         response_class_name = request.__class__.__name__.replace("Request", "Response")
-         response_class = getattr(
-             request.__class__.__module__, response_class_name, None
-         )
-
-         if response_class is None:
-             module = importlib.import_module(request.__class__.__module__)
-             response_class = getattr(module, response_class_name, None)
-
-         return response_class
-
-     def _deserialize_response(self, response_json: str, request):
-         """Convert JSON string to proto-plus response object."""
-         import json
-
-         from proto import Message as ProtoMessage
-
-         response_class = self._get_response_class(request)
-
-         if response_class and isinstance(request, ProtoMessage):
-             return response_class.from_json(response_json, ignore_unknown_fields=True)
-         elif response_class:
-             from google.protobuf.json_format import Parse
-
-             return Parse(response_json, response_class(), ignore_unknown_fields=True)
-         else:
-             return json.loads(response_json)
-
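
The serialization helpers above lean on proto-plus's JSON round-trip (`to_dict`/`from_json`), falling back to `google.protobuf.json_format` for plain protobuf messages. A toy round-trip under the same scheme, using a made-up `EchoRequest` message rather than the real Vertex request/response types:

import json

import proto  # proto-plus

class EchoRequest(proto.Message):
    # Made-up message type standing in for the real Vertex protos.
    text = proto.Field(proto.STRING, number=1)

req = EchoRequest(text="hi")
payload = json.dumps(type(req).to_dict(req, preserving_proto_field_name=False))
restored = EchoRequest.from_json(payload, ignore_unknown_fields=True)
assert restored.text == "hi"
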
-     async def _make_request(self, request_json: str, streaming: bool = False):
-         """Make HTTP POST request to UiPath gateway."""
-         headers = self.custom_headers.copy()
-         headers["Content-Type"] = "application/json"
-
-         if streaming:
-             headers["X-UiPath-Streaming-Enabled"] = "true"
-
-         connector = aiohttp.TCPConnector(ssl=True)
-         async with aiohttp.ClientSession(connector=connector) as session:
-             async with session.post(
-                 self.llmgw_url, headers=headers, data=request_json
-             ) as response:
-                 if response.status != 200:
-                     error_text = await response.text()
-                     raise Exception(f"HTTP {response.status}: {error_text}")
-
-                 return await response.text()
-
-     async def generate_content(self, request, **kwargs):
-         """Handle non-streaming generate_content calls."""
-         request_json = self._serialize_request(request)
-         response_text = await self._make_request(request_json, streaming=False)
-         return self._deserialize_response(response_text, request)
-
-     def stream_generate_content(self, request, **kwargs):
-         """
-         Handle streaming generate_content calls.
-
-         Returns a coroutine that yields an async iterator.
-         """
-         return self._create_stream_awaitable(request)
-
-     async def _create_stream_awaitable(self, request):
-         """Awaitable wrapper that returns the async generator."""
-         return self._stream_implementation(request)
-
-     async def _stream_implementation(self, request):
-         """
-         Async generator that yields streaming response chunks.
-
-         Parses the array and yields each chunk individually.
-         """
-         import json
-
-         request_json = self._serialize_request(request)
-         response_text = await self._make_request(request_json, streaming=True)
-
-         try:
-             chunks_array = json.loads(response_text)
-             if isinstance(chunks_array, list):
-                 logger.info(f"Streaming: yielding {len(chunks_array)} chunks")
-                 for chunk_data in chunks_array:
-                     chunk_json = json.dumps(chunk_data)
-                     yield self._deserialize_response(chunk_json, request)
-                 return
-         except Exception as e:
-             logger.info(f"Not a JSON array, trying single response: {e}")
-
-         try:
-             yield self._deserialize_response(response_text, request)
-         except Exception as e:
-             logger.error(f"Failed to parse streaming response: {e}")
-
-
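
Note the two-step streaming contract above: `stream_generate_content` returns a coroutine, and awaiting it yields the async generator that produces the chunks. A caller therefore awaits once, then iterates; `transport` and `request` in this sketch are assumed to already exist:

import asyncio

async def consume(transport, request):
    # First await resolves the coroutine into the async generator...
    stream = await transport.stream_generate_content(request)
    # ...then iterate it chunk by chunk.
    async for chunk in stream:
        print(chunk)

# asyncio.run(consume(transport, request))  # with concrete objects in hand
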
- class UiPathChatVertex(ChatVertexAI):
-     transport: Optional[PredictionServiceTransport] = Field(default=None)
-     async_transport: Optional[CustomPredictionServiceRestAsyncTransport] = Field(
-         default=None
-     )
-     async_client: Optional[
-         Union[v1beta1PredictionServiceAsyncClient, v1PredictionServiceAsyncClient]
-     ] = Field(default=None)
-
-     def __init__(
-         self,
-         org_id: Optional[str] = None,
-         tenant_id: Optional[str] = None,
-         token: Optional[str] = None,
-         model_name: str = GeminiModels.gemini_2_5_flash,
-         **kwargs,
-     ):
-         org_id = org_id or os.getenv("UIPATH_ORGANIZATION_ID")
-         tenant_id = tenant_id or os.getenv("UIPATH_TENANT_ID")
-         token = token or os.getenv("UIPATH_ACCESS_TOKEN")
-
-         if not org_id:
-             raise ValueError(
-                 "UIPATH_ORGANIZATION_ID environment variable or org_id parameter is required"
-             )
-         if not tenant_id:
-             raise ValueError(
-                 "UIPATH_TENANT_ID environment variable or tenant_id parameter is required"
-             )
-         if not token:
-             raise ValueError(
-                 "UIPATH_ACCESS_TOKEN environment variable or token parameter is required"
-             )
-
-         self._vendor = "vertexai"
-         self._model_name = model_name
-         self._url: Optional[str] = None
-
-         llmgw_url = self._build_base_url()
-
-         headers = self._build_headers(token)
-
-         super().__init__(
-             model=model_name,
-             project=os.getenv("VERTEXAI_PROJECT", "none"),
-             location=os.getenv("VERTEXAI_LOCATION", "us-central1"),
-             **kwargs,
-         )
-
-         self.transport = CustomPredictionServiceRestTransport(
-             llmgw_url=llmgw_url, custom_headers=headers
-         )
-
-         self.async_transport = CustomPredictionServiceRestAsyncTransport(
-             llmgw_url=llmgw_url, custom_headers=headers
-         )
-
-     @property
-     def prediction_client(
-         self,
-     ) -> Union[v1beta1PredictionServiceClient, v1PredictionServiceClient]:
-         if self.client is None:
-             if self.endpoint_version == "v1":
-                 self.client = v1PredictionServiceClient(
-                     client_options=self.client_options,
-                     client_info=get_client_info(module=self._user_agent),
-                     transport=self.transport,  # type: ignore[arg-type]
-                 )
-             else:
-                 self.client = v1beta1PredictionServiceClient(
-                     client_options=self.client_options,
-                     client_info=get_client_info(module=self._user_agent),
-                     transport=self.transport,
-                 )
-         return self.client
-
-     @property
-     def async_prediction_client(
-         self,
-     ) -> Union[
-         v1beta1PredictionServiceAsyncClient,
-         v1PredictionServiceAsyncClient,
-     ]:
-         return self.async_transport  # type: ignore[return-value]
-
-     @property
-     def endpoint(self) -> str:
-         vendor_endpoint = EndpointManager.get_vendor_endpoint()
-         formatted_endpoint = vendor_endpoint.format(
-             vendor=self._vendor,
-             model=self._model_name,
-         )
-         return formatted_endpoint
-
-     def _build_headers(self, token: str) -> dict[str, str]:
-         headers = {
-             # "X-UiPath-LlmGateway-ApiFlavor": "auto",
-             "Authorization": f"Bearer {token}",
-         }
-         if job_key := os.getenv("UIPATH_JOB_KEY"):
-             headers["X-UiPath-JobKey"] = job_key
-         if process_key := os.getenv("UIPATH_PROCESS_KEY"):
-             headers["X-UiPath-ProcessKey"] = process_key
-         return headers
-
-     def _build_base_url(self) -> str:
-         if not self._url:
-             env_uipath_url = os.getenv("UIPATH_URL")
-
-             if env_uipath_url:
-                 self._url = f"{env_uipath_url.rstrip('/')}/{self.endpoint}"
-             else:
-                 raise ValueError("UIPATH_URL environment variable is required")
-
-         return self._url
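
For reference, a hedged usage sketch of the removed class, driven the way its `__init__` and `_build_base_url` suggest; the import path is an assumption and all values are placeholders:

import os

# Env vars read by __init__ and _build_base_url above; values are placeholders.
os.environ["UIPATH_URL"] = "https://cloud.uipath.com/<org>/<tenant>"
os.environ["UIPATH_ORGANIZATION_ID"] = "<org-id>"
os.environ["UIPATH_TENANT_ID"] = "<tenant-id>"
os.environ["UIPATH_ACCESS_TOKEN"] = "<access-token>"

from uipath_langchain.chat import UiPathChatVertex  # assumed import path

llm = UiPathChatVertex()  # defaults to GeminiModels.gemini_2_5_flash
print(llm.invoke("Hello through the UiPath LLM Gateway"))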