uipath-langchain 0.0.139__py3-none-any.whl → 0.0.140__py3-none-any.whl

This diff shows the changes between two package versions as published to their public registry. It is provided for informational purposes only.

--- uipath_langchain/chat/models.py
+++ uipath_langchain/chat/models.py
@@ -1,15 +1,15 @@
 import json
 import logging
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Union
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import AIMessage, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
 from langchain_core.messages.ai import UsageMetadata
-from langchain_core.outputs import ChatGeneration, ChatResult
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_openai.chat_models import AzureChatOpenAI
 from pydantic import BaseModel
@@ -49,6 +49,54 @@ class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = self._call(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+            )
+        )
+        yield chunk
+
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = await self._acall(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+            )
+        )
+        yield chunk
+
     def with_structured_output(
         self,
         schema: Optional[Any] = None,
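
These `_stream`/`_astream` overrides do not stream token by token: each issues a single non-streaming request and wraps the complete response in one `ChatGenerationChunk`. The practical effect is that LangChain's `.stream()`/`.astream()` interface now works, but yields exactly one chunk. A minimal sketch, assuming the class is importable from `uipath_langchain.chat.models` and with placeholder constructor arguments (the real ones depend on your UiPath/Azure OpenAI environment):

```python
from uipath_langchain.chat.models import UiPathAzureChatOpenAI

# Hypothetical configuration; actual arguments depend on your deployment.
llm = UiPathAzureChatOpenAI(model="gpt-4o-2024-08-06")

# .stream() now works, but the loop below runs exactly once:
# the single chunk carries the full completion.
for chunk in llm.stream("Summarize RPA in one sentence."):
    print(chunk.content, end="", flush=True)
```
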
@@ -217,6 +265,92 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        """Stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
+            kwargs["tool_choice"] = {
+                "type": "tool",
+                "name": kwargs["tool_choice"]["function"]["name"],
+            }
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = self._call(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+                tool_calls=getattr(
+                    chat_result.generations[0].message, "tool_calls", None
+                ),
+            )
+        )
+        yield chunk
+
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        """Async stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An async iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
+            kwargs["tool_choice"] = {
+                "type": "tool",
+                "name": kwargs["tool_choice"]["function"]["name"],
+            }
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = await self._acall(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+                tool_calls=getattr(
+                    chat_result.generations[0].message, "tool_calls", None
+                ),
+            )
+        )
+        yield chunk
+
     def with_structured_output(
         self,
         schema: Optional[Any] = None,
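
`UiPathChat` follows the same single-chunk pattern, but additionally rewrites tool arguments before the request: OpenAI-style `{"type": "function", "function": {...}}` tool wrappers are unwrapped, and a forced function `tool_choice` is translated to `{"type": "tool", "name": ...}`. A standalone illustration of that rewrite, using a made-up weather tool:

```python
# Mirrors the kwargs rewrite in UiPathChat._stream/_astream above;
# the weather tool is hypothetical.
kwargs = {
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                },
            },
        }
    ],
    "tool_choice": {"type": "function", "function": {"name": "get_weather"}},
}

if kwargs.get("tools"):
    # Strip the OpenAI envelope, keeping only the function definitions.
    kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
    # Re-express the forced choice in {"type": "tool", "name": ...} form.
    kwargs["tool_choice"] = {
        "type": "tool",
        "name": kwargs["tool_choice"]["function"]["name"],
    }

print(kwargs["tools"][0]["name"])  # get_weather
print(kwargs["tool_choice"])       # {'type': 'tool', 'name': 'get_weather'}
```
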
--- uipath_langchain-0.0.139.dist-info/METADATA
+++ uipath_langchain-0.0.140.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath-langchain
-Version: 0.0.139
+Version: 0.0.140
 Summary: UiPath Langchain
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
--- uipath_langchain-0.0.139.dist-info/RECORD
+++ uipath_langchain-0.0.140.dist-info/RECORD
@@ -25,7 +25,7 @@ uipath_langchain/_utils/_request_mixin.py,sha256=sYvvn3_fUJxtF893xFpVGwJx2YoEbw1
 uipath_langchain/_utils/_settings.py,sha256=2fExMQJ88YptfldmzMfZIpsx-m1gfMkeYGf5t6KIe0A,3084
 uipath_langchain/_utils/_sleep_policy.py,sha256=e9pHdjmcCj4CVoFM1jMyZFelH11YatsgWfpyrfXzKBQ,1251
 uipath_langchain/chat/__init__.py,sha256=WDcvy91ixvZ3Mq7Ae94g5CjyQwXovDBnEv1NlD5SXBE,116
-uipath_langchain/chat/models.py,sha256=m5PRAFXzUamt6-1K9uSlWUvZg_NfVyYHkgoQDJ-1rGs,10527
+uipath_langchain/chat/models.py,sha256=PifcbDURqfttqVYKSnzdbOdbSiLiwHfQ6lWgVAtoLj8,16407
 uipath_langchain/embeddings/__init__.py,sha256=QICtYB58ZyqFfDQrEaO8lTEgAU5NuEKlR7iIrS0OBtc,156
 uipath_langchain/embeddings/embeddings.py,sha256=45gKyb6HVKigwE-0CXeZcAk33c0mteaEdPGa8hviqcw,4339
 uipath_langchain/retrievers/__init__.py,sha256=rOn7PyyHgZ4pMnXWPkGqmuBmx8eGuo-Oyndo7Wm9IUU,108
@@ -34,8 +34,8 @@ uipath_langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 uipath_langchain/tools/preconfigured.py,sha256=xCP0hiQuFKIv45PTvMsoWlwsxJDs7goyZujKflYBngY,7476
 uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
 uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=TncIXG-YsUlO0R5ZYzWsM-Dj1SVCZbzmo2LraVxXelc,9559
-uipath_langchain-0.0.139.dist-info/METADATA,sha256=QtbSg_ijtsfWBdkLdkMr7tJUOVHkDFVZtZs-Bhuht_8,4275
-uipath_langchain-0.0.139.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath_langchain-0.0.139.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
-uipath_langchain-0.0.139.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
-uipath_langchain-0.0.139.dist-info/RECORD,,
+uipath_langchain-0.0.140.dist-info/METADATA,sha256=RD2Nx8F1T9nL3QEtgGWFRrtVgJN6rnVSdzMH1QtXYJ0,4275
+uipath_langchain-0.0.140.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath_langchain-0.0.140.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+uipath_langchain-0.0.140.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+uipath_langchain-0.0.140.dist-info/RECORD,,
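
The RECORD entries pair each file with an unpadded urlsafe-base64 SHA-256 digest and a size in bytes, so the models.py change above (10527 to 16407 bytes) can be checked against an extracted wheel. A minimal sketch, assuming the wheel has been unzipped into the current directory:

```python
import base64
import hashlib
from pathlib import Path

def record_hash(path: str) -> str:
    """Return a wheel RECORD-style digest: urlsafe base64 of SHA-256, unpadded."""
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 0.0.140 wheel this should print
# sha256=PifcbDURqfttqVYKSnzdbOdbSiLiwHfQ6lWgVAtoLj8
print(record_hash("uipath_langchain/chat/models.py"))
```
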