uipath-langchain 0.0.133__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. uipath_langchain/_cli/cli_init.py +130 -191
  2. uipath_langchain/_cli/cli_new.py +2 -3
  3. uipath_langchain/_resources/AGENTS.md +21 -0
  4. uipath_langchain/_resources/REQUIRED_STRUCTURE.md +92 -0
  5. uipath_langchain/_tracing/__init__.py +3 -2
  6. uipath_langchain/_tracing/_instrument_traceable.py +11 -12
  7. uipath_langchain/_utils/_request_mixin.py +327 -51
  8. uipath_langchain/_utils/_settings.py +2 -2
  9. uipath_langchain/agent/exceptions/__init__.py +6 -0
  10. uipath_langchain/agent/exceptions/exceptions.py +11 -0
  11. uipath_langchain/agent/guardrails/__init__.py +21 -0
  12. uipath_langchain/agent/guardrails/actions/__init__.py +11 -0
  13. uipath_langchain/agent/guardrails/actions/base_action.py +23 -0
  14. uipath_langchain/agent/guardrails/actions/block_action.py +41 -0
  15. uipath_langchain/agent/guardrails/actions/escalate_action.py +274 -0
  16. uipath_langchain/agent/guardrails/actions/log_action.py +57 -0
  17. uipath_langchain/agent/guardrails/guardrail_nodes.py +125 -0
  18. uipath_langchain/agent/guardrails/guardrails_factory.py +70 -0
  19. uipath_langchain/agent/guardrails/guardrails_subgraph.py +247 -0
  20. uipath_langchain/agent/guardrails/types.py +20 -0
  21. uipath_langchain/agent/react/__init__.py +14 -0
  22. uipath_langchain/agent/react/agent.py +113 -0
  23. uipath_langchain/agent/react/constants.py +2 -0
  24. uipath_langchain/agent/react/init_node.py +20 -0
  25. uipath_langchain/agent/react/llm_node.py +43 -0
  26. uipath_langchain/agent/react/router.py +97 -0
  27. uipath_langchain/agent/react/terminate_node.py +82 -0
  28. uipath_langchain/agent/react/tools/__init__.py +7 -0
  29. uipath_langchain/agent/react/tools/tools.py +50 -0
  30. uipath_langchain/agent/react/types.py +39 -0
  31. uipath_langchain/agent/react/utils.py +49 -0
  32. uipath_langchain/agent/tools/__init__.py +17 -0
  33. uipath_langchain/agent/tools/context_tool.py +53 -0
  34. uipath_langchain/agent/tools/escalation_tool.py +111 -0
  35. uipath_langchain/agent/tools/integration_tool.py +181 -0
  36. uipath_langchain/agent/tools/process_tool.py +49 -0
  37. uipath_langchain/agent/tools/static_args.py +138 -0
  38. uipath_langchain/agent/tools/structured_tool_with_output_type.py +14 -0
  39. uipath_langchain/agent/tools/tool_factory.py +45 -0
  40. uipath_langchain/agent/tools/tool_node.py +22 -0
  41. uipath_langchain/agent/tools/utils.py +11 -0
  42. uipath_langchain/chat/__init__.py +4 -0
  43. uipath_langchain/chat/bedrock.py +187 -0
  44. uipath_langchain/chat/gemini.py +330 -0
  45. uipath_langchain/chat/mapper.py +309 -0
  46. uipath_langchain/chat/models.py +248 -35
  47. uipath_langchain/chat/openai.py +132 -0
  48. uipath_langchain/chat/supported_models.py +42 -0
  49. uipath_langchain/embeddings/embeddings.py +131 -34
  50. uipath_langchain/middlewares.py +0 -6
  51. uipath_langchain/retrievers/context_grounding_retriever.py +7 -9
  52. uipath_langchain/runtime/__init__.py +36 -0
  53. uipath_langchain/runtime/_serialize.py +46 -0
  54. uipath_langchain/runtime/config.py +61 -0
  55. uipath_langchain/runtime/errors.py +43 -0
  56. uipath_langchain/runtime/factory.py +315 -0
  57. uipath_langchain/runtime/graph.py +159 -0
  58. uipath_langchain/runtime/runtime.py +453 -0
  59. uipath_langchain/runtime/schema.py +349 -0
  60. uipath_langchain/runtime/storage.py +115 -0
  61. uipath_langchain/vectorstores/context_grounding_vectorstore.py +90 -110
  62. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/METADATA +42 -22
  63. uipath_langchain-0.1.24.dist-info/RECORD +76 -0
  64. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/WHEEL +1 -1
  65. uipath_langchain-0.1.24.dist-info/entry_points.txt +5 -0
  66. uipath_langchain/_cli/_runtime/_context.py +0 -21
  67. uipath_langchain/_cli/_runtime/_conversation.py +0 -298
  68. uipath_langchain/_cli/_runtime/_exception.py +0 -17
  69. uipath_langchain/_cli/_runtime/_input.py +0 -139
  70. uipath_langchain/_cli/_runtime/_output.py +0 -234
  71. uipath_langchain/_cli/_runtime/_runtime.py +0 -379
  72. uipath_langchain/_cli/_utils/_graph.py +0 -199
  73. uipath_langchain/_cli/cli_dev.py +0 -44
  74. uipath_langchain/_cli/cli_eval.py +0 -78
  75. uipath_langchain/_cli/cli_run.py +0 -82
  76. uipath_langchain/_tracing/_oteladapter.py +0 -222
  77. uipath_langchain/_tracing/_utils.py +0 -28
  78. uipath_langchain/builder/agent_config.py +0 -191
  79. uipath_langchain/tools/preconfigured.py +0 -191
  80. uipath_langchain-0.0.133.dist-info/RECORD +0 -41
  81. uipath_langchain-0.0.133.dist-info/entry_points.txt +0 -2
  82. /uipath_langchain/{tools/__init__.py → py.typed} +0 -0
  83. {uipath_langchain-0.0.133.dist-info → uipath_langchain-0.1.24.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/chat/mapper.py
@@ -0,0 +1,309 @@
+ import json
+ import logging
+ from datetime import datetime, timezone
+ from typing import Any, cast
+ from uuid import uuid4
+
+ from langchain_core.messages import (
+     AIMessageChunk,
+     BaseMessage,
+     HumanMessage,
+     TextContentBlock,
+     ToolCallChunk,
+     ToolMessage,
+ )
+ from pydantic import ValidationError
+ from uipath.core.chat import (
+     UiPathConversationContentPartChunkEvent,
+     UiPathConversationContentPartEndEvent,
+     UiPathConversationContentPartEvent,
+     UiPathConversationContentPartStartEvent,
+     UiPathConversationMessage,
+     UiPathConversationMessageEndEvent,
+     UiPathConversationMessageEvent,
+     UiPathConversationMessageStartEvent,
+     UiPathConversationToolCallEndEvent,
+     UiPathConversationToolCallEvent,
+     UiPathConversationToolCallStartEvent,
+     UiPathInlineValue,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ class UiPathChatMessagesMapper:
+     """Stateful mapper that converts LangChain messages to UiPath message events.
+
+     Maintains state across multiple message conversions to properly track:
+     - the AI message ID associated with each tool call, for correlation with its ToolMessage
+     """
+
+     def __init__(self):
+         """Initialize the mapper with empty state."""
+         self.tool_call_to_ai_message: dict[str, str] = {}
+         self.seen_message_ids: set[str] = set()
+
+     def _extract_text(self, content: Any) -> str:
+         """Normalize LangGraph message.content to plain text."""
+         if isinstance(content, str):
+             return content
+         if isinstance(content, list):
+             return "".join(
+                 part.get("text", "")
+                 for part in content
+                 if isinstance(part, dict) and part.get("type") == "text"
+             )
+         return str(content or "")
+
+     def map_messages(self, messages: list[Any]) -> list[Any]:
+         """Normalize any 'messages' list into LangChain messages.
+
+         - If already BaseMessage instances: return as-is.
+         - If UiPathConversationMessage: convert to HumanMessage.
+         """
+         if not isinstance(messages, list):
+             raise TypeError("messages must be a list")
+
+         if not messages:
+             return []
+
+         first = messages[0]
+
+         # Case 1: already LangChain messages
+         if isinstance(first, BaseMessage):
+             return cast(list[BaseMessage], messages)
+
+         # Case 2: UiPath messages -> convert to HumanMessage
+         if isinstance(first, UiPathConversationMessage):
+             if not all(isinstance(m, UiPathConversationMessage) for m in messages):
+                 raise TypeError("Mixed message types not supported")
+             return self._map_messages_internal(
+                 cast(list[UiPathConversationMessage], messages)
+             )
+
+         # Case 3: list[dict] -> parse to list[UiPathConversationMessage]
+         if isinstance(first, dict):
+             try:
+                 parsed_messages = [
+                     UiPathConversationMessage.model_validate(message)
+                     for message in messages
+                 ]
+                 return self._map_messages_internal(parsed_messages)
+             except ValidationError:
+                 pass
+
+         # Fallback: unknown type, just pass through
+         return messages
+
+     def _map_messages_internal(
+         self, messages: list[UiPathConversationMessage]
+     ) -> list[HumanMessage]:
+         """
+         Convert UiPathConversationMessages into a list of HumanMessages for LangGraph.
+         Supports multimodal content parts (text, external content) and preserves metadata.
+         """
+         human_messages: list[HumanMessage] = []
+
+         for uipath_msg in messages:
+             # Loop over each content part
+             if uipath_msg.content_parts:
+                 for part in uipath_msg.content_parts:
+                     data = part.data
+                     content = ""
+                     metadata: dict[str, Any] = {
+                         "message_id": uipath_msg.message_id,
+                         "content_part_id": part.content_part_id,
+                         "mime_type": part.mime_type,
+                         "created_at": uipath_msg.created_at,
+                         "updated_at": uipath_msg.updated_at,
+                     }
+
+                     if isinstance(data, UiPathInlineValue):
+                         content = str(data.inline)
+
+                     # Append a HumanMessage for this content part
+                     human_messages.append(
+                         HumanMessage(content=content, metadata=metadata)
+                     )
+
+             # Handle the case where there are no content parts
+             else:
+                 metadata = {
+                     "message_id": uipath_msg.message_id,
+                     "role": uipath_msg.role,
+                     "created_at": uipath_msg.created_at,
+                     "updated_at": uipath_msg.updated_at,
+                 }
+                 human_messages.append(HumanMessage(content="", metadata=metadata))
+
+         return human_messages
+
+     def map_event(
+         self,
+         message: BaseMessage,
+     ) -> UiPathConversationMessageEvent | None:
+         """Convert a LangGraph BaseMessage (chunk or full) into a UiPathConversationMessageEvent.
+
+         Args:
+             message: The LangChain message to convert.
+
+         Returns:
+             A UiPathConversationMessageEvent if the message should be emitted, None otherwise.
+         """
+         # Format timestamp as ISO 8601 UTC with milliseconds: 2025-01-04T10:30:00.123Z
+         timestamp = (
+             datetime.now(timezone.utc)
+             .isoformat(timespec="milliseconds")
+             .replace("+00:00", "Z")
+         )
+
+         # --- Streaming AIMessageChunk ---
+         if isinstance(message, AIMessageChunk):
+             if message.id is None:
+                 return None
+
+             msg_event = UiPathConversationMessageEvent(
+                 message_id=message.id,
+             )
+
+             # Check if this is the last chunk by examining chunk_position
+             if message.chunk_position == "last":
+                 msg_event.end = UiPathConversationMessageEndEvent(timestamp=timestamp)
+                 msg_event.content_part = UiPathConversationContentPartEvent(
+                     content_part_id=f"chunk-{message.id}-0",
+                     end=UiPathConversationContentPartEndEvent(),
+                 )
+                 return msg_event
+
+             # For every new message_id, start a new message
+             if message.id not in self.seen_message_ids:
+                 self.seen_message_ids.add(message.id)
+                 msg_event.start = UiPathConversationMessageStartEvent(
+                     role="assistant", timestamp=timestamp
+                 )
+                 msg_event.content_part = UiPathConversationContentPartEvent(
+                     content_part_id=f"chunk-{message.id}-0",
+                     start=UiPathConversationContentPartStartEvent(
+                         mime_type="text/plain"
+                     ),
+                 )
+
+             elif message.content_blocks:
+                 for block in message.content_blocks:
+                     block_type = block.get("type")
+
+                     if block_type == "text":
+                         text_block = cast(TextContentBlock, block)
+                         text = text_block["text"]
+
+                         msg_event.content_part = UiPathConversationContentPartEvent(
+                             content_part_id=f"chunk-{message.id}-0",
+                             chunk=UiPathConversationContentPartChunkEvent(
+                                 data=text,
+                                 content_part_sequence=0,
+                             ),
+                         )
+
+                     elif block_type == "tool_call_chunk":
+                         tool_chunk_block = cast(ToolCallChunk, block)
+
+                         tool_call_id = tool_chunk_block.get("id")
+                         if tool_call_id:
+                             # Track tool_call_id -> ai_message_id mapping
+                             self.tool_call_to_ai_message[str(tool_call_id)] = message.id
+
+                         args = tool_chunk_block.get("args") or ""
+
+                         msg_event.content_part = UiPathConversationContentPartEvent(
+                             content_part_id=f"chunk-{message.id}-0",
+                             chunk=UiPathConversationContentPartChunkEvent(
+                                 data=args,
+                                 content_part_sequence=0,
+                             ),
+                         )
+                         # Continue so that multiple tool_call_chunks in the same
+                         # block list are handled correctly
+                         continue
+
+             # Fallback: raw string content on the chunk (rare when using content_blocks)
+             elif isinstance(message.content, str) and message.content:
+                 msg_event.content_part = UiPathConversationContentPartEvent(
+                     content_part_id=f"content-{message.id}",
+                     chunk=UiPathConversationContentPartChunkEvent(
+                         data=message.content,
+                         content_part_sequence=0,
+                     ),
+                 )
+
+             if (
+                 msg_event.start
+                 or msg_event.content_part
+                 or msg_event.tool_call
+                 or msg_event.end
+             ):
+                 return msg_event
+
+             return None
+
+         # --- ToolMessage ---
+         if isinstance(message, ToolMessage):
+             # Look up the AI message ID using the tool_call_id
+             result_message_id = (
+                 self.tool_call_to_ai_message.get(message.tool_call_id)
+                 if message.tool_call_id
+                 else None
+             )
+
+             # If no AI message ID was found, the tool result cannot be associated
+             # with its originating AI message; fall back to a generated ID below.
+             if not result_message_id:
+                 logger.warning(
+                     f"Tool message {message.tool_call_id} has no associated AI message ID; using a generated message ID."
+                 )
+
+             # Clean up the mapping after use
+             if (
+                 message.tool_call_id
+                 and message.tool_call_id in self.tool_call_to_ai_message
+             ):
+                 del self.tool_call_to_ai_message[message.tool_call_id]
+
+             content_value: Any = message.content
+             if isinstance(content_value, str):
+                 try:
+                     content_value = json.loads(content_value)
+                 except (json.JSONDecodeError, TypeError):
+                     # Keep as string if not valid JSON
+                     pass
+
+             return UiPathConversationMessageEvent(
+                 message_id=result_message_id or str(uuid4()),
+                 tool_call=UiPathConversationToolCallEvent(
+                     tool_call_id=message.tool_call_id,
+                     start=UiPathConversationToolCallStartEvent(
+                         tool_name=message.name,
+                         arguments=None,
+                         timestamp=timestamp,
+                     ),
+                     end=UiPathConversationToolCallEndEvent(
+                         timestamp=timestamp,
+                         output=UiPathInlineValue(inline=content_value),
+                     ),
+                 ),
+             )
+
+         # --- Fallback for other BaseMessage types ---
+         text_content = self._extract_text(message.content)
+         return UiPathConversationMessageEvent(
+             message_id=message.id,
+             start=UiPathConversationMessageStartEvent(
+                 role="assistant", timestamp=timestamp
+             ),
+             content_part=UiPathConversationContentPartEvent(
+                 content_part_id=f"cp-{message.id}",
+                 chunk=UiPathConversationContentPartChunkEvent(data=text_content),
+             ),
+             end=UiPathConversationMessageEndEvent(),
+         )
+
+
+ __all__ = ["UiPathChatMessagesMapper"]
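
The mapper above is stateful (seen message IDs, plus the tool_call_id -> AI message mapping), so a single instance should be reused for an entire run. A minimal consumption sketch, not part of the package: it assumes a compiled LangGraph `graph` and LangGraph's `stream_mode="messages"`, which yields `(chunk, metadata)` tuples; the import path simply follows the new file location.

from uipath_langchain.chat.mapper import UiPathChatMessagesMapper

async def stream_conversation(graph, uipath_messages):
    # Stateful: keep one mapper per run so tool calls stay correlated
    # with the AI messages that issued them.
    mapper = UiPathChatMessagesMapper()
    inputs = {"messages": mapper.map_messages(uipath_messages)}
    async for chunk, _metadata in graph.astream(inputs, stream_mode="messages"):
        event = mapper.map_event(chunk)
        if event is not None:  # None: nothing to emit for this chunk
            yield event
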
uipath_langchain/chat/models.py
@@ -1,15 +1,19 @@
  import json
  import logging
- from typing import Any, Dict, List, Literal, Optional, Union
+ from typing import Any, AsyncIterator, Iterator, Literal, Union

  from langchain_core.callbacks import (
      AsyncCallbackManagerForLLMRun,
      CallbackManagerForLLMRun,
  )
  from langchain_core.language_models import LanguageModelInput
- from langchain_core.messages import AIMessage, BaseMessage
+ from langchain_core.language_models.chat_models import (
+     agenerate_from_stream,
+     generate_from_stream,
+ )
+ from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
  from langchain_core.messages.ai import UsageMetadata
- from langchain_core.outputs import ChatGeneration, ChatResult
+ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
  from langchain_core.runnables import Runnable
  from langchain_openai.chat_models import AzureChatOpenAI
  from pydantic import BaseModel
@@ -25,37 +29,118 @@ class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):

      def _generate(
          self,
-         messages: List[BaseMessage],
-         stop: Optional[List[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          if "tools" in kwargs and not kwargs["tools"]:
              del kwargs["tools"]
+
+         if self.streaming:
+             stream_iter = self._stream(
+                 messages, stop=stop, run_manager=run_manager, **kwargs
+             )
+             return generate_from_stream(stream_iter)
+
          payload = self._get_request_payload(messages, stop=stop, **kwargs)
          response = self._call(self.url, payload, self.auth_headers)
          return self._create_chat_result(response)

      async def _agenerate(
          self,
-         messages: List[BaseMessage],
-         stop: Optional[List[str]] = None,
-         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          if "tools" in kwargs and not kwargs["tools"]:
              del kwargs["tools"]
+
+         if self.streaming:
+             stream_iter = self._astream(
+                 messages, stop=stop, run_manager=run_manager, **kwargs
+             )
+             return await agenerate_from_stream(stream_iter)
+
          payload = self._get_request_payload(messages, stop=stop, **kwargs)
          response = await self._acall(self.url, payload, self.auth_headers)
          return self._create_chat_result(response)

+     def _stream(
+         self,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> Iterator[ChatGenerationChunk]:
+         if "tools" in kwargs and not kwargs["tools"]:
+             del kwargs["tools"]
+         kwargs["stream"] = True
+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+         default_chunk_class = AIMessageChunk
+
+         for chunk in self._stream_request(self.url, payload, self.auth_headers):
+             if self.logger:
+                 self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}")
+             generation_chunk = self._convert_chunk(
+                 chunk, default_chunk_class, include_tool_calls=True
+             )
+             if generation_chunk is None:
+                 if self.logger:
+                     self.logger.debug("[Stream] Skipping None generation_chunk")
+                 continue
+
+             if self.logger:
+                 self.logger.debug(
+                     f"[Stream] Yielding generation_chunk: {generation_chunk}"
+                 )
+
+             if run_manager:
+                 run_manager.on_llm_new_token(
+                     generation_chunk.text,
+                     chunk=generation_chunk,
+                 )
+
+             yield generation_chunk
+
+     async def _astream(
+         self,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[ChatGenerationChunk]:
+         if "tools" in kwargs and not kwargs["tools"]:
+             del kwargs["tools"]
+         kwargs["stream"] = True
+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+         default_chunk_class = AIMessageChunk
+
+         async for chunk in self._astream_request(self.url, payload, self.auth_headers):
+             generation_chunk = self._convert_chunk(
+                 chunk, default_chunk_class, include_tool_calls=True
+             )
+             if generation_chunk is None:
+                 continue
+
+             if run_manager:
+                 await run_manager.on_llm_new_token(
+                     generation_chunk.text,
+                     chunk=generation_chunk,
+                 )
+
+             yield generation_chunk
+
      def with_structured_output(
          self,
-         schema: Optional[Any] = None,
+         schema: Any = None,
          *,
          method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
          include_raw: bool = False,
-         strict: Optional[bool] = None,
+         strict: bool | None = None,
          **kwargs: Any,
      ) -> Runnable[LanguageModelInput, Any]:
          """Model wrapper that returns outputs formatted to match the given schema."""
@@ -86,8 +171,8 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):

      def _create_chat_result(
          self,
-         response: Union[Dict[str, Any], BaseModel],
-         generation_info: Optional[Dict[Any, Any]] = None,
+         response: Union[dict[str, Any], BaseModel],
+         generation_info: dict[Any, Any] | None = None,
      ) -> ChatResult:
          if not isinstance(response, dict):
              response = response.model_dump()
@@ -128,9 +213,9 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
          self,
          input_: LanguageModelInput,
          *,
-         stop: Optional[List[str]] = None,
+         stop: list[str] | None = None,
          **kwargs: Any,
-     ) -> Dict[Any, Any]:
+     ) -> dict[Any, Any]:
          payload = super()._get_request_payload(input_, stop=stop, **kwargs)
          # hacks to make the request work with uipath normalized
          for message in payload["messages"]:
@@ -149,11 +234,40 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
          }
          return payload

+     def _normalize_tool_choice(self, kwargs: dict[str, Any]) -> None:
+         """Normalize tool_choice for UiPath Gateway compatibility.
+
+         Converts LangChain tool_choice formats to the UiPath Gateway format:
+         - String "required" -> {"type": "required"}
+         - String "auto" -> {"type": "auto"}
+         - String "none" -> {"type": "none"}
+         - Dict with function -> {"type": "tool", "name": "function_name"}
+         """
+         if "tool_choice" in kwargs:
+             tool_choice = kwargs["tool_choice"]
+
+             if isinstance(tool_choice, str):
+                 if tool_choice in ("required", "auto", "none"):
+                     logger.debug(
+                         f"Converting tool_choice from '{tool_choice}' to {{'type': '{tool_choice}'}}"
+                     )
+                     kwargs["tool_choice"] = {"type": tool_choice}
+             elif (
+                 isinstance(tool_choice, dict) and tool_choice.get("type") == "function"
+             ):
+                 function_name = tool_choice["function"]["name"]
+                 logger.debug(
+                     f"Converting tool_choice from function '{function_name}' to tool format"
+                 )
+                 kwargs["tool_choice"] = {
+                     "type": "tool",
+                     "name": function_name,
+                 }
+
      def _generate(
          self,
-         messages: List[BaseMessage],
-         stop: Optional[List[str]] = None,
-         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          """Override the _generate method to implement the chat model logic.
@@ -173,21 +287,23 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
          """
          if kwargs.get("tools"):
              kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-         if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-             kwargs["tool_choice"] = {
-                 "type": "tool",
-                 "name": kwargs["tool_choice"]["function"]["name"],
-             }
-         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+         self._normalize_tool_choice(kwargs)
+
+         if self.streaming:
+             stream_iter = self._stream(
+                 messages, stop=stop, run_manager=run_manager, **kwargs
+             )
+             return generate_from_stream(stream_iter)

+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
          response = self._call(self.url, payload, self.auth_headers)
          return self._create_chat_result(response)

      async def _agenerate(
          self,
-         messages: List[BaseMessage],
-         stop: Optional[List[str]] = None,
-         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
          **kwargs: Any,
      ) -> ChatResult:
          """Override the _agenerate method to implement the chat model logic.
@@ -207,25 +323,122 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
          """
          if kwargs.get("tools"):
              kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-         if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-             kwargs["tool_choice"] = {
-                 "type": "tool",
-                 "name": kwargs["tool_choice"]["function"]["name"],
-             }
-         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+         self._normalize_tool_choice(kwargs)
+
+         if self.streaming:
+             stream_iter = self._astream(
+                 messages, stop=stop, run_manager=run_manager, **kwargs
+             )
+             return await agenerate_from_stream(stream_iter)

+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
          response = await self._acall(self.url, payload, self.auth_headers)
          return self._create_chat_result(response)

+     def _stream(
+         self,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: CallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> Iterator[ChatGenerationChunk]:
+         """Stream the LLM on a given prompt.
+
+         Args:
+             messages: the prompt composed of a list of messages.
+             stop: a list of strings on which the model should stop generating.
+             run_manager: A run manager with callbacks for the LLM.
+             **kwargs: Additional keyword arguments.
+
+         Returns:
+             An iterator of ChatGenerationChunk objects.
+         """
+         if kwargs.get("tools"):
+             kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+         self._normalize_tool_choice(kwargs)
+         kwargs["stream"] = True
+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+         default_chunk_class = AIMessageChunk
+
+         for chunk in self._stream_request(self.url, payload, self.auth_headers):
+             if self.logger:
+                 self.logger.debug(f"[Stream] Got chunk from _stream_request: {chunk}")
+             generation_chunk = self._convert_chunk(
+                 chunk, default_chunk_class, include_tool_calls=True
+             )
+             if generation_chunk is None:
+                 if self.logger:
+                     self.logger.debug("[Stream] Skipping None generation_chunk")
+                 continue
+
+             if self.logger:
+                 self.logger.debug(
+                     f"[Stream] Yielding generation_chunk: {generation_chunk}"
+                 )
+
+             if run_manager:
+                 run_manager.on_llm_new_token(
+                     generation_chunk.text,
+                     chunk=generation_chunk,
+                 )
+
+             yield generation_chunk
+
+     async def _astream(
+         self,
+         messages: list[BaseMessage],
+         stop: list[str] | None = None,
+         run_manager: AsyncCallbackManagerForLLMRun | None = None,
+         **kwargs: Any,
+     ) -> AsyncIterator[ChatGenerationChunk]:
+         """Async stream the LLM on a given prompt.
+
+         Args:
+             messages: the prompt composed of a list of messages.
+             stop: a list of strings on which the model should stop generating.
+             run_manager: A run manager with callbacks for the LLM.
+             **kwargs: Additional keyword arguments.
+
+         Returns:
+             An async iterator of ChatGenerationChunk objects.
+         """
+         if kwargs.get("tools"):
+             kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+         self._normalize_tool_choice(kwargs)
+         kwargs["stream"] = True
+         payload = self._get_request_payload(messages, stop=stop, **kwargs)
+
+         # Update headers to enable streaming
+         headers = {**self.auth_headers}
+         headers["X-UiPath-Streaming-Enabled"] = "true"
+
+         default_chunk_class = AIMessageChunk
+
+         async for chunk in self._astream_request(self.url, payload, headers):
+             generation_chunk = self._convert_chunk(
+                 chunk, default_chunk_class, include_tool_calls=True
+             )
+             if generation_chunk is None:
+                 continue
+
+             if run_manager:
+                 await run_manager.on_llm_new_token(
+                     generation_chunk.text,
+                     chunk=generation_chunk,
+                 )
+
+             yield generation_chunk
+
      def with_structured_output(
          self,
-         schema: Optional[Any] = None,
+         schema: Any = None,
          *,
          method: Literal[
              "function_calling", "json_mode", "json_schema"
          ] = "function_calling",
          include_raw: bool = False,
-         strict: Optional[bool] = None,
+         strict: bool | None = None,
          **kwargs: Any,
      ) -> Runnable[LanguageModelInput, Any]:
          """Model wrapper that returns outputs formatted to match the given schema."""