uipath-langchain 0.0.110__py3-none-any.whl → 0.0.111__py3-none-any.whl

This diff compares the contents of two publicly available package versions as they appear in their public registries. It is provided for informational purposes only.

This version of uipath-langchain has been flagged as potentially problematic.

@@ -1,29 +1,22 @@
-import json
 import logging
 from typing import Any, Optional, cast
 
 from langgraph.types import Command
 from uipath import UiPath
 from uipath._cli._runtime._contracts import (
+    UiPathApiTrigger,
     UiPathErrorCategory,
+    UiPathResumeTrigger,
     UiPathResumeTriggerType,
-    UiPathRuntimeStatus,
 )
+from uipath._cli._runtime._hitl import HitlReader
 
 from ._context import LangGraphRuntimeContext
-from ._escalation import Escalation
 from ._exception import LangGraphRuntimeError
 
 logger = logging.getLogger(__name__)
 
 
-def try_convert_to_json_format(value: str) -> str:
-    try:
-        return json.loads(value)
-    except json.decoder.JSONDecodeError:
-        return value
-
-
 class LangGraphInputProcessor:
     """
     Handles input processing for graph execution, including resume scenarios
@@ -38,13 +31,29 @@ class LangGraphInputProcessor:
             context: The runtime context for the graph execution.
         """
         self.context = context
-        self.escalation = Escalation(self.context.config_path)
         self.uipath = UiPath()
 
     async def process(self) -> Any:
         """
-        Process the input data, handling resume scenarios by fetching
-        necessary data from UiPath if needed.
+        Process the input data for graph execution, handling both fresh starts and resume scenarios.
+
+        This method determines whether the graph is being executed fresh or resumed from a previous state.
+        For fresh executions, it returns the input JSON directly. For resume scenarios, it fetches
+        the latest trigger information from the database and constructs a Command object with the
+        appropriate resume data.
+
+        The method handles different types of resume triggers:
+        - API triggers: Creates an UiPathApiTrigger with inbox_id and request payload
+        - Other triggers: Uses the HitlReader to process the resume data
+
+        Returns:
+            Any: For fresh executions, returns the input JSON data directly.
+                For resume scenarios, returns a Command object containing the resume data
+                processed through the appropriate trigger handler.
+
+        Raises:
+            LangGraphRuntimeError: If there's an error fetching trigger data from the database
+                during resume processing.
         """
         logger.debug(f"Resumed: {self.context.resume} Input: {self.context.input_json}")
 
@@ -58,47 +67,48 @@ class LangGraphInputProcessor:
         if not trigger:
             return Command(resume=self.context.input_json)
 
-        type, key, folder_path, folder_key, payload = trigger
-        logger.debug(f"ResumeTrigger: {type} {key}")
-        if type == UiPathResumeTriggerType.ACTION.value and key:
-            action = await self.uipath.actions.retrieve_async(
-                key, app_folder_key=folder_key, app_folder_path=folder_path
+        trigger_type, key, folder_path, folder_key, payload = trigger
+        resume_trigger = UiPathResumeTrigger(
+            trigger_type=trigger_type,
+            item_key=key,
+            folder_path=folder_path,
+            folder_key=folder_key,
+            payload=payload,
+        )
+        logger.debug(f"ResumeTrigger: {trigger_type} {key}")
+
+        # populate back expected fields for api_triggers
+        if resume_trigger.trigger_type == UiPathResumeTriggerType.API:
+            resume_trigger.api_resume = UiPathApiTrigger(
+                inbox_id=resume_trigger.item_key, request=resume_trigger.payload
             )
-            logger.debug(f"Action: {action}")
-            if action.data is None:
-                return Command(resume={})
-            if self.escalation and self.escalation.enabled:
-                extracted_value = self.escalation.extract_response_value(action.data)
-                return Command(resume=extracted_value)
-            return Command(resume=action.data)
-        elif type == UiPathResumeTriggerType.API.value and key:
-            payload = await self._get_api_payload(key)
-            if payload:
-                return Command(resume=payload)
-        elif type == UiPathResumeTriggerType.JOB.value and key:
-            job = await self.uipath.jobs.retrieve_async(key)
-            if (
-                job.state
-                and not job.state.lower()
-                == UiPathRuntimeStatus.SUCCESSFUL.value.lower()
-            ):
-                error_code = "INVOKED_PROCESS_FAILURE"
-                error_title = "Invoked process did not finish successfully."
-                error_detail = try_convert_to_json_format(
-                    str(job.job_error or job.info)
-                )
-                raise LangGraphRuntimeError(
-                    error_code,
-                    error_title,
-                    error_detail,
-                    UiPathErrorCategory.USER,
-                )
-            if job.output_arguments:
-                return Command(resume=try_convert_to_json_format(job.output_arguments))
-        return Command(resume=self.context.input_json)
+        return Command(resume=await HitlReader.read(resume_trigger))
 
     async def _get_latest_trigger(self) -> Optional[tuple[str, str, str, str, str]]:
-        """Fetch the most recent trigger from the database."""
+        """
+        Fetch the most recent resume trigger from the database.
+
+        This private method queries the resume triggers table to retrieve the latest trigger
+        information based on timestamp. It handles database connection setup and executes
+        a SQL query to fetch trigger data needed for resume operations.
+
+        The method returns trigger information as a tuple containing:
+        - type: The type of trigger (e.g., 'API', 'MANUAL', etc.)
+        - key: The unique identifier for the trigger/item
+        - folder_path: The path to the folder containing the trigger
+        - folder_key: The unique identifier for the folder
+        - payload: The serialized payload data associated with the trigger
+
+        Returns:
+            Optional[tuple[str, str, str, str, str]]: A tuple containing (type, key, folder_path,
+            folder_key, payload) for the most recent trigger, or None if no triggers are found
+            or if the memory context is not available.
+
+        Raises:
+            LangGraphRuntimeError: If there's an error during database connection setup, query
+            execution, or result fetching. The original exception is wrapped with context
+            about the database operation failure.
+        """
         if self.context.memory is None:
             return None
         try:
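The rewritten `process()` above rebuilds a `UiPathResumeTrigger` from the stored row and hands it to `HitlReader.read`, wrapping the result in a LangGraph `Command`. For orientation, the sketch below (not part of this diff; the graph, node, and values are made up) shows the plain LangGraph interrupt/resume round trip that such a `Command(resume=...)` feeds into:

```python
from typing import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.types import Command, interrupt


class State(TypedDict):
    answer: str


def ask_human(state: State) -> dict:
    # Execution suspends here; the interrupt value is what the output
    # processor later turns into a resume trigger.
    value = interrupt({"question": "Approve this step?"})
    return {"answer": value}


builder = StateGraph(State)
builder.add_node("ask_human", ask_human)
builder.add_edge(START, "ask_human")
builder.add_edge("ask_human", END)
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "demo"}}
graph.invoke({"answer": ""}, config)  # runs until the interrupt suspends the graph
result = graph.invoke(Command(resume="approved"), config)  # resumes with the human reply
print(result["answer"])  # -> "approved"
```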
@@ -124,30 +134,3 @@ class LangGraphInputProcessor:
                 f"Error querying resume trigger information: {str(e)}",
                 UiPathErrorCategory.SYSTEM,
             ) from e
-
-    async def _get_api_payload(self, inbox_id: str) -> Any:
-        """
-        Fetch payload data for API triggers.
-
-        Args:
-            inbox_id: The Id of the inbox to fetch the payload for.
-
-        Returns:
-            The value field from the API response payload, or None if an error occurs.
-        """
-        try:
-            response = self.uipath.api_client.request(
-                "GET",
-                f"/orchestrator_/api/JobTriggers/GetPayload/{inbox_id}",
-                include_folder_headers=True,
-            )
-            data = response.json()
-            return data.get("payload")
-        except Exception as e:
-            raise LangGraphRuntimeError(
-                "API_CONNECTION_ERROR",
-                "Failed to get trigger payload",
-                f"Error fetching API trigger payload for inbox {inbox_id}: {str(e)}",
-                UiPathErrorCategory.SYSTEM,
-                response.status_code,
-            ) from e
@@ -1,95 +1,30 @@
 import json
 import logging
-import uuid
-from dataclasses import asdict, dataclass
 from functools import cached_property
-from typing import Any, Dict, Optional, Union, cast
+from typing import Any, Dict, Optional, cast
 
 from langgraph.types import Interrupt, StateSnapshot
-from uipath import UiPath
 from uipath._cli._runtime._contracts import (
-    UiPathApiTrigger,
     UiPathErrorCategory,
     UiPathResumeTrigger,
-    UiPathResumeTriggerType,
     UiPathRuntimeResult,
     UiPathRuntimeStatus,
 )
-from uipath.models import CreateAction, InvokeProcess, WaitAction, WaitJob
-from uipath.models.actions import Action
+from uipath._cli._runtime._hitl import HitlProcessor
 
 from ._context import LangGraphRuntimeContext
-from ._escalation import Escalation
 from ._exception import LangGraphRuntimeError
 
 logger = logging.getLogger(__name__)
 
 
-@dataclass
-class InterruptInfo:
-    """Contains all information about an interrupt."""
-
-    value: Any
-
-    @property
-    def type(self) -> Optional[UiPathResumeTriggerType]:
-        """Returns the type of the interrupt value."""
-        if isinstance(self.value, CreateAction):
-            return UiPathResumeTriggerType.ACTION
-        if isinstance(self.value, WaitAction):
-            return UiPathResumeTriggerType.ACTION
-        if isinstance(self.value, InvokeProcess):
-            return UiPathResumeTriggerType.JOB
-        if isinstance(self.value, WaitJob):
-            return UiPathResumeTriggerType.JOB
-        return None
-
-    @property
-    def identifier(self) -> Optional[str]:
-        """Returns the identifier based on the type."""
-        if isinstance(self.value, Action):
-            return str(self.value.key)
-        return None
-
-    def serialize(self) -> str:
-        """
-        Converts the interrupt value to a JSON string if possible,
-        falls back to string representation if not.
-        """
-        try:
-            if hasattr(self.value, "dict"):
-                data = self.value.dict()
-            elif hasattr(self.value, "to_dict"):
-                data = self.value.to_dict()
-            elif hasattr(self.value, "__dataclass_fields__"):
-                data = asdict(self.value)
-            else:
-                data = dict(self.value)
-
-            return json.dumps(data, default=str)
-        except (TypeError, ValueError, json.JSONDecodeError):
-            return str(self.value)
-
-    @cached_property
-    def resume_trigger(self) -> UiPathResumeTrigger:
-        """Creates the resume trigger based on interrupt type."""
-        if self.type is None:
-            return UiPathResumeTrigger(
-                api_resume=UiPathApiTrigger(
-                    inbox_id=str(uuid.uuid4()), request=self.serialize()
-                )
-            )
-        else:
-            return UiPathResumeTrigger(itemKey=self.identifier, triggerType=self.type)
-
-
 class LangGraphOutputProcessor:
     """
     Contains and manages the complete output information from graph execution.
     Handles serialization, interrupt data, and file output.
     """
 
-    def __init__(self, context: LangGraphRuntimeContext):
+    def __init__(self, context: LangGraphRuntimeContext) -> None:
         """
         Initialize the LangGraphOutputProcessor.
 
@@ -97,48 +32,47 @@ class LangGraphOutputProcessor:
             context: The runtime context for the graph execution.
         """
         self.context = context
-        self._interrupt_info: Optional[InterruptInfo] = None
+        self._hitl_processor: Optional[HitlProcessor] = None
         self._resume_trigger: Optional[UiPathResumeTrigger] = None
 
+    @classmethod
+    async def create(
+        cls, context: LangGraphRuntimeContext
+    ) -> "LangGraphOutputProcessor":
+        """
+        Create and initialize a new LangGraphOutputProcessor instance asynchronously.
+
+        Args:
+            context: The runtime context for the graph execution.
+
+        Returns:
+            LangGraphOutputProcessor: A new initialized instance.
+        """
+        instance = cls(context)
+
         # Process interrupt information during initialization
-        state = cast(StateSnapshot, self.context.state)
+        state = cast(StateSnapshot, context.state)
         if not state or not hasattr(state, "next") or not state.next:
-            return
+            return instance
 
         for task in state.tasks:
             if hasattr(task, "interrupts") and task.interrupts:
                 for interrupt in task.interrupts:
                     if isinstance(interrupt, Interrupt):
-                        self._interrupt_info = InterruptInfo(interrupt.value)
-                        self._resume_trigger = self._interrupt_info.resume_trigger
-                        return
+                        instance._hitl_processor = HitlProcessor(interrupt.value)
+                        return instance
+
+        return instance
 
     @property
     def status(self) -> UiPathRuntimeStatus:
         """Determines the execution status based on state."""
         return (
             UiPathRuntimeStatus.SUSPENDED
-            if self._interrupt_info
+            if self._hitl_processor
             else UiPathRuntimeStatus.SUCCESSFUL
         )
 
-    @property
-    def interrupt_value(self) -> Union[Action, InvokeProcess, Any]:
-        """Returns the actual value of the interrupt, with its specific type."""
-        if self.interrupt_info is None:
-            return None
-        return self.interrupt_info.value
-
-    @property
-    def interrupt_info(self) -> Optional[InterruptInfo]:
-        """Gets interrupt information if available."""
-        return self._interrupt_info
-
-    @property
-    def resume_trigger(self) -> Optional[UiPathResumeTrigger]:
-        """Gets resume trigger if interrupted."""
-        return self._resume_trigger
-
     @cached_property
     def serialized_output(self) -> Dict[str, Any]:
         """Serializes the graph execution result."""
@@ -197,7 +131,7 @@ class LangGraphOutputProcessor:
             return UiPathRuntimeResult(
                 output=self.serialized_output,
                 status=self.status,
-                resume=self.resume_trigger if self.resume_trigger else None,
+                resume=self._resume_trigger if self._resume_trigger else None,
             )
 
         except LangGraphRuntimeError:
@@ -217,7 +151,7 @@ class LangGraphOutputProcessor:
         Raises:
             LangGraphRuntimeError: If database operations fail.
         """
-        if not self.resume_trigger or not self.context.memory:
+        if not self._hitl_processor or not self.context.memory:
             return
 
         try:
@@ -247,122 +181,48 @@ class LangGraphOutputProcessor:
                ) from e
 
            try:
-                default_escalation = Escalation()
-                if default_escalation.enabled and isinstance(
-                    self.interrupt_value, str
-                ):
-                    action = await default_escalation.create(self.interrupt_value)
-                    if action:
-                        self._resume_trigger = UiPathResumeTrigger(
-                            trigger_type=UiPathResumeTriggerType.ACTION,
-                            item_key=action.key,
-                        )
-                if isinstance(self.interrupt_info, InterruptInfo):
-                    uipath_sdk = UiPath()
-                    if self.interrupt_info.type is UiPathResumeTriggerType.JOB:
-                        if isinstance(self.interrupt_value, InvokeProcess):
-                            job = await uipath_sdk.processes.invoke_async(
-                                name=self.interrupt_value.name,
-                                input_arguments=self.interrupt_value.input_arguments,
-                            )
-                            if job:
-                                self._resume_trigger = UiPathResumeTrigger(
-                                    trigger_type=UiPathResumeTriggerType.JOB,
-                                    item_key=job.key,
-                                )
-                        elif isinstance(self.interrupt_value, WaitJob):
-                            self._resume_trigger = UiPathResumeTrigger(
-                                triggerType=UiPathResumeTriggerType.JOB,
-                                itemKey=self.interrupt_value.job.key,
-                            )
-                    elif self.interrupt_info.type is UiPathResumeTriggerType.ACTION:
-                        if isinstance(self.interrupt_value, CreateAction):
-                            action = uipath_sdk.actions.create(
-                                title=self.interrupt_value.title,
-                                app_name=self.interrupt_value.app_name
-                                if self.interrupt_value.app_name
-                                else "",
-                                app_folder_path=self.interrupt_value.app_folder_path
-                                if self.interrupt_value.app_folder_path
-                                else "",
-                                app_folder_key=self.interrupt_value.app_folder_key
-                                if self.interrupt_value.app_folder_key
-                                else "",
-                                app_key=self.interrupt_value.app_key
-                                if self.interrupt_value.app_key
-                                else "",
-                                app_version=self.interrupt_value.app_version
-                                if self.interrupt_value.app_version
-                                else 1,
-                                assignee=self.interrupt_value.assignee
-                                if self.interrupt_value.assignee
-                                else "",
-                                data=self.interrupt_value.data,
-                            )
-                            if action:
-                                self._resume_trigger = UiPathResumeTrigger(
-                                    trigger_type=UiPathResumeTriggerType.ACTION,
-                                    item_key=action.key,
-                                    payload=self.interrupt_value.model_dump_json(),
-                                    folder_path=self.interrupt_value.app_folder_path
-                                    if self.interrupt_value.app_folder_path
-                                    else None,
-                                    folder_key=self.interrupt_value.app_folder_key
-                                    if self.interrupt_value.app_folder_key
-                                    else None,
-                                )
-                        elif isinstance(self.interrupt_value, WaitAction):
-                            self._resume_trigger = UiPathResumeTrigger(
-                                triggerType=UiPathResumeTriggerType.ACTION,
-                                itemKey=self.interrupt_value.action.key,
-                                payload=self.interrupt_value.model_dump_json(),
-                                folder_path=self.interrupt_value.app_folder_path
-                                if self.interrupt_value.app_folder_path
-                                else None,
-                                folder_key=self.interrupt_value.app_folder_key
-                                if self.interrupt_value.app_folder_key
-                                else None,
-                            )
-
-            except Exception as e:
-                raise LangGraphRuntimeError(
-                    "ESCALATION_CREATION_FAILED",
-                    "Failed to create escalation action",
-                    f"Error while creating escalation action: {str(e)}",
-                    UiPathErrorCategory.SYSTEM,
-                ) from e
-
-            if (
-                self.resume_trigger.trigger_type.value
-                == UiPathResumeTriggerType.API.value
-                and self.resume_trigger.api_resume
-            ):
-                trigger_key = self.resume_trigger.api_resume.inbox_id
-                trigger_type = self.resume_trigger.trigger_type.value
-            else:
-                trigger_key = self.resume_trigger.item_key
-                trigger_type = self.resume_trigger.trigger_type.value
-
-            try:
-                logger.debug(f"ResumeTrigger: {trigger_type} {trigger_key}")
-                await cur.execute(
-                    f"INSERT INTO {self.context.resume_triggers_table} (type, key, payload, folder_path, folder_key) VALUES (?, ?, ?, ?, ?)",
-                    (
-                        trigger_type,
-                        trigger_key,
-                        self.resume_trigger.payload,
-                        self.resume_trigger.folder_path,
-                        self.resume_trigger.folder_key,
-                    ),
+                self._resume_trigger = (
+                    await self._hitl_processor.create_resume_trigger()
                 )
-                await self.context.memory.conn.commit()
            except Exception as e:
                raise LangGraphRuntimeError(
-                    "DB_INSERT_FAILED",
-                    "Failed to save resume trigger",
-                    f"Database error while saving resume trigger: {str(e)}",
+                    "HITL_EVENT_CREATION_FAILED",
+                    "Failed to process HITL request",
+                    f"Error while trying to process HITL request: {str(e)}",
                     UiPathErrorCategory.SYSTEM,
                 ) from e
+            # if API trigger, override item_key and payload
+            if self._resume_trigger:
+                if self._resume_trigger.api_resume:
+                    trigger_key = self._resume_trigger.api_resume.inbox_id
+                else:
+                    trigger_key = self._resume_trigger.item_key
+                try:
+                    logger.debug(
+                        f"ResumeTrigger: {self._resume_trigger.trigger_type} {self._resume_trigger.item_key}"
+                    )
+                    if isinstance(self._resume_trigger.payload, dict):
+                        payload = json.dumps(self._resume_trigger.payload)
+                    else:
+                        payload = str(self._resume_trigger.payload)
+                    await cur.execute(
+                        f"INSERT INTO {self.context.resume_triggers_table} (type, key, payload, folder_path, folder_key) VALUES (?, ?, ?, ?, ?)",
+                        (
+                            self._resume_trigger.trigger_type.value,
+                            trigger_key,
+                            payload,
+                            self._resume_trigger.folder_path,
+                            self._resume_trigger.folder_key,
+                        ),
+                    )
+                    await self.context.memory.conn.commit()
+                except Exception as e:
+                    raise LangGraphRuntimeError(
+                        "DB_INSERT_FAILED",
+                        "Failed to save resume trigger",
+                        f"Database error while saving resume trigger: {str(e)}",
+                        UiPathErrorCategory.SYSTEM,
+                    ) from e
        except LangGraphRuntimeError:
            raise
        except Exception as e:
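Taken together with the `_get_latest_trigger` changes in `_input.py`, the hunk above implements a simple suspend/resume handshake through the checkpoint database: one row per suspension is inserted here, and the newest row is read back on resume. The sketch below shows the idea with plain aiosqlite; the table name, schema, and ordering column are assumptions, since the real ones come from `context.resume_triggers_table` and are not shown in this diff.

```python
import aiosqlite


async def save_trigger(db_path: str, row: tuple) -> None:
    # row = (type, key, payload, folder_path, folder_key), mirroring the INSERT above
    async with aiosqlite.connect(db_path) as conn:
        await conn.execute(
            "CREATE TABLE IF NOT EXISTS resume_triggers "
            "(id INTEGER PRIMARY KEY, type TEXT, key TEXT, payload TEXT, "
            "folder_path TEXT, folder_key TEXT)"
        )
        await conn.execute(
            "INSERT INTO resume_triggers (type, key, payload, folder_path, folder_key) "
            "VALUES (?, ?, ?, ?, ?)",
            row,
        )
        await conn.commit()


async def load_latest_trigger(db_path: str):
    # Returns (type, key, folder_path, folder_key, payload) or None, which is the
    # tuple shape _get_latest_trigger() documents.
    async with aiosqlite.connect(db_path) as conn:
        async with conn.execute(
            "SELECT type, key, folder_path, folder_key, payload "
            "FROM resume_triggers ORDER BY id DESC LIMIT 1"
        ) as cur:
            return await cur.fetchone()
```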
@@ -1,9 +1,10 @@
 import json
 import logging
 import os
-from typing import List, Optional
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 from langchain_core.callbacks.base import BaseCallbackHandler
+from langchain_core.messages import BaseMessage
 from langchain_core.runnables.config import RunnableConfig
 from langchain_core.tracers.langchain import wait_for_all_tracers
 from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
@@ -103,48 +104,16 @@ class LangGraphRuntime(UiPathBaseRuntime):
             if self.context.job_id is None:
                 # Get final chunk while streaming
                 final_chunk = None
-                async for chunk in graph.astream(
+                async for stream_chunk in graph.astream(
                     processed_input,
                     graph_config,
-                    stream_mode="values",
+                    stream_mode="updates",
                     subgraphs=True,
                 ):
-                    logger.info("%s", chunk)
-                    final_chunk = chunk
+                    self._pretty_print(stream_chunk)
+                    final_chunk = stream_chunk
 
-                # Extract data from the subgraph tuple format (namespace, data)
-                if isinstance(final_chunk, tuple) and len(final_chunk) == 2:
-                    final_chunk = final_chunk[1]
-
-                # Process the final chunk to match ainvoke's output format
-                if isinstance(final_chunk, dict) and hasattr(
-                    graph, "output_channels"
-                ):
-                    output_channels = graph.output_channels
-
-                    # Case 1: Single output channel as string
-                    if (
-                        isinstance(output_channels, str)
-                        and output_channels in final_chunk
-                    ):
-                        self.context.output = final_chunk[output_channels]
-
-                    # Case 2: Sequence of output channels
-                    elif hasattr(output_channels, "__iter__") and not isinstance(
-                        output_channels, str
-                    ):
-                        # Check if all channels are present in the chunk
-                        if all(ch in final_chunk for ch in output_channels):
-                            result = {}
-                            for channel in output_channels:
-                                result[channel] = final_chunk[channel]
-                            self.context.output = result
-                        else:
-                            # Fallback if not all channels are present
-                            self.context.output = final_chunk
-                else:
-                    # Use the whole chunk as output if we can't determine output channels
-                    self.context.output = final_chunk
+                self.context.output = self._extract_graph_result(final_chunk, graph)
             else:
                 # Execute the graph normally at runtime
                 self.context.output = await graph.ainvoke(
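The streaming switch above changes what each chunk looks like: with `subgraphs=True` every chunk is a `(namespace, data)` tuple, and with `stream_mode="updates"` the data maps the node that just ran to its state delta, rather than carrying the full state as `"values"` did. A minimal, self-contained illustration of those shapes (the graph here is hypothetical):

```python
import asyncio
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    count: int


def bump(state: State) -> dict:
    return {"count": state["count"] + 1}


builder = StateGraph(State)
builder.add_node("bump", bump)
builder.add_edge(START, "bump")
builder.add_edge("bump", END)
graph = builder.compile()


async def main() -> None:
    async for namespace, updates in graph.astream(
        {"count": 0}, stream_mode="updates", subgraphs=True
    ):
        # namespace is () for the root graph and a non-empty tuple inside subgraphs;
        # updates maps the node name to the delta it produced.
        print(namespace, updates)  # e.g. () {'bump': {'count': 1}}


asyncio.run(main())
```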
@@ -157,7 +126,7 @@ class LangGraphRuntime(UiPathBaseRuntime):
             except Exception:
                 pass
 
-            output_processor = LangGraphOutputProcessor(context=self.context)
+            output_processor = await LangGraphOutputProcessor.create(self.context)
 
             self.context.result = await output_processor.process()
 
@@ -301,3 +270,102 @@ class LangGraphRuntime(UiPathBaseRuntime):
     async def cleanup(self):
         if hasattr(self, "graph_config") and self.graph_config:
             await self.graph_config.cleanup()
+
+    def _extract_graph_result(self, final_chunk, graph: CompiledStateGraph):
+        """
+        Extract the result from a LangGraph output chunk according to the graph's output channels.
+
+        Args:
+            final_chunk: The final chunk from graph.astream()
+            graph: The LangGraph instance
+
+        Returns:
+            The extracted result according to the graph's output_channels configuration
+        """
+        # Unwrap from subgraph tuple format if needed
+        if isinstance(final_chunk, tuple) and len(final_chunk) == 2:
+            final_chunk = final_chunk[
+                1
+            ]  # Extract data part from (namespace, data) tuple
+
+        # If the result isn't a dict or graph doesn't define output channels, return as is
+        if not isinstance(final_chunk, dict) or not hasattr(graph, "output_channels"):
+            return final_chunk
+
+        output_channels = graph.output_channels
+
+        # Case 1: Single output channel as string
+        if isinstance(output_channels, str):
+            if output_channels in final_chunk:
+                return final_chunk[output_channels]
+            else:
+                return final_chunk
+
+        # Case 2: Multiple output channels as sequence
+        elif hasattr(output_channels, "__iter__") and not isinstance(
+            output_channels, str
+        ):
+            # Check which channels are present
+            available_channels = [ch for ch in output_channels if ch in final_chunk]
+
+            if available_channels:
+                # Create a dict with the available channels
+                return {channel: final_chunk[channel] for channel in available_channels}
+
+        # Fallback for any other case
+        return final_chunk
+
+    def _pretty_print(self, stream_chunk: Union[Tuple[Any, Any], Dict[str, Any], Any]):
+        """
+        Pretty print a chunk from a LangGraph stream with stream_mode="updates" and subgraphs=True.
+
+        Args:
+            stream_chunk: A tuple of (namespace, updates) from graph.astream()
+        """
+        if not isinstance(stream_chunk, tuple) or len(stream_chunk) < 2:
+            return
+
+        node_namespace = ""
+        chunk_namespace = stream_chunk[0]
+        node_updates = stream_chunk[1]
+
+        # Extract namespace if available
+        if chunk_namespace and len(chunk_namespace) > 0:
+            node_namespace = chunk_namespace[0]
+
+        if not isinstance(node_updates, dict):
+            logger.info("Raw update: %s", node_updates)
+            return
+
+        # Process each node's updates
+        for node_name, node_result in node_updates.items():
+            # Log node identifier with appropriate namespace context
+            if node_namespace:
+                logger.info("[%s][%s]", node_namespace, node_name)
+            else:
+                logger.info("[%s]", node_name)
+
+            # Handle non-dict results
+            if not isinstance(node_result, dict):
+                logger.info("%s", node_result)
+                continue
+
+            # Process messages specially
+            messages = node_result.get("messages", [])
+            if isinstance(messages, list):
+                for message in messages:
+                    if isinstance(message, BaseMessage):
+                        message.pretty_print()
+
+            # Exclude "messages" from node_result and pretty-print the rest
+            metadata = {k: v for k, v in node_result.items() if k != "messages"}
+            if metadata:
+                try:
+                    formatted_metadata = json.dumps(
+                        metadata,
+                        indent=2,
+                        ensure_ascii=False,
+                    )
+                    logger.info("%s", formatted_metadata)
+                except (TypeError, ValueError):
+                    pass
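`_pretty_print` leans on the `pretty_print()` helper that ships with langchain-core message objects; a tiny illustration with made-up messages:

```python
from langchain_core.messages import AIMessage, HumanMessage

for message in [HumanMessage(content="ping"), AIMessage(content="pong")]:
    # Renders a labelled, human-readable block for each message on stdout.
    message.pretty_print()
```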
@@ -25,12 +25,15 @@ def langgraph_run_middleware(
         ) # Continue with normal flow if no langgraph.json
 
     try:
+        bool_map = {"true": True, "false": False}
+        tracing = env.get("UIPATH_TRACING_ENABLED", True)
+        if isinstance(tracing, str) and tracing.lower() in bool_map:
+            tracing = bool_map[tracing.lower()]
 
         async def execute():
             context = LangGraphRuntimeContext.from_config(
                 env.get("UIPATH_CONFIG_PATH", "uipath.json")
             )
-
             context.entrypoint = entrypoint
             context.input = input
             context.resume = resume
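The `bool_map` lines added above normalize `UIPATH_TRACING_ENABLED`: environment variables always arrive as strings, so `"true"`/`"false"` are mapped to real booleans before being copied onto the context. The same idea in isolation, reading straight from `os.environ` instead of the middleware's `env` mapping:

```python
import os

bool_map = {"true": True, "false": False}
tracing = os.environ.get("UIPATH_TRACING_ENABLED", True)  # default stays a real bool
if isinstance(tracing, str) and tracing.lower() in bool_map:
    tracing = bool_map[tracing.lower()]
print(tracing)  # True when unset, otherwise the parsed boolean
```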
@@ -38,9 +41,9 @@ def langgraph_run_middleware(
             context.logs_min_level = env.get("LOG_LEVEL", "INFO")
             context.job_id = env.get("UIPATH_JOB_KEY")
             context.trace_id = env.get("UIPATH_TRACE_ID")
-            context.tracing_enabled = env.get("UIPATH_TRACING_ENABLED", True)
+            context.tracing_enabled = tracing
             context.trace_context = UiPathTraceContext(
-                enabled=env.get("UIPATH_TRACING_ENABLED", True),
+                enabled=tracing,
                 trace_id=env.get("UIPATH_TRACE_ID"),
                 parent_span_id=env.get("UIPATH_PARENT_SPAN_ID"),
                 root_span_id=env.get("UIPATH_ROOT_SPAN_ID"),
@@ -36,8 +36,12 @@ class UiPathClientSettings(BaseSettings):
     base_url: str = Field(default="", alias="UIPATH_BASE_URL")
     org_id: str = Field(default="", alias="UIPATH_ORGANIZATION_ID")
     tenant_id: str = Field(default="", alias="UIPATH_TENANT_ID")
-    requesting_product: str = Field(default="", alias="UIPATH_REQUESTING_PRODUCT")
-    requesting_feature: str = Field(default="", alias="UIPATH_REQUESTING_FEATURE")
+    requesting_product: str = Field(
+        default="uipath-python-sdk", alias="UIPATH_REQUESTING_PRODUCT"
+    )
+    requesting_feature: str = Field(
+        default="langgraph-agent", alias="UIPATH_REQUESTING_FEATURE"
+    )
     timeout_seconds: str = Field(default="120", alias="UIPATH_TIMEOUT_SECONDS")
     action_name: str = Field(default="DefaultActionName", alias="UIPATH_ACTION_NAME")
     action_id: str = Field(default="DefaultActionId", alias="UIPATH_ACTION_ID")
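The settings change gives `requesting_product` and `requesting_feature` real defaults instead of empty strings. With pydantic-settings, `Field(default=..., alias=...)` reads the aliased environment variable and falls back to the default when it is unset; a minimal sketch with one of the fields above (the class name is illustrative):

```python
from pydantic import Field
from pydantic_settings import BaseSettings


class DemoSettings(BaseSettings):
    requesting_product: str = Field(
        default="uipath-python-sdk", alias="UIPATH_REQUESTING_PRODUCT"
    )


print(DemoSettings().requesting_product)  # "uipath-python-sdk" unless the env var overrides it
```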
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath-langchain
-Version: 0.0.110
+Version: 0.0.111
 Summary: UiPath Langchain
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
@@ -24,7 +24,7 @@ Requires-Dist: langgraph>=0.2.70
 Requires-Dist: openai>=1.65.5
 Requires-Dist: pydantic-settings>=2.6.0
 Requires-Dist: python-dotenv>=1.0.1
-Requires-Dist: uipath<2.1.0,>=2.0.55
+Requires-Dist: uipath<2.1.0,>=2.0.65
 Provides-Extra: langchain
 Description-Content-Type: text/markdown
 
@@ -3,19 +3,18 @@ uipath_langchain/middlewares.py,sha256=tre7o9DFMgWk1DJiEEUmT6_wiP-PPkWtKmG0iOyvr
 uipath_langchain/_cli/__init__.py,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24
 uipath_langchain/_cli/cli_init.py,sha256=B5BVUA7pDvKyRRGEx5mgmeE5SJvLPM3cnLGt6a9iixY,7417
 uipath_langchain/_cli/cli_new.py,sha256=dL8-Rri6u67ZZdbb4nT38A5xD_Q3fVnG0UK9VSeKaqg,2563
-uipath_langchain/_cli/cli_run.py,sha256=8-7NBguH3ACwN3bHEtyj2d3N-FFLJQLaiHDNI_3hnQE,2863
+uipath_langchain/_cli/cli_run.py,sha256=rGuYp9AEJdhHclOCFPX100Mo0kaDYt6e6pIKBgbS6ek,3023
 uipath_langchain/_cli/_runtime/_context.py,sha256=wr4aNn06ReIXmetEZ6b6AnpAt64p13anQ2trZ5Bzgio,807
-uipath_langchain/_cli/_runtime/_escalation.py,sha256=oA5NvZvCo8ngELFJRyhZNM69DxVHrshhMY6CUk_cukQ,8055
 uipath_langchain/_cli/_runtime/_exception.py,sha256=USKkLYkG-dzjX3fEiMMOHnVUpiXJs_xF0OQXCCOvbYM,546
-uipath_langchain/_cli/_runtime/_input.py,sha256=gKzPaGW-EzgeAskWJjbCWnfZRLu_BM7lCXkq0XkVGLU,5614
-uipath_langchain/_cli/_runtime/_output.py,sha256=zGSGEUiWHg-q8-Ccm_NU8Vi-7oWCzq5GN1oFqO9FV9Q,16099
-uipath_langchain/_cli/_runtime/_runtime.py,sha256=ai3qxNmdPACxowq7WcNBn9m8-KXe4vFwvpBn0lh6kmQ,11689
+uipath_langchain/_cli/_runtime/_input.py,sha256=vZ8vfVxvPSaPWmIPghvNx1VRKzbalHsKUMBPiKDvJWM,5492
+uipath_langchain/_cli/_runtime/_output.py,sha256=yJOZPWv2FRUJWv1NRs9JmpB4QMTDXu8jrxoaKrfJvzw,9078
+uipath_langchain/_cli/_runtime/_runtime.py,sha256=SjOZkal6c1ZeZNFzW23DoAnF6LNQ2nN0dISSu2cSFhQ,13881
 uipath_langchain/_cli/_templates/langgraph.json.template,sha256=eeh391Gta_hoRgaNaZ58nW1LNvCVXA7hlAH6l7Veous,107
 uipath_langchain/_cli/_templates/main.py.template,sha256=9JEyPxwc4Ce8Dfd2UAgHgpwkkjuXwWXOQZ-65P53QuM,1195
 uipath_langchain/_cli/_utils/_graph.py,sha256=P7m03i6kcLda8XVpVtppcM8GOrSW62zWcv3rCR1H5zs,7086
 uipath_langchain/_utils/__init__.py,sha256=WoY66enCygRXTh6v5B1UrRcFCnQYuPJ8oqDkwomXzLc,194
 uipath_langchain/_utils/_request_mixin.py,sha256=t_1HWBxqEl-wsSk9ubmIM-8vs9BlNy4ZVBxtDxktn6U,18489
-uipath_langchain/_utils/_settings.py,sha256=MhwEVj4gVRSar0RBf2w2hTjO-5Qm-HpCuufqN3gSWjA,3390
+uipath_langchain/_utils/_settings.py,sha256=mCJ2-XXfpedjxT7DSXINPITHrXzQJCESgNo-nz3irDk,3450
 uipath_langchain/_utils/_sleep_policy.py,sha256=e9pHdjmcCj4CVoFM1jMyZFelH11YatsgWfpyrfXzKBQ,1251
 uipath_langchain/chat/__init__.py,sha256=WDcvy91ixvZ3Mq7Ae94g5CjyQwXovDBnEv1NlD5SXBE,116
 uipath_langchain/chat/models.py,sha256=sLz8yzEMUMSNsCFyywRxFwe2JisR3TP-n1vbeRKl9H8,10225
@@ -30,8 +29,8 @@ uipath_langchain/tracers/_instrument_traceable.py,sha256=0e841zVzcPWjOGtmBx0GeHb
 uipath_langchain/tracers/_utils.py,sha256=JOT1tKMdvqjMDtj2WbmbOWMeMlTXBWavxWpogX7KlRA,1543
 uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
 uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=eTa5sX43-ydB1pj9VNHUPbB-hC36fZK_CGrNe5U2Nrw,9393
-uipath_langchain-0.0.110.dist-info/METADATA,sha256=hgEO7QrBVpVmOUglrPN6Gi0VCZcKXGvuNjjgiC9lIPA,4166
-uipath_langchain-0.0.110.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath_langchain-0.0.110.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
-uipath_langchain-0.0.110.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
-uipath_langchain-0.0.110.dist-info/RECORD,,
+uipath_langchain-0.0.111.dist-info/METADATA,sha256=joWYzrNDv0StGp2R_xBl4fNFxoCffcIi4mQ-2O84t9o,4166
+uipath_langchain-0.0.111.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath_langchain-0.0.111.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+uipath_langchain-0.0.111.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+uipath_langchain-0.0.111.dist-info/RECORD,,
@@ -1,245 +0,0 @@
-import json
-import logging
-from pathlib import Path
-from typing import Any, Dict, Optional, Union
-
-from uipath import UiPath
-from uipath.models.actions import Action
-
-logger = logging.getLogger(__name__)
-
-
-class Escalation:
-    """
-    Class to handle default escalation.
-    """
-
-    def __init__(self, config_path: Union[str, Path] = "uipath.json"):
-        """
-        Initialize the escalation with a config file path.
-
-        Args:
-            config_path: Path to the configuration file (string or Path object)
-        """
-        self.config_path = Path(config_path)
-        self._config = None
-        self._enabled = False
-
-        self._load_config()
-
-    def _load_config(self) -> None:
-        """
-        Load and validate the default escalation from the config file.
-
-        If the 'defaultEscalation' section exists, validates required fields.
-        Raises error if required fields are missing.
-        """
-        try:
-            config_data = json.loads(self.config_path.read_text(encoding="utf-8"))
-            escalation_config = config_data.get("defaultEscalation")
-
-            if escalation_config:
-                required_fields = {"request", "title"}
-                missing_fields = [
-                    field for field in required_fields if field not in escalation_config
-                ]
-
-                if not any(key in escalation_config for key in ("appName", "appKey")):
-                    missing_fields.append("appName or appKey")
-
-                if missing_fields:
-                    raise ValueError(
-                        f"Missing required fields in configuration: {', '.join(missing_fields)}"
-                    )
-
-                self._config = escalation_config
-                self._enabled = True
-                logger.debug("Escalation configuration loaded successfully")
-            else:
-                self._enabled = False
-
-        except FileNotFoundError:
-            logger.debug(f"Config file not found: {self.config_path}")
-            self._enabled = False
-
-        except json.JSONDecodeError:
-            logger.warning(
-                f"Failed to parse config file {self.config_path}: Invalid JSON"
-            )
-            self._enabled = False
-
-        except ValueError as e:
-            logger.error(str(e))
-            raise
-
-        except Exception as e:
-            logger.error(f"Unexpected error loading config {self.config_path}: {e}")
-            self._enabled = False
-
-    @property
-    def enabled(self) -> bool:
-        """
-        Check if escalation is enabled.
-
-        Returns:
-            True if configuration is valid and loaded
-        """
-        return self._enabled
-
-    def prepare_data(self, value: Any) -> Dict[str, Any]:
-        """
-        Prepare action data by replacing $VALUE placeholders with the provided value.
-
-        Args:
-            value: The value to substitute into the template
-
-        Returns:
-            Prepared data dictionary with substitutions applied
-        """
-        if not self.enabled or not self._config:
-            return {}
-
-        template = self._config.get("request", {})
-
-        if isinstance(value, str):
-            try:
-                value_obj = json.loads(value)
-            except json.JSONDecodeError:
-                value_obj = value
-        else:
-            value_obj = value
-
-        return self._substitute_values(template, value_obj)
-
-    def _substitute_values(
-        self, template: Dict[str, Any], value: Any
-    ) -> Dict[str, Any]:
-        """
-        Replace template placeholders with actual values.
-
-        Args:
-            template: Template dictionary containing placeholders
-            value: Values to substitute into the template
-
-        Returns:
-            Template with values substituted
-        """
-
-        def process_value(template_value):
-            if isinstance(template_value, dict):
-                return {k: process_value(v) for k, v in template_value.items()}
-            elif isinstance(template_value, list):
-                return [process_value(item) for item in template_value]
-            elif isinstance(template_value, str):
-                if template_value == "$VALUE":
-                    return value
-                elif template_value.startswith("$VALUE."):
-                    return self._resolve_value_path(template_value, value)
-
-            return template_value
-
-        return process_value(template)
-
-    def _resolve_value_path(self, path_expr: str, value: Any) -> Any:
-        """
-        Resolve a dot-notation path expression against a value.
-
-        Args:
-            path_expr: Path expression (e.g. "$VALUE.user.name")
-            value: Value object to extract data from
-
-        Returns:
-            Extracted value or None if path doesn't exist
-        """
-        path_parts = path_expr.replace("$VALUE.", "").split(".")
-        current = value
-
-        for part in path_parts:
-            if not isinstance(current, dict) or part not in current:
-                return None
-            current = current.get(part)
-
-        return current
-
-    def extract_response_value(self, action_data: Dict[str, Any]) -> Any:
-        if not self._config:
-            return ""
-
-        response_template = self._config.get("response")
-        if not response_template:
-            return ""
-
-        for key, template_value in response_template.items():
-            if key in action_data:
-                extracted_value = None
-
-                if template_value == "$VALUE":
-                    extracted_value = action_data[key]
-                elif isinstance(template_value, str) and template_value.startswith(
-                    "$VALUE."
-                ):
-                    path_parts = template_value.replace("$VALUE.", "").split(".")
-                    current = action_data[key]
-
-                    valid_path = True
-                    for part in path_parts:
-                        if not isinstance(current, dict) or part not in current:
-                            valid_path = False
-                            break
-                        current = current.get(part)
-
-                    if valid_path:
-                        extracted_value = current
-
-                if extracted_value is not None:
-                    if isinstance(extracted_value, str):
-                        if extracted_value.lower() == "true":
-                            return True
-                        elif extracted_value.lower() == "false":
-                            return False
-
-                        try:
-                            if "." in extracted_value:
-                                return float(extracted_value)
-                            else:
-                                return int(extracted_value)
-                        except ValueError:
-                            pass
-
-                    return extracted_value
-
-        return action_data
-
-    async def create(self, value: Any) -> Optional[Action]:
-        """
-        Create an escalation Action with the prepared data.
-
-        Args:
-            value: The dynamic value to be substituted into the template
-
-        Returns:
-            The created Action object or None if creation fails
-        """
-        if not self.enabled or not self._config:
-            return None
-
-        action_data = self.prepare_data(value)
-
-        if not action_data:
-            logger.warning("Action creation skipped: empty data after preparation")
-            return None
-
-        try:
-            uipath = UiPath()
-            action = uipath.actions.create(
-                title=self._config.get("title", "Default escalation"),
-                app_name=self._config.get("appName"),
-                app_key=self._config.get("appKey"),
-                app_version=self._config.get("appVersion", 1),
-                data=action_data,
-            )
-            logger.info(f"Action created successfully: {action.key}")
-            return action
-        except Exception as e:
-            logger.error(f"Error creating action: {e}")
-            return None
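The removal above deletes the whole default-escalation mechanism, superseded by the shared HITL helpers imported from `uipath._cli._runtime._hitl` in the other files. For reference, its core behavior was a `$VALUE` placeholder substitution over the `defaultEscalation.request` template from `uipath.json`; a condensed, standalone rendering of that logic with made-up data:

```python
# Illustrative only: mirrors the removed _substitute_values / _resolve_value_path logic.
template = {"comment": "$VALUE.reason", "raw": "$VALUE"}
value = {"reason": "needs approval"}


def substitute(node, value):
    if isinstance(node, dict):
        return {k: substitute(v, value) for k, v in node.items()}
    if isinstance(node, list):
        return [substitute(item, value) for item in node]
    if node == "$VALUE":
        return value
    if isinstance(node, str) and node.startswith("$VALUE."):
        current = value
        for part in node[len("$VALUE."):].split("."):
            if not isinstance(current, dict) or part not in current:
                return None
            current = current.get(part)
        return current
    return node


print(substitute(template, value))  # {'comment': 'needs approval', 'raw': {'reason': 'needs approval'}}
```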