uipath-langchain 0.1.34__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff reflects the changes between publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (41)
  1. uipath_langchain/_cli/_templates/langgraph.json.template +2 -4
  2. uipath_langchain/_cli/cli_new.py +1 -2
  3. uipath_langchain/agent/guardrails/actions/escalate_action.py +252 -108
  4. uipath_langchain/agent/guardrails/actions/filter_action.py +247 -12
  5. uipath_langchain/agent/guardrails/guardrail_nodes.py +47 -12
  6. uipath_langchain/agent/guardrails/guardrails_factory.py +40 -15
  7. uipath_langchain/agent/guardrails/utils.py +64 -33
  8. uipath_langchain/agent/react/agent.py +4 -2
  9. uipath_langchain/agent/react/file_type_handler.py +123 -0
  10. uipath_langchain/agent/react/guardrails/guardrails_subgraph.py +67 -12
  11. uipath_langchain/agent/react/init_node.py +16 -1
  12. uipath_langchain/agent/react/job_attachments.py +125 -0
  13. uipath_langchain/agent/react/json_utils.py +183 -0
  14. uipath_langchain/agent/react/jsonschema_pydantic_converter.py +76 -0
  15. uipath_langchain/agent/react/llm_with_files.py +76 -0
  16. uipath_langchain/agent/react/types.py +4 -0
  17. uipath_langchain/agent/react/utils.py +29 -3
  18. uipath_langchain/agent/tools/__init__.py +5 -1
  19. uipath_langchain/agent/tools/context_tool.py +151 -1
  20. uipath_langchain/agent/tools/escalation_tool.py +46 -15
  21. uipath_langchain/agent/tools/integration_tool.py +20 -16
  22. uipath_langchain/agent/tools/internal_tools/__init__.py +5 -0
  23. uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py +113 -0
  24. uipath_langchain/agent/tools/internal_tools/internal_tool_factory.py +54 -0
  25. uipath_langchain/agent/tools/process_tool.py +8 -1
  26. uipath_langchain/agent/tools/static_args.py +18 -40
  27. uipath_langchain/agent/tools/tool_factory.py +13 -5
  28. uipath_langchain/agent/tools/tool_node.py +133 -4
  29. uipath_langchain/agent/tools/utils.py +31 -0
  30. uipath_langchain/agent/wrappers/__init__.py +6 -0
  31. uipath_langchain/agent/wrappers/job_attachment_wrapper.py +62 -0
  32. uipath_langchain/agent/wrappers/static_args_wrapper.py +34 -0
  33. uipath_langchain/chat/mapper.py +60 -42
  34. uipath_langchain/runtime/factory.py +10 -5
  35. uipath_langchain/runtime/runtime.py +38 -35
  36. uipath_langchain/runtime/storage.py +178 -71
  37. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/METADATA +5 -4
  38. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/RECORD +41 -30
  39. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/WHEEL +0 -0
  40. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/entry_points.txt +0 -0
  41. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/_cli/_templates/langgraph.json.template
@@ -1,7 +1,5 @@
 {
-  "dependencies": ["."],
   "graphs": {
     "agent": "./main.py:graph"
-  },
-  "env": ".env"
-}
+  }
+}
@@ -31,7 +31,7 @@ version = "0.0.1"
31
31
  description = "{project_name}"
32
32
  authors = [{{ name = "John Doe", email = "john.doe@myemail.com" }}]
33
33
  dependencies = [
34
- "uipath-langchain>=0.1.0",
34
+ "uipath-langchain>=0.2.0",
35
35
  ]
36
36
  requires-python = ">=3.11"
37
37
  """
@@ -47,7 +47,6 @@ def langgraph_new_middleware(name: str) -> MiddlewareResult:
 
     try:
         with console.spinner(f"Creating new agent {name} in current directory ..."):
-            generate_pyproject(directory, name)
             generate_script(directory)
             console.success("Created 'main.py' file.")
             console.success("Created 'langgraph.json' file.")
uipath_langchain/agent/guardrails/actions/escalate_action.py
@@ -1,10 +1,11 @@
 from __future__ import annotations
 
+import ast
 import json
 import re
-from typing import Any, Dict, Literal
+from typing import Any, Dict, Literal, cast
 
-from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.messages import AIMessage, AnyMessage, BaseMessage, ToolMessage
 from langgraph.types import Command, interrupt
 from uipath.platform.common import CreateEscalation
 from uipath.platform.guardrails import (
@@ -72,25 +73,59 @@ class EscalateAction(GuardrailAction):
         async def _node(
             state: AgentGuardrailsGraphState,
         ) -> Dict[str, Any] | Command[Any]:
-            input = _extract_escalation_content(
-                state, scope, execution_stage, guarded_component_name
-            )
-            escalation_field = _execution_stage_to_escalation_field(execution_stage)
+            # Validate message count based on execution stage
+            _validate_message_count(state, execution_stage)
 
-            data = {
+            # Build base data dictionary with common fields
+            data: Dict[str, Any] = {
                 "GuardrailName": guardrail.name,
                 "GuardrailDescription": guardrail.description,
                 "Component": scope.name.lower(),
                 "ExecutionStage": _execution_stage_to_string(execution_stage),
                 "GuardrailResult": state.guardrail_validation_result,
-                escalation_field: input,
             }
 
+            # Add stage-specific fields
+            if execution_stage == ExecutionStage.PRE_EXECUTION:
+                # PRE_EXECUTION: Only Inputs field from last message
+                input_content = _extract_escalation_content(
+                    state.messages[-1],
+                    state,
+                    scope,
+                    execution_stage,
+                    guarded_component_name,
+                )
+                data["Inputs"] = input_content
+            else:  # POST_EXECUTION
+                if scope == GuardrailScope.AGENT:
+                    input_message = state.messages[1]
+                else:
+                    input_message = state.messages[-2]
+                input_content = _extract_escalation_content(
+                    input_message,
+                    state,
+                    scope,
+                    ExecutionStage.PRE_EXECUTION,
+                    guarded_component_name,
+                )
+
+                # Extract Outputs from last message using POST_EXECUTION logic
+                output_content = _extract_escalation_content(
+                    state.messages[-1],
+                    state,
+                    scope,
+                    execution_stage,
+                    guarded_component_name,
+                )
+
+                data["Inputs"] = input_content
+                data["Outputs"] = output_content
+
             escalation_result = interrupt(
                 CreateEscalation(
                     app_name=self.app_name,
                     app_folder_path=self.app_folder_path,
-                    title=self.app_name,
+                    title="Agents Guardrail Task",
                     data=data,
                     assignee=self.assignee,
                 )
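Review note: the escalation payload built above is now stage-dependent. A minimal sketch of the `data` dictionary a reviewer would receive for a POST_EXECUTION hit; every field value here is invented for illustration, and the exact strings produced by `_execution_stage_to_string` and the shape of `guardrail_validation_result` are not shown in this diff.

    data = {
        "GuardrailName": "pii_check",             # illustrative guardrail name
        "GuardrailDescription": "Flags personal data",
        "Component": "tool",
        "ExecutionStage": "PostExecution",        # assumed string form
        "GuardrailResult": "flagged",             # assumed result shape
        "Inputs": '{"city": "Berlin"}',           # from messages[-2] (messages[1] for AGENT scope)
        "Outputs": '{"temperature": 21}',         # from messages[-1]
    }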
@@ -108,17 +143,50 @@ class EscalateAction(GuardrailAction):
                 raise AgentTerminationException(
                     code=UiPathErrorCode.EXECUTION_ERROR,
                     title="Escalation rejected",
-                    detail=f"Action was rejected after reviewing the task created by guardrail [{guardrail.name}]. Please contact your administrator.",
+                    detail=f"Please contact your administrator. Action was rejected after reviewing the task created by guardrail [{guardrail.name}], with reason: {escalation_result.data['Reason']}",
                 )
 
         return node_name, _node
 
 
+def _validate_message_count(
+    state: AgentGuardrailsGraphState,
+    execution_stage: ExecutionStage,
+) -> None:
+    """Validate that state has the required number of messages for the execution stage.
+
+    Args:
+        state: The current agent graph state.
+        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
+
+    Raises:
+        AgentTerminationException: If the state doesn't have enough messages.
+    """
+    required_messages = 1 if execution_stage == ExecutionStage.PRE_EXECUTION else 2
+    actual_messages = len(state.messages)
+
+    if actual_messages < required_messages:
+        stage_name = (
+            "PRE_EXECUTION"
+            if execution_stage == ExecutionStage.PRE_EXECUTION
+            else "POST_EXECUTION"
+        )
+        detail = f"{stage_name} requires at least {required_messages} message{'s' if required_messages > 1 else ''} in state, but found {actual_messages}."
+        if execution_stage == ExecutionStage.POST_EXECUTION:
+            detail += " Cannot extract Inputs from previous message."
+
+        raise AgentTerminationException(
+            code=UiPathErrorCode.EXECUTION_ERROR,
+            title=f"Invalid state for {stage_name}",
+            detail=detail,
+        )
+
+
 def _get_node_name(
     execution_stage: ExecutionStage, guardrail: BaseGuardrail, scope: GuardrailScope
 ) -> str:
-    sanitized = re.sub(r"\W+", "_", guardrail.name).strip("_").lower()
-    node_name = f"{sanitized}_hitl_{execution_stage.name.lower()}_{scope.lower()}"
+    raw_node_name = f"{scope.name}_{execution_stage.name}_{guardrail.name}_hitl"
+    node_name = re.sub(r"\W+", "_", raw_node_name.lower()).strip("_")
    return node_name
 
 
@@ -138,8 +206,8 @@ def _process_escalation_response(
         execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
 
     Returns:
-        For LLM/TOOL scope: Command to update messages with reviewed inputs/outputs, or empty dict.
-        For AGENT scope: Empty dict (no message alteration).
+        Command updates for the state (e.g., updating messages / tool calls / agent_result),
+        or an empty dict if no update is needed.
     """
     match scope:
         case GuardrailScope.LLM:
@@ -151,8 +219,71 @@ def _process_escalation_response(
                 state, escalation_result, execution_stage, guarded_node_name
             )
         case GuardrailScope.AGENT:
+            return _process_agent_escalation_response(
+                state, escalation_result, execution_stage
+            )
+
+
+def _process_agent_escalation_response(
+    state: AgentGuardrailsGraphState,
+    escalation_result: Dict[str, Any],
+    execution_stage: ExecutionStage,
+) -> Dict[str, Any] | Command[Any]:
+    """Process escalation response for AGENT scope guardrails.
+
+    For AGENT scope:
+    - PRE_EXECUTION: updates the last message content using `ReviewedInputs`
+    - POST_EXECUTION: updates `agent_result` using `ReviewedOutputs`
+
+    Args:
+        state: The current agent graph state.
+        escalation_result: The result from the escalation interrupt containing reviewed inputs/outputs.
+        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
+
+    Returns:
+        Command to update state, or empty dict if no updates are needed.
+
+    Raises:
+        AgentTerminationException: If escalation response processing fails.
+    """
+    try:
+        reviewed_field = get_reviewed_field_name(execution_stage)
+        if reviewed_field not in escalation_result:
+            return {}
+
+        reviewed_value = escalation_result.get(reviewed_field)
+        if not reviewed_value:
             return {}
 
+        try:
+            parsed = json.loads(reviewed_value)
+        except json.JSONDecodeError:
+            parsed = reviewed_value
+
+        if execution_stage == ExecutionStage.PRE_EXECUTION:
+            msgs = state.messages.copy()
+            if not msgs:
+                return {}
+            msgs[-1].content = parsed
+            return Command(update={"messages": msgs})
+
+        # POST_EXECUTION: update agent_result
+        return Command(update={"agent_result": parsed})
+    except Exception as e:
+        raise AgentTerminationException(
+            code=UiPathErrorCode.EXECUTION_ERROR,
+            title="Escalation rejected",
+            detail=str(e),
+        ) from e
+
+
+def get_reviewed_field_name(execution_stage):
+    return (
+        "ReviewedInputs"
+        if execution_stage == ExecutionStage.PRE_EXECUTION
+        else "ReviewedOutputs"
+    )
+
 
 
 def _process_llm_escalation_response(
@@ -175,11 +306,7 @@ def _process_llm_escalation_response(
         AgentTerminationException: If escalation response processing fails.
     """
     try:
-        reviewed_field = (
-            "ReviewedInputs"
-            if execution_stage == ExecutionStage.PRE_EXECUTION
-            else "ReviewedOutputs"
-        )
+        reviewed_field = get_reviewed_field_name(execution_stage)
 
         msgs = state.messages.copy()
         if not msgs or reviewed_field not in escalation_result:
@@ -196,39 +323,54 @@ def _process_llm_escalation_response(
         if not reviewed_outputs_json:
             return {}
 
-        content_list = json.loads(reviewed_outputs_json)
-        if not content_list:
+        reviewed_tool_calls_list = json.loads(reviewed_outputs_json)
+        if not reviewed_tool_calls_list:
             return {}
 
+        # Track if tool calls were successfully processed
+        tool_calls_processed = False
+
         # For AI messages, process tool calls if present
         if isinstance(last_message, AIMessage):
             ai_message: AIMessage = last_message
-            content_index = 0
 
-            if ai_message.tool_calls:
+            if ai_message.tool_calls and isinstance(reviewed_tool_calls_list, list):
                 tool_calls = list(ai_message.tool_calls)
-                for tool_call in tool_calls:
-                    args = tool_call["args"]
+
+                # Create a name-to-args mapping from reviewed tool call data
+                reviewed_tool_calls_map = {}
+                for reviewed_data in reviewed_tool_calls_list:
                     if (
-                        isinstance(args, dict)
-                        and "content" in args
-                        and args["content"] is not None
+                        isinstance(reviewed_data, dict)
+                        and "name" in reviewed_data
+                        and "args" in reviewed_data
                     ):
-                        if content_index < len(content_list):
-                            updated_content = json.loads(
-                                content_list[content_index]
-                            )
-                            args["content"] = updated_content
-                            tool_call["args"] = args
-                            content_index += 1
-                ai_message.tool_calls = tool_calls
-
-                if len(content_list) > content_index:
-                    ai_message.content = content_list[-1]
-        else:
-            # Fallback for other message types
-            if content_list:
-                last_message.content = content_list[-1]
+                        reviewed_tool_calls_map[reviewed_data["name"]] = (
+                            reviewed_data["args"]
+                        )
+
+                # Update tool calls with reviewed args by matching name
+                if reviewed_tool_calls_map:
+                    for tool_call in tool_calls:
+                        tool_name = (
+                            tool_call.get("name")
+                            if isinstance(tool_call, dict)
+                            else getattr(tool_call, "name", None)
+                        )
+                        if tool_name and tool_name in reviewed_tool_calls_map:
+                            if isinstance(tool_call, dict):
+                                tool_call["args"] = reviewed_tool_calls_map[
+                                    tool_name
+                                ]
+                            else:
+                                tool_call.args = reviewed_tool_calls_map[tool_name]
+
+                ai_message.tool_calls = tool_calls
+                tool_calls_processed = True
+
+        # Fallback: update message content if tool_calls weren't processed
+        if not tool_calls_processed:
+            last_message.content = reviewed_outputs_json
 
         return Command(update={"messages": msgs})
     except Exception as e:
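Review note: `ReviewedOutputs` for LLM-scope escalations is now a JSON array of `{name, args}` objects matched to tool calls by name, instead of a positional content list. A minimal standalone sketch of the matching step (the payload and the tool call are invented examples in LangChain's dict form):

    import json

    reviewed_outputs_json = '[{"name": "get_weather", "args": {"city": "Paris"}}]'
    tool_calls = [{"name": "get_weather", "args": {"city": "London"}, "id": "call_1", "type": "tool_call"}]

    reviewed_map = {
        item["name"]: item["args"]
        for item in json.loads(reviewed_outputs_json)
        if isinstance(item, dict) and "name" in item and "args" in item
    }
    for tool_call in tool_calls:
        if tool_call["name"] in reviewed_map:
            tool_call["args"] = reviewed_map[tool_call["name"]]
    print(tool_calls[0]["args"])  # {'city': 'Paris'}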
@@ -265,11 +407,7 @@ def _process_tool_escalation_response(
         AgentTerminationException: If escalation response processing fails.
     """
     try:
-        reviewed_field = (
-            "ReviewedInputs"
-            if execution_stage == ExecutionStage.PRE_EXECUTION
-            else "ReviewedOutputs"
-        )
+        reviewed_field = get_reviewed_field_name(execution_stage)
 
         msgs = state.messages.copy()
         if not msgs or reviewed_field not in escalation_result:
@@ -326,50 +464,65 @@ def _process_tool_escalation_response(
 
 
 def _extract_escalation_content(
+    message: BaseMessage,
     state: AgentGuardrailsGraphState,
     scope: GuardrailScope,
     execution_stage: ExecutionStage,
     guarded_node_name: str,
 ) -> str | list[str | Dict[str, Any]]:
-    """Extract escalation content from state based on guardrail scope and execution stage.
+    """Extract escalation content from a message based on guardrail scope and execution stage.
 
     Args:
-        state: The current agent graph state.
+        message: The message to extract content from.
         scope: The guardrail scope (LLM/AGENT/TOOL).
         execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
+        guarded_node_name: Name of the guarded component.
 
     Returns:
         str or list[str | Dict[str, Any]]: For LLM scope, returns JSON string or list with message/tool call content.
         For AGENT scope, returns empty string. For TOOL scope, returns JSON string or list with tool-specific content.
-
-    Raises:
-        AgentTerminationException: If no messages are found in state.
     """
-    if not state.messages:
-        raise AgentTerminationException(
-            code=UiPathErrorCode.EXECUTION_ERROR,
-            title="Invalid state message",
-            detail="No message found into agent state",
-        )
-
     match scope:
         case GuardrailScope.LLM:
-            return _extract_llm_escalation_content(state, execution_stage)
+            return _extract_llm_escalation_content(message, execution_stage)
         case GuardrailScope.AGENT:
-            return _extract_agent_escalation_content(state, execution_stage)
+            return _extract_agent_escalation_content(message, state, execution_stage)
         case GuardrailScope.TOOL:
            return _extract_tool_escalation_content(
-                state, execution_stage, guarded_node_name
+                message, execution_stage, guarded_node_name
            )
 
 
+def _extract_agent_escalation_content(
+    message: BaseMessage,
+    state: AgentGuardrailsGraphState,
+    execution_stage: ExecutionStage,
+) -> str | list[str | Dict[str, Any]]:
+    """Extract escalation content for AGENT scope guardrails.
+
+    Args:
+        message: The message used to extract the agent input content.
+        state: The current agent guardrails graph state. Used to read `agent_result` for POST_EXECUTION.
+        execution_stage: PRE_EXECUTION or POST_EXECUTION.
+
+    Returns:
+        - PRE_EXECUTION: the agent input string (from message content).
+        - POST_EXECUTION: a JSON-serialized representation of `state.agent_result`.
+    """
+    if execution_stage == ExecutionStage.PRE_EXECUTION:
+        return get_message_content(cast(AnyMessage, message))
+
+    output_content = state.agent_result or ""
+    return json.dumps(output_content)
+
+
 def _extract_llm_escalation_content(
-    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage
+    message: BaseMessage, execution_stage: ExecutionStage
 ) -> str | list[str | Dict[str, Any]]:
     """Extract escalation content for LLM scope guardrails.
 
     Args:
-        state: The current agent graph state.
+        message: The message to extract content from.
         execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
 
     Returns:
@@ -377,61 +530,37 @@ def _extract_llm_escalation_content(
         For PostExecution, returns JSON string (array) with tool call content and message content.
         Returns empty string if no content found.
     """
-    last_message = state.messages[-1]
     if execution_stage == ExecutionStage.PRE_EXECUTION:
-        if isinstance(last_message, ToolMessage):
-            return last_message.content
+        if isinstance(message, ToolMessage):
+            return message.content
 
-        content = get_message_content(last_message)
-        return json.dumps(content) if content else ""
+        return get_message_content(cast(AnyMessage, message))
 
     # For AI messages, process tool calls if present
-    if isinstance(last_message, AIMessage):
-        ai_message: AIMessage = last_message
-        content_list: list[str] = []
+    if isinstance(message, AIMessage):
+        ai_message: AIMessage = message
 
         if ai_message.tool_calls:
+            content_list: list[Dict[str, Any]] = []
             for tool_call in ai_message.tool_calls:
-                args = tool_call["args"]
-                if (
-                    isinstance(args, dict)
-                    and "content" in args
-                    and args["content"] is not None
-                ):
-                    content_list.append(json.dumps(args["content"]))
-
-            message_content = get_message_content(last_message)
-            if message_content:
-                content_list.append(message_content)
-
-            return json.dumps(content_list)
+                tool_call_data = {
+                    "name": tool_call.get("name"),
+                    "args": tool_call.get("args"),
+                }
+                content_list.append(tool_call_data)
+            return json.dumps(content_list)
 
     # Fallback for other message types
-    return get_message_content(last_message)
-
-
-def _extract_agent_escalation_content(
-    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage
-) -> str | list[str | Dict[str, Any]]:
-    """Extract escalation content for AGENT scope guardrails.
-
-    Args:
-        state: The current agent graph state.
-        execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
-
-    Returns:
-        str: Empty string (AGENT scope guardrails do not extract escalation content).
-    """
-    return ""
+    return get_message_content(cast(AnyMessage, message))
 
 
 def _extract_tool_escalation_content(
-    state: AgentGuardrailsGraphState, execution_stage: ExecutionStage, tool_name: str
+    message: BaseMessage, execution_stage: ExecutionStage, tool_name: str
 ) -> str | list[str | Dict[str, Any]]:
     """Extract escalation content for TOOL scope guardrails.
 
     Args:
-        state: The current agent graph state.
+        message: The message to extract content from.
         execution_stage: The execution stage (PRE_EXECUTION or POST_EXECUTION).
         tool_name: Optional tool name to filter tool calls. If provided, only extracts args for matching tool.
 
@@ -440,16 +569,31 @@ def _extract_tool_escalation_content(
         for the specified tool name, or empty string if not found. For PostExecution, returns string with
         tool message content, or empty string if message type doesn't match.
     """
-    last_message = state.messages[-1]
     if execution_stage == ExecutionStage.PRE_EXECUTION:
-        args = _extract_tool_args_from_message(last_message, tool_name)
+        args = _extract_tool_args_from_message(cast(AnyMessage, message), tool_name)
         if args:
             return json.dumps(args)
         return ""
     else:
-        if not isinstance(last_message, ToolMessage):
+        if not isinstance(message, ToolMessage):
             return ""
-        return last_message.content
+        content = message.content
+
+        # If content is already dict/list, serialize to JSON
+        if isinstance(content, (dict, list)):
+            return json.dumps(content)
+
+        # If content is a string that looks like a Python literal, convert to JSON
+        if isinstance(content, str):
+            try:
+                # Try to parse as Python literal and convert to JSON
+                parsed_content = ast.literal_eval(content)
+                return json.dumps(parsed_content)
+            except (ValueError, SyntaxError):
+                # If parsing fails, return as-is
+                pass
+
+        return content
 
 
 def _execution_stage_to_escalation_field(
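Review note: ToolMessage content is often the repr of a Python object (single-quoted) rather than valid JSON; a minimal sketch of the normalization added in `_extract_tool_escalation_content` (the sample content string is invented):

    import ast
    import json

    content = "{'temperature': 21, 'unit': 'C'}"  # repr-style tool output
    try:
        normalized = json.dumps(ast.literal_eval(content))
    except (ValueError, SyntaxError):
        normalized = content  # not a Python literal; keep as-is
    print(normalized)  # {"temperature": 21, "unit": "C"}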