uipath-langchain 0.1.34__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. uipath_langchain/_cli/_templates/langgraph.json.template +2 -4
  2. uipath_langchain/_cli/cli_new.py +1 -2
  3. uipath_langchain/agent/guardrails/actions/escalate_action.py +252 -108
  4. uipath_langchain/agent/guardrails/actions/filter_action.py +247 -12
  5. uipath_langchain/agent/guardrails/guardrail_nodes.py +47 -12
  6. uipath_langchain/agent/guardrails/guardrails_factory.py +40 -15
  7. uipath_langchain/agent/guardrails/utils.py +64 -33
  8. uipath_langchain/agent/react/agent.py +4 -2
  9. uipath_langchain/agent/react/file_type_handler.py +123 -0
  10. uipath_langchain/agent/react/guardrails/guardrails_subgraph.py +67 -12
  11. uipath_langchain/agent/react/init_node.py +16 -1
  12. uipath_langchain/agent/react/job_attachments.py +125 -0
  13. uipath_langchain/agent/react/json_utils.py +183 -0
  14. uipath_langchain/agent/react/jsonschema_pydantic_converter.py +76 -0
  15. uipath_langchain/agent/react/llm_with_files.py +76 -0
  16. uipath_langchain/agent/react/types.py +4 -0
  17. uipath_langchain/agent/react/utils.py +29 -3
  18. uipath_langchain/agent/tools/__init__.py +5 -1
  19. uipath_langchain/agent/tools/context_tool.py +151 -1
  20. uipath_langchain/agent/tools/escalation_tool.py +46 -15
  21. uipath_langchain/agent/tools/integration_tool.py +20 -16
  22. uipath_langchain/agent/tools/internal_tools/__init__.py +5 -0
  23. uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py +113 -0
  24. uipath_langchain/agent/tools/internal_tools/internal_tool_factory.py +54 -0
  25. uipath_langchain/agent/tools/process_tool.py +8 -1
  26. uipath_langchain/agent/tools/static_args.py +18 -40
  27. uipath_langchain/agent/tools/tool_factory.py +13 -5
  28. uipath_langchain/agent/tools/tool_node.py +133 -4
  29. uipath_langchain/agent/tools/utils.py +31 -0
  30. uipath_langchain/agent/wrappers/__init__.py +6 -0
  31. uipath_langchain/agent/wrappers/job_attachment_wrapper.py +62 -0
  32. uipath_langchain/agent/wrappers/static_args_wrapper.py +34 -0
  33. uipath_langchain/chat/mapper.py +60 -42
  34. uipath_langchain/runtime/factory.py +10 -5
  35. uipath_langchain/runtime/runtime.py +38 -35
  36. uipath_langchain/runtime/storage.py +178 -71
  37. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/METADATA +5 -4
  38. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/RECORD +41 -30
  39. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/WHEEL +0 -0
  40. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/entry_points.txt +0 -0
  41. {uipath_langchain-0.1.34.dist-info → uipath_langchain-0.3.1.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/react/llm_with_files.py
@@ -0,0 +1,76 @@
+ """LLM invocation with file attachments support."""
+
+ from dataclasses import dataclass
+ from typing import Any
+
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
+
+ from .file_type_handler import build_message_content_part_from_data
+
+
+ @dataclass
+ class FileInfo:
+     """File information for LLM file attachments."""
+
+     url: str
+     name: str
+     mime_type: str
+
+
+ def _get_model_name(model: BaseChatModel) -> str:
+     """Extract model name from a BaseChatModel instance."""
+     for attr in ["model_name", "_model_name", "model", "model_id"]:
+         value = getattr(model, attr, None)
+         if value and isinstance(value, str):
+             return value
+     raise ValueError(f"Model name not found in model {model}")
+
+
+ async def create_part_for_file(
+     file_info: FileInfo,
+     model: BaseChatModel,
+ ) -> dict[str, Any]:
+     """Create a provider-specific message content part for a file attachment.
+
+     Downloads the file from file_info.url and formats it for the model's provider.
+     """
+     model_name = _get_model_name(model)
+     return await build_message_content_part_from_data(
+         url=file_info.url,
+         filename=file_info.name,
+         mime_type=file_info.mime_type,
+         model=model_name,
+     )
+
+
+ async def llm_call_with_files(
+     messages: list[AnyMessage],
+     files: list[FileInfo],
+     model: BaseChatModel,
+ ) -> AIMessage:
+     """Invoke an LLM with file attachments.
+
+     Downloads files, creates provider-specific content parts, and appends them
+     as a HumanMessage. If no files are provided, equivalent to model.ainvoke().
+     """
+     if not files:
+         response = await model.ainvoke(messages)
+         if not isinstance(response, AIMessage):
+             raise TypeError(
+                 f"LLM returned {type(response).__name__} instead of AIMessage"
+             )
+         return response
+
+     content_parts: list[str | dict[Any, Any]] = []
+     for file_info in files:
+         content_part = await create_part_for_file(file_info, model)
+         content_parts.append(content_part)
+
+     file_message = HumanMessage(content=content_parts)
+     all_messages = list(messages) + [file_message]
+
+     response = await model.ainvoke(all_messages)
+     if not isinstance(response, AIMessage):
+         raise TypeError(f"LLM returned {type(response).__name__} instead of AIMessage")
+     return response
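
Note: a minimal usage sketch of the new llm_call_with_files helper; the chat model class, model name, and file URL below are illustrative placeholders, not part of this package.

    import asyncio

    from langchain_core.messages import HumanMessage
    from langchain_openai import ChatOpenAI  # placeholder; any BaseChatModel exposing a model name works

    from uipath_langchain.agent.react.llm_with_files import FileInfo, llm_call_with_files


    async def main() -> None:
        model = ChatOpenAI(model="gpt-4o-mini")  # hypothetical model choice
        files = [
            FileInfo(
                url="https://example.com/report.pdf",  # placeholder signed file URI
                name="report.pdf",
                mime_type="application/pdf",
            )
        ]
        # The helper appends one HumanMessage of provider-specific content parts built
        # from the files, then invokes the model; with files=[] it is plain ainvoke().
        reply = await llm_call_with_files(
            [HumanMessage(content="Summarize the attached report.")], files, model
        )
        print(reply.content)


    asyncio.run(main())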
uipath_langchain/agent/react/types.py
@@ -4,6 +4,9 @@ from typing import Annotated, Any, Optional
  from langchain_core.messages import AnyMessage
  from langgraph.graph.message import add_messages
  from pydantic import BaseModel, Field
+ from uipath.platform.attachments import Attachment
+
+ from uipath_langchain.agent.react.utils import add_job_attachments


  class AgentTerminationSource(StrEnum):
@@ -22,6 +25,7 @@ class AgentGraphState(BaseModel):
      """Agent Graph state for standard loop execution."""

      messages: Annotated[list[AnyMessage], add_messages] = []
+     job_attachments: Annotated[dict[str, Attachment], add_job_attachments] = {}
      termination: AgentTermination | None = None


uipath_langchain/agent/react/utils.py
@@ -5,7 +5,9 @@ from typing import Any, Sequence
  from langchain_core.messages import AIMessage, BaseMessage
  from pydantic import BaseModel
  from uipath.agent.react import END_EXECUTION_TOOL
- from uipath.utils.dynamic_schema import jsonschema_to_pydantic
+ from uipath.platform.attachments import Attachment
+
+ from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model


  def resolve_input_model(
@@ -13,7 +15,7 @@ def resolve_input_model(
  ) -> type[BaseModel]:
      """Resolve the input model from the input schema."""
      if input_schema:
-         return jsonschema_to_pydantic(input_schema)
+         return create_model(input_schema)

      return BaseModel

@@ -23,7 +25,7 @@ def resolve_output_model(
  ) -> type[BaseModel]:
      """Fallback to default end_execution tool schema when no agent output schema is provided."""
      if output_schema:
-         return jsonschema_to_pydantic(output_schema)
+         return create_model(output_schema)

      return END_EXECUTION_TOOL.args_schema

@@ -47,3 +49,27 @@ def count_consecutive_thinking_messages(messages: Sequence[BaseMessage]) -> int:
          count += 1

      return count
+
+
+ def add_job_attachments(
+     left: dict[str, Attachment], right: dict[str, Attachment]
+ ) -> dict[str, Attachment]:
+     """Merge attachment dictionaries, with right values taking precedence.
+
+     This reducer function merges two dictionaries of attachments by UUID string.
+     If the same UUID exists in both dictionaries, the value from 'right' takes precedence.
+
+     Args:
+         left: Existing dictionary of attachments keyed by UUID string
+         right: New dictionary of attachments to merge
+
+     Returns:
+         Merged dictionary with right values overriding left values for duplicate keys
+     """
+     if not right:
+         return left
+
+     if not left:
+         return right
+
+     return {**left, **right}
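
Note: a quick sketch of the reducer's merge semantics; plain strings stand in for uipath.platform.attachments.Attachment instances, so this is illustrative only.

    from uipath_langchain.agent.react.utils import add_job_attachments

    existing = {"6f9a...": "attachment-A"}            # state accumulated so far
    incoming = {
        "6f9a...": "attachment-A-updated",            # same key: right-hand value wins
        "2c41...": "attachment-B",                    # new key: added to the result
    }

    merged = add_job_attachments(existing, incoming)  # type: ignore[arg-type]
    assert merged == {"6f9a...": "attachment-A-updated", "2c41...": "attachment-B"}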
uipath_langchain/agent/tools/__init__.py
@@ -1,13 +1,14 @@
  """Tool creation and management for LowCode agents."""

  from .context_tool import create_context_tool
+ from .escalation_tool import create_escalation_tool
  from .integration_tool import create_integration_tool
  from .mcp_tool import create_mcp_tools
  from .process_tool import create_process_tool
  from .tool_factory import (
      create_tools_from_resources,
  )
- from .tool_node import create_tool_node
+ from .tool_node import ToolWrapperMixin, UiPathToolNode, create_tool_node

  __all__ = [
      "create_tools_from_resources",
@@ -15,5 +16,8 @@ __all__ = [
      "create_context_tool",
      "create_process_tool",
      "create_integration_tool",
+     "create_escalation_tool",
      "create_mcp_tools",
+     "UiPathToolNode",
+     "ToolWrapperMixin",
  ]
uipath_langchain/agent/tools/context_tool.py
@@ -1,12 +1,24 @@
  """Context tool creation for semantic index retrieval."""

+ import uuid
  from typing import Any

  from langchain_core.documents import Document
  from langchain_core.tools import StructuredTool
+ from langgraph.types import interrupt
  from pydantic import BaseModel, Field
- from uipath.agent.models.agent import AgentContextResourceConfig
+ from uipath.agent.models.agent import (
+     AgentContextResourceConfig,
+     AgentContextRetrievalMode,
+ )
  from uipath.eval.mocks import mockable
+ from uipath.platform.common import CreateBatchTransform, CreateDeepRag
+ from uipath.platform.context_grounding import (
+     BatchTransformOutputColumn,
+     BatchTransformResponse,
+     CitationMode,
+     DeepRagResponse,
+ )

  from uipath_langchain.retrievers import ContextGroundingRetriever

@@ -16,6 +28,18 @@ from .utils import sanitize_tool_name

  def create_context_tool(resource: AgentContextResourceConfig) -> StructuredTool:
      tool_name = sanitize_tool_name(resource.name)
+     retrieval_mode = resource.settings.retrieval_mode.lower()
+     if retrieval_mode == AgentContextRetrievalMode.DEEP_RAG.value.lower():
+         return handle_deep_rag(tool_name, resource)
+     elif retrieval_mode == AgentContextRetrievalMode.BATCH_TRANSFORM.value.lower():
+         return handle_batch_transform(tool_name, resource)
+     else:
+         return handle_semantic_search(tool_name, resource)
+
+
+ def handle_semantic_search(
+     tool_name: str, resource: AgentContextResourceConfig
+ ) -> StructuredTool:
      retriever = ContextGroundingRetriever(
          index_name=resource.index_name,
          folder_path=resource.folder_path,
@@ -40,6 +64,7 @@ def create_context_tool(resource: AgentContextResourceConfig) -> StructuredTool:
          description=resource.description,
          input_schema=input_model.model_json_schema(),
          output_schema=output_model.model_json_schema(),
+         example_calls=[],  # Examples cannot be provided for context.
      )
      async def context_tool_fn(query: str) -> dict[str, Any]:
          return {"documents": await retriever.ainvoke(query)}
@@ -51,3 +76,128 @@ def create_context_tool(resource: AgentContextResourceConfig) -> StructuredTool:
          coroutine=context_tool_fn,
          output_type=output_model,
      )
+
+
+ def handle_deep_rag(
+     tool_name: str, resource: AgentContextResourceConfig
+ ) -> StructuredTool:
+     ensure_valid_fields(resource)
+     # needed for type checking
+     assert resource.settings.query is not None
+     assert resource.settings.query.value is not None
+
+     index_name = resource.index_name
+     prompt = resource.settings.query.value
+     if not resource.settings.citation_mode:
+         raise ValueError("Citation mode is required for Deep RAG")
+     citation_mode = CitationMode(resource.settings.citation_mode.value)
+
+     input_model = None
+     output_model = DeepRagResponse
+
+     @mockable(
+         name=resource.name,
+         description=resource.description,
+         input_schema=input_model,
+         output_schema=output_model.model_json_schema(),
+         example_calls=[],  # Examples cannot be provided for context.
+     )
+     async def context_tool_fn() -> dict[str, Any]:
+         # TODO: add glob pattern support
+         return interrupt(
+             CreateDeepRag(
+                 name=f"task-{uuid.uuid4()}",
+                 index_name=index_name,
+                 prompt=prompt,
+                 citation_mode=citation_mode,
+             )
+         )
+
+     return StructuredToolWithOutputType(
+         name=tool_name,
+         description=resource.description,
+         args_schema=input_model,
+         coroutine=context_tool_fn,
+         output_type=output_model,
+     )
+
+
+ def handle_batch_transform(
+     tool_name: str, resource: AgentContextResourceConfig
+ ) -> StructuredTool:
+     ensure_valid_fields(resource)
+
+     # needed for type checking
+     assert resource.settings.query is not None
+     assert resource.settings.query.value is not None
+
+     index_name = resource.index_name
+     prompt = resource.settings.query.value
+
+     index_folder_path = resource.folder_path
+     if not resource.settings.web_search_grounding:
+         raise ValueError("Web search grounding field is required for Batch Transform")
+     enable_web_search_grounding = (
+         resource.settings.web_search_grounding.value.lower() == "enabled"
+     )
+
+     batch_transform_output_columns: list[BatchTransformOutputColumn] = []
+     if (output_columns := resource.settings.output_columns) is None or not len(
+         output_columns
+     ):
+         raise ValueError(
+             "Batch transform requires at least one output column to be specified in settings.output_columns"
+         )
+
+     for column in output_columns:
+         batch_transform_output_columns.append(
+             BatchTransformOutputColumn(
+                 name=column.name,
+                 description=column.description,
+             )
+         )
+
+     class BatchTransformSchemaModel(BaseModel):
+         destination_path: str = Field(
+             ...,
+             description="The relative file path destination for the modified csv file",
+         )
+
+     input_model = BatchTransformSchemaModel
+     output_model = BatchTransformResponse
+
+     @mockable(
+         name=resource.name,
+         description=resource.description,
+         input_schema=input_model.model_json_schema(),
+         output_schema=output_model.model_json_schema(),
+         example_calls=[],  # Examples cannot be provided for context.
+     )
+     async def context_tool_fn(destination_path: str) -> dict[str, Any]:
+         # TODO: storage_bucket_folder_path_prefix support
+         return interrupt(
+             CreateBatchTransform(
+                 name=f"task-{uuid.uuid4()}",
+                 index_name=index_name,
+                 prompt=prompt,
+                 destination_path=destination_path,
+                 index_folder_path=index_folder_path,
+                 enable_web_search_grounding=enable_web_search_grounding,
+                 output_columns=batch_transform_output_columns,
+             )
+         )
+
+     return StructuredToolWithOutputType(
+         name=tool_name,
+         description=resource.description,
+         args_schema=input_model,
+         coroutine=context_tool_fn,
+         output_type=output_model,
+     )
+
+
+ def ensure_valid_fields(resource_config: AgentContextResourceConfig):
+     if not resource_config.settings.query:
+         raise ValueError("Query object is required")
+     if not resource_config.settings.query.value:
+         raise ValueError("Query prompt is required")
uipath_langchain/agent/tools/escalation_tool.py
@@ -3,10 +3,9 @@
  from enum import Enum
  from typing import Any

- from jsonschema_pydantic_converter import transform as create_model
- from langchain.tools import ToolRuntime
  from langchain_core.messages import ToolMessage
- from langchain_core.tools import StructuredTool
+ from langchain_core.messages.tool import ToolCall
+ from langchain_core.tools import BaseTool, StructuredTool
  from langgraph.types import Command, interrupt
  from uipath.agent.models.agent import (
      AgentEscalationChannel,
@@ -16,7 +15,10 @@ from uipath.agent.models.agent import (
  from uipath.eval.mocks import mockable
  from uipath.platform.common import CreateEscalation

- from ..react.types import AgentGraphNode, AgentTerminationSource
+ from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model
+
+ from ..react.types import AgentGraphNode, AgentGraphState, AgentTerminationSource
+ from .tool_node import ToolWrapperMixin
  from .utils import sanitize_tool_name


@@ -27,7 +29,11 @@ class EscalationAction(str, Enum):
      END = "end"


- def create_escalation_tool(resource: AgentEscalationResourceConfig) -> StructuredTool:
+ class StructuredToolWithWrapper(StructuredTool, ToolWrapperMixin):
+     pass
+
+
+ def create_escalation_tool(resource: AgentEscalationResourceConfig) -> BaseTool:
      """Uses interrupt() for Action Center human-in-the-loop."""

      tool_name: str = f"escalate_{sanitize_tool_name(resource.name)}"
@@ -48,10 +54,9 @@ def create_escalation_tool(resource: AgentEscalationResourceConfig) -> Structure
          description=resource.description,
          input_schema=input_model.model_json_schema(),
          output_schema=output_model.model_json_schema(),
+         example_calls=channel.properties.example_calls,
      )
-     async def escalation_tool_fn(
-         runtime: ToolRuntime, **kwargs: Any
-     ) -> Command[Any] | Any:
+     async def escalation_tool_fn(**kwargs: Any) -> dict[str, Any]:
          task_title = channel.task_title or "Escalation Task"

          result = interrupt(
@@ -72,22 +77,41 @@ def create_escalation_tool(resource: AgentEscalationResourceConfig) -> Structure
          escalation_action = getattr(result, "action", None)
          escalation_output = getattr(result, "data", {})

-         outcome = (
+         outcome_str = (
              channel.outcome_mapping.get(escalation_action)
              if channel.outcome_mapping and escalation_action
              else None
          )
+         outcome = (
+             EscalationAction(outcome_str) if outcome_str else EscalationAction.CONTINUE
+         )

-         if outcome == EscalationAction.END:
-             output_detail = f"Escalation output: {escalation_output}"
-             termination_title = f"Agent run ended based on escalation outcome {outcome} with directive {escalation_action}"
+         return {
+             "action": outcome,
+             "output": escalation_output,
+             "escalation_action": escalation_action,
+         }
+
+     async def escalation_wrapper(
+         tool: BaseTool,
+         call: ToolCall,
+         state: AgentGraphState,
+     ) -> dict[str, Any] | Command[Any]:
+         result = await tool.ainvoke(call["args"])
+
+         if result["action"] == EscalationAction.END:
+             output_detail = f"Escalation output: {result['output']}"
+             termination_title = (
+                 f"Agent run ended based on escalation outcome {result['action']} "
+                 f"with directive {result['escalation_action']}"
+             )

              return Command(
                  update={
                      "messages": [
                          ToolMessage(
                              content=f"{termination_title}. {output_detail}",
-                             tool_call_id=runtime.tool_call_id,
+                             tool_call_id=call["id"],
                          )
                      ],
                      "termination": {
@@ -99,13 +123,20 @@ def create_escalation_tool(resource: AgentEscalationResourceConfig) -> Structure
                  goto=AgentGraphNode.TERMINATE,
              )

-         return escalation_output
+         return result["output"]

-     tool = StructuredTool(
+     tool = StructuredToolWithWrapper(
          name=tool_name,
          description=resource.description,
          args_schema=input_model,
          coroutine=escalation_tool_fn,
+         metadata={
+             "tool_type": "escalation",
+             "display_name": channel.properties.app_name,
+             "channel_type": channel.type,
+             "assignee": assignee,
+         },
      )
+     tool.set_tool_wrappers(awrapper=escalation_wrapper)

      return tool
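
Note: a conceptual sketch of how a wrapper-aware tool node could dispatch such a tool. This is not the actual UiPathToolNode implementation, and the awrapper attribute name is assumed here for illustration; the diff only shows that wrappers are registered via set_tool_wrappers(awrapper=...).

    from typing import Any

    from langchain_core.messages.tool import ToolCall
    from langchain_core.tools import BaseTool

    from uipath_langchain.agent.tools.tool_node import ToolWrapperMixin


    async def run_tool_call(tool: BaseTool, call: ToolCall, state: Any) -> Any:
        # A tool that mixes in ToolWrapperMixin can carry an async wrapper that
        # receives the tool, the raw ToolCall, and the graph state, and may return
        # a Command (e.g. routing to the terminate node) instead of a plain result.
        wrapper = getattr(tool, "awrapper", None)  # attribute name is an assumption
        if isinstance(tool, ToolWrapperMixin) and wrapper is not None:
            return await wrapper(tool, call, state)
        return await tool.ainvoke(call["args"])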
uipath_langchain/agent/tools/integration_tool.py
@@ -3,17 +3,21 @@
  import copy
  from typing import Any

- from jsonschema_pydantic_converter import transform as create_model
- from langchain.tools import ToolRuntime
  from langchain_core.tools import StructuredTool
  from uipath.agent.models.agent import AgentIntegrationToolResourceConfig
  from uipath.eval.mocks import mockable
  from uipath.platform import UiPath
  from uipath.platform.connections import ActivityMetadata, ActivityParameterLocationInfo

- from .static_args import handle_static_args
+ from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model
+ from uipath_langchain.agent.tools.tool_node import ToolWrapperMixin
+
  from .structured_tool_with_output_type import StructuredToolWithOutputType
- from .utils import sanitize_tool_name
+ from .utils import sanitize_dict_for_serialization, sanitize_tool_name
+
+
+ class StructuredToolWithStaticArgs(StructuredToolWithOutputType, ToolWrapperMixin):
+     pass


  def remove_asterisk_from_properties(fields: dict[str, Any]) -> dict[str, Any]:
@@ -149,33 +153,33 @@ def create_integration_tool(
          description=resource.description,
          input_schema=input_model.model_json_schema(),
          output_schema=output_model.model_json_schema(),
+         example_calls=resource.properties.example_calls,
      )
-     async def integration_tool_fn(runtime: ToolRuntime, **kwargs: Any):
+     async def integration_tool_fn(**kwargs: Any):
          try:
-             # we manually validating here and not passing input_model to StructuredTool
-             # because langchain itself will block their own injected arguments (like runtime) if the model is strict
-             val_args = input_model.model_validate(kwargs)
-             args = handle_static_args(
-                 resource=resource,
-                 runtime=runtime,
-                 input_args=val_args.model_dump(),
-             )
              result = await sdk.connections.invoke_activity_async(
                  activity_metadata=activity_metadata,
                  connection_id=connection_id,
-                 activity_input=args,
+                 activity_input=sanitize_dict_for_serialization(kwargs),
              )
          except Exception:
              raise

          return result

-     tool = StructuredToolWithOutputType(
+     from uipath_langchain.agent.wrappers.static_args_wrapper import (
+         get_static_args_wrapper,
+     )
+
+     wrapper = get_static_args_wrapper(resource)
+
+     tool = StructuredToolWithStaticArgs(
          name=tool_name,
          description=resource.description,
-         args_schema=resource.input_schema,
+         args_schema=input_model,
          coroutine=integration_tool_fn,
          output_type=output_model,
      )
+     tool.set_tool_wrappers(awrapper=wrapper)

      return tool
uipath_langchain/agent/tools/internal_tools/__init__.py
@@ -0,0 +1,5 @@
+ """Internal Tool creation and management for LowCode agents."""
+
+ from .internal_tool_factory import create_internal_tool
+
+ __all__ = ["create_internal_tool"]
uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py
@@ -0,0 +1,113 @@
+ import uuid
+ from typing import Any
+
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage
+ from langchain_core.tools import StructuredTool
+ from uipath.agent.models.agent import (
+     AgentInternalToolResourceConfig,
+ )
+ from uipath.eval.mocks import mockable
+ from uipath.platform import UiPath
+
+ from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model
+ from uipath_langchain.agent.react.llm_with_files import FileInfo, llm_call_with_files
+ from uipath_langchain.agent.tools.structured_tool_with_output_type import (
+     StructuredToolWithOutputType,
+ )
+ from uipath_langchain.agent.tools.tool_node import ToolWrapperMixin
+ from uipath_langchain.agent.tools.utils import sanitize_tool_name
+
+ ANALYZE_FILES_SYSTEM_MESSAGE = (
+     "Process the provided files to complete the given task. "
+     "Analyze the files contents thoroughly to deliver an accurate response "
+     "based on the extracted information."
+ )
+
+
+ class AnalyzeFileTool(StructuredToolWithOutputType, ToolWrapperMixin):
+     pass
+
+
+ def create_analyze_file_tool(
+     resource: AgentInternalToolResourceConfig, llm: BaseChatModel
+ ) -> StructuredTool:
+     from uipath_langchain.agent.wrappers.job_attachment_wrapper import (
+         get_job_attachment_wrapper,
+     )
+
+     tool_name = sanitize_tool_name(resource.name)
+     input_model = create_model(resource.input_schema)
+     output_model = create_model(resource.output_schema)
+
+     @mockable(
+         name=resource.name,
+         description=resource.description,
+         input_schema=input_model.model_json_schema(),
+         output_schema=output_model.model_json_schema(),
+     )
+     async def tool_fn(**kwargs: Any):
+         if "analysisTask" not in kwargs:
+             raise ValueError("Argument 'analysisTask' is not available")
+         if "attachments" not in kwargs:
+             raise ValueError("Argument 'attachments' is not available")
+
+         attachments = kwargs["attachments"]
+         analysisTask = kwargs["analysisTask"]
+
+         files = await _resolve_job_attachment_arguments(attachments)
+         messages: list[AnyMessage] = [
+             SystemMessage(content=ANALYZE_FILES_SYSTEM_MESSAGE),
+             HumanMessage(content=analysisTask),
+         ]
+         result = await llm_call_with_files(messages, files, llm)
+         return result
+
+     wrapper = get_job_attachment_wrapper()
+     tool = AnalyzeFileTool(
+         name=tool_name,
+         description=resource.description,
+         args_schema=input_model,
+         coroutine=tool_fn,
+         output_type=output_model,
+     )
+     tool.set_tool_wrappers(awrapper=wrapper)
+     return tool
+
+
+ async def _resolve_job_attachment_arguments(
+     attachments: list[Any],
+ ) -> list[FileInfo]:
+     """Resolve job attachments to FileInfo objects.
+
+     Args:
+         attachments: List of job attachment objects (dynamically typed from schema)
+
+     Returns:
+         List of FileInfo objects with blob URIs for each attachment
+     """
+     client = UiPath()
+     file_infos: list[FileInfo] = []
+
+     for attachment in attachments:
+         # Access using Pydantic field aliases (ID, FullName, MimeType)
+         # These are dynamically created from the JSON schema
+         attachment_id_value = getattr(attachment, "ID", None)
+         if attachment_id_value is None:
+             continue
+
+         attachment_id = uuid.UUID(attachment_id_value)
+         mime_type = getattr(attachment, "MimeType", "")
+
+         blob_info = await client.attachments.get_blob_file_access_uri_async(
+             key=attachment_id
+         )
+
+         file_info = FileInfo(
+             url=blob_info.uri,
+             name=blob_info.name,
+             mime_type=mime_type,
+         )
+         file_infos.append(file_info)
+
+     return file_infos