uipath_langchain-0.0.138-py3-none-any.whl → uipath_langchain-0.0.140-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

uipath_langchain/_cli/_runtime/_context.py

@@ -1,17 +1,12 @@
 from typing import Any, Optional, Union
 
 from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
-from langgraph.graph import StateGraph
 from uipath._cli._runtime._contracts import UiPathRuntimeContext
 
-from .._utils._graph import LangGraphConfig
-
 
 class LangGraphRuntimeContext(UiPathRuntimeContext):
     """Context information passed throughout the runtime execution."""
 
-    langgraph_config: Optional[LangGraphConfig] = None
-    state_graph: Optional[StateGraph[Any, Any]] = None
     output: Optional[Any] = None
     state: Optional[Any] = (
         None  # TypedDict issue, the actual type is: Optional[langgraph.types.StateSnapshot]
uipath_langchain/_cli/_runtime/_conversation.py

@@ -173,6 +173,14 @@ def map_message(
                     content_part_sequence=idx,
                 ),
             )
+        elif isinstance(message.content, str) and message.content:
+            msg_event.content_part = UiPathConversationContentPartEvent(
+                content_part_id=f"content-{message.id}",
+                chunk=UiPathConversationContentPartChunkEvent(
+                    data=message.content,
+                    content_part_sequence=0,
+                ),
+            )
 
         stop_reason = message.response_metadata.get("stop_reason")
         if not message.content and stop_reason in ("tool_use", "end_turn"):
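
The new elif branch covers messages whose content is a plain non-empty string rather than a list of content parts; these now emit a single content-part chunk instead of being skipped. A minimal sketch of the branch's effect (the message id and text are hypothetical):

from langchain_core.messages import AIMessage

message = AIMessage(id="msg-1", content="Hello from the agent")

# message.content is a non-empty str, so map_message now attaches one chunk:
#   content_part_id       -> "content-msg-1"
#   data                  -> "Hello from the agent"
#   content_part_sequence -> 0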
uipath_langchain/_cli/_runtime/_graph_resolver.py (new file)

@@ -0,0 +1,143 @@
+import asyncio
+from typing import Any, Awaitable, Callable, Optional
+
+from langgraph.graph.state import CompiledStateGraph, StateGraph
+from uipath._cli._runtime._contracts import (
+    UiPathErrorCategory,
+)
+
+from .._utils._graph import GraphConfig, LangGraphConfig
+from ._exception import LangGraphRuntimeError
+
+
+class LangGraphJsonResolver:
+    def __init__(self, entrypoint: Optional[str] = None) -> None:
+        self.entrypoint = entrypoint
+        self.graph_config: Optional[GraphConfig] = None
+        self._lock = asyncio.Lock()
+        self._graph_cache: Optional[StateGraph[Any, Any, Any]] = None
+        self._resolving: bool = False
+
+    async def __call__(self) -> StateGraph[Any, Any, Any]:
+        # Fast path: if already resolved, return immediately without locking
+        if self._graph_cache is not None:
+            return self._graph_cache
+
+        # Slow path: acquire lock and resolve
+        async with self._lock:
+            # Double-check after acquiring lock (another coroutine may have resolved it)
+            if self._graph_cache is not None:
+                return self._graph_cache
+
+            self._graph_cache = await self._resolve(self.entrypoint)
+            return self._graph_cache
+
+    async def _resolve(self, entrypoint: Optional[str]) -> StateGraph[Any, Any, Any]:
+        config = LangGraphConfig()
+        if not config.exists:
+            raise LangGraphRuntimeError(
+                "CONFIG_MISSING",
+                "Invalid configuration",
+                "Failed to load configuration",
+                UiPathErrorCategory.DEPLOYMENT,
+            )
+
+        try:
+            config.load_config()
+        except Exception as e:
+            raise LangGraphRuntimeError(
+                "CONFIG_INVALID",
+                "Invalid configuration",
+                f"Failed to load configuration: {str(e)}",
+                UiPathErrorCategory.DEPLOYMENT,
+            ) from e
+
+        # Determine entrypoint if not provided
+        graphs = config.graphs
+        if not entrypoint and len(graphs) == 1:
+            entrypoint = graphs[0].name
+        elif not entrypoint:
+            graph_names = ", ".join(g.name for g in graphs)
+            raise LangGraphRuntimeError(
+                "ENTRYPOINT_MISSING",
+                "Entrypoint required",
+                f"Multiple graphs available. Please specify one of: {graph_names}.",
+                UiPathErrorCategory.DEPLOYMENT,
+            )
+
+        # Get the specified graph
+        self.graph_config = config.get_graph(entrypoint)
+        if not self.graph_config:
+            raise LangGraphRuntimeError(
+                "GRAPH_NOT_FOUND",
+                "Graph not found",
+                f"Graph '{entrypoint}' not found.",
+                UiPathErrorCategory.DEPLOYMENT,
+            )
+        try:
+            loaded_graph = await self.graph_config.load_graph()
+            return (
+                loaded_graph.builder
+                if isinstance(loaded_graph, CompiledStateGraph)
+                else loaded_graph
+            )
+        except ImportError as e:
+            raise LangGraphRuntimeError(
+                "GRAPH_IMPORT_ERROR",
+                "Graph import failed",
+                f"Failed to import graph '{entrypoint}': {str(e)}",
+                UiPathErrorCategory.USER,
+            ) from e
+        except TypeError as e:
+            raise LangGraphRuntimeError(
+                "GRAPH_TYPE_ERROR",
+                "Invalid graph type",
+                f"Graph '{entrypoint}' is not a valid StateGraph or CompiledStateGraph: {str(e)}",
+                UiPathErrorCategory.USER,
+            ) from e
+        except ValueError as e:
+            raise LangGraphRuntimeError(
+                "GRAPH_VALUE_ERROR",
+                "Invalid graph value",
+                f"Invalid value in graph '{entrypoint}': {str(e)}",
+                UiPathErrorCategory.USER,
+            ) from e
+        except Exception as e:
+            raise LangGraphRuntimeError(
+                "GRAPH_LOAD_ERROR",
+                "Failed to load graph",
+                f"Unexpected error loading graph '{entrypoint}': {str(e)}",
+                UiPathErrorCategory.USER,
+            ) from e
+
+    async def cleanup(self):
+        """Clean up resources"""
+        async with self._lock:
+            if self.graph_config:
+                await self.graph_config.cleanup()
+                self.graph_config = None
+            self._graph_cache = None
+
+
+AsyncResolver = Callable[[], Awaitable[StateGraph[Any, Any, Any]]]
+
+
+class LangGraphJsonResolverContext:
+    """
+    Async context manager wrapping LangGraphJsonResolver.
+    Returns a callable that can be passed directly as AsyncResolver to LangGraphRuntime.
+    Thread-safe and reuses the same resolved graph across concurrent executions.
+    """
+
+    def __init__(self, entrypoint: Optional[str] = None) -> None:
+        self._resolver = LangGraphJsonResolver(entrypoint)
+
+    async def __aenter__(self) -> AsyncResolver:
+        # Return a callable that safely reuses the cached graph
+        async def resolver_callable() -> StateGraph[Any, Any, Any]:
+            return await self._resolver()
+
+        return resolver_callable
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
+        await self._resolver.cleanup()
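
A usage sketch for the context-manager wrapper, assuming a langgraph.json in the working directory that defines a single graph named "agent" (the name is hypothetical):

import asyncio

from uipath_langchain._cli._runtime._graph_resolver import LangGraphJsonResolverContext


async def main() -> None:
    async with LangGraphJsonResolverContext(entrypoint="agent") as resolve_graph:
        graph = await resolve_graph()       # first call resolves and caches the StateGraph
        same_graph = await resolve_graph()  # subsequent calls return the cached instance
        assert graph is same_graph
    # __aexit__ has already awaited the resolver's cleanup() at this point


asyncio.run(main())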
uipath_langchain/_cli/_runtime/_runtime.py

@@ -1,24 +1,22 @@
-import json
 import logging
 import os
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Sequence
 
 from langchain_core.callbacks.base import BaseCallbackHandler
 from langchain_core.messages import BaseMessage
 from langchain_core.runnables.config import RunnableConfig
 from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver
 from langgraph.errors import EmptyInputError, GraphRecursionError, InvalidUpdateError
-from langgraph.graph.state import CompiledStateGraph
 from uipath._cli._runtime._contracts import (
     UiPathBaseRuntime,
     UiPathErrorCategory,
     UiPathRuntimeResult,
 )
 
-from .._utils._graph import LangGraphConfig
 from ._context import LangGraphRuntimeContext
 from ._conversation import map_message
 from ._exception import LangGraphRuntimeError
+from ._graph_resolver import AsyncResolver, LangGraphJsonResolver
 from ._input import LangGraphInputProcessor
 from ._output import LangGraphOutputProcessor
 
@@ -31,9 +29,10 @@ class LangGraphRuntime(UiPathBaseRuntime):
     This allows using the class with 'async with' statements.
     """
 
-    def __init__(self, context: LangGraphRuntimeContext):
+    def __init__(self, context: LangGraphRuntimeContext, graph_resolver: AsyncResolver):
         super().__init__(context)
         self.context: LangGraphRuntimeContext = context
+        self.graph_resolver: AsyncResolver = graph_resolver
 
     async def execute(self) -> Optional[UiPathRuntimeResult]:
         """
@@ -46,7 +45,8 @@ class LangGraphRuntime(UiPathBaseRuntime):
             LangGraphRuntimeError: If execution fails
         """
 
-        if self.context.state_graph is None:
+        graph = await self.graph_resolver()
+        if not graph:
             return None
 
         try:
@@ -56,9 +56,7 @@ class LangGraphRuntime(UiPathBaseRuntime):
                 self.context.memory = memory
 
                 # Compile the graph with the checkpointer
-                graph = self.context.state_graph.compile(
-                    checkpointer=self.context.memory
-                )
+                compiled_graph = graph.compile(checkpointer=self.context.memory)
 
                 # Process input, handling resume if needed
                 input_processor = LangGraphInputProcessor(context=self.context)
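
For reference, a minimal standalone sketch of the same compile-and-invoke step, assuming builder is a StateGraph like the one sketched above and using an in-memory SQLite checkpointer:

from langgraph.checkpoint.sqlite.aio import AsyncSqliteSaver


async def run_once() -> None:
    async with AsyncSqliteSaver.from_conn_string(":memory:") as memory:
        compiled_graph = builder.compile(checkpointer=memory)
        # A thread_id is required whenever a checkpointer is attached
        result = await compiled_graph.ainvoke(
            {"value": 1}, {"configurable": {"thread_id": "t1"}}
        )
        print(result)  # -> {'value': 2}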
@@ -86,48 +84,50 @@ class LangGraphRuntime(UiPathBaseRuntime):
                 if max_concurrency is not None:
                     graph_config["max_concurrency"] = int(max_concurrency)
 
-                if self.context.chat_handler:
-                    async for stream_chunk in graph.astream(
+                if self.context.chat_handler or self.is_debug_run():
+                    final_chunk: Optional[dict[Any, Any]] = None
+                    async for stream_chunk in compiled_graph.astream(
                         processed_input,
                         graph_config,
-                        stream_mode="messages",
+                        stream_mode=["messages", "updates"],
                         subgraphs=True,
                     ):
-                        if not isinstance(stream_chunk, tuple) or len(stream_chunk) < 2:
-                            continue
-
-                        _, (message, _) = stream_chunk
-                        event = map_message(
-                            message=message,
-                            conversation_id=self.context.execution_id,
-                            exchange_id=self.context.execution_id,
-                        )
-                        if event:
-                            self.context.chat_handler.on_event(event)
-
-                # Stream the output at debug time
-                elif self.is_debug_run():
-                    # Get final chunk while streaming
-                    final_chunk = None
-                    async for stream_chunk in graph.astream(
-                        processed_input,
-                        graph_config,
-                        stream_mode="updates",
-                        subgraphs=True,
-                    ):
-                        self._pretty_print(stream_chunk)
-                        final_chunk = stream_chunk
-
-                    self.context.output = self._extract_graph_result(final_chunk, graph)
+                        _, chunk_type, data = stream_chunk
+                        if chunk_type == "messages":
+                            if self.context.chat_handler:
+                                if isinstance(data, tuple):
+                                    message, _ = data
+                                    event = map_message(
+                                        message=message,
+                                        conversation_id=self.context.execution_id,
+                                        exchange_id=self.context.execution_id,
+                                    )
+                                    if event:
+                                        self.context.chat_handler.on_event(event)
+                        elif chunk_type == "updates":
+                            if isinstance(data, dict):
+                                # data is a dict, e.g. {'agent': {'messages': [...]}}
+                                for agent_data in data.values():
+                                    if isinstance(agent_data, dict):
+                                        messages = agent_data.get("messages", [])
+                                        if isinstance(messages, list):
+                                            for message in messages:
+                                                if isinstance(message, BaseMessage):
+                                                    message.pretty_print()
+                            final_chunk = data
+
+                    self.context.output = self._extract_graph_result(
+                        final_chunk, compiled_graph.output_channels
+                    )
                 else:
                     # Execute the graph normally at runtime or eval
-                    self.context.output = await graph.ainvoke(
+                    self.context.output = await compiled_graph.ainvoke(
                         processed_input, graph_config
                     )
 
                 # Get the state if available
                 try:
-                    self.context.state = await graph.aget_state(graph_config)
+                    self.context.state = await compiled_graph.aget_state(graph_config)
                 except Exception:
                     pass
 
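The rewritten loop leans on LangGraph's streaming contract: with subgraphs=True and a list of stream modes, every chunk arrives as a (namespace, mode, payload) 3-tuple. A sketch restating the two payload shapes the loop consumes (names reuse the variables above):

async for namespace, mode, payload in compiled_graph.astream(
    processed_input,
    graph_config,
    stream_mode=["messages", "updates"],
    subgraphs=True,
):
    if mode == "messages":
        message, metadata = payload   # (BaseMessage chunk, metadata dict)
    elif mode == "updates":
        final_chunk = payload         # {node_name: state_update} dict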
@@ -177,95 +177,12 @@ class LangGraphRuntime(UiPathBaseRuntime):
             pass
 
     async def validate(self) -> None:
-        """Validate runtime inputs."""
-        """Load and validate the graph configuration ."""
-        if self.context.langgraph_config is None:
-            self.context.langgraph_config = LangGraphConfig()
-        if not self.context.langgraph_config.exists:
-            raise LangGraphRuntimeError(
-                "CONFIG_MISSING",
-                "Invalid configuration",
-                "Failed to load configuration",
-                UiPathErrorCategory.DEPLOYMENT,
-            )
-
-        try:
-            self.context.langgraph_config.load_config()
-        except Exception as e:
-            raise LangGraphRuntimeError(
-                "CONFIG_INVALID",
-                "Invalid configuration",
-                f"Failed to load configuration: {str(e)}",
-                UiPathErrorCategory.DEPLOYMENT,
-            ) from e
-
-        # Determine entrypoint if not provided
-        graphs = self.context.langgraph_config.graphs
-        if not self.context.entrypoint and len(graphs) == 1:
-            self.context.entrypoint = graphs[0].name
-        elif not self.context.entrypoint:
-            graph_names = ", ".join(g.name for g in graphs)
-            raise LangGraphRuntimeError(
-                "ENTRYPOINT_MISSING",
-                "Entrypoint required",
-                f"Multiple graphs available. Please specify one of: {graph_names}.",
-                UiPathErrorCategory.DEPLOYMENT,
-            )
-
-        # Get the specified graph
-        self.graph_config = self.context.langgraph_config.get_graph(
-            self.context.entrypoint
-        )
-        if not self.graph_config:
-            raise LangGraphRuntimeError(
-                "GRAPH_NOT_FOUND",
-                "Graph not found",
-                f"Graph '{self.context.entrypoint}' not found.",
-                UiPathErrorCategory.DEPLOYMENT,
-            )
-        try:
-            loaded_graph = await self.graph_config.load_graph()
-            self.context.state_graph = (
-                loaded_graph.builder
-                if isinstance(loaded_graph, CompiledStateGraph)
-                else loaded_graph
-            )
-        except ImportError as e:
-            raise LangGraphRuntimeError(
-                "GRAPH_IMPORT_ERROR",
-                "Graph import failed",
-                f"Failed to import graph '{self.context.entrypoint}': {str(e)}",
-                UiPathErrorCategory.USER,
-            ) from e
-        except TypeError as e:
-            raise LangGraphRuntimeError(
-                "GRAPH_TYPE_ERROR",
-                "Invalid graph type",
-                f"Graph '{self.context.entrypoint}' is not a valid StateGraph or CompiledStateGraph: {str(e)}",
-                UiPathErrorCategory.USER,
-            ) from e
-        except ValueError as e:
-            raise LangGraphRuntimeError(
-                "GRAPH_VALUE_ERROR",
-                "Invalid graph value",
-                f"Invalid value in graph '{self.context.entrypoint}': {str(e)}",
-                UiPathErrorCategory.USER,
-            ) from e
-        except Exception as e:
-            raise LangGraphRuntimeError(
-                "GRAPH_LOAD_ERROR",
-                "Failed to load graph",
-                f"Unexpected error loading graph '{self.context.entrypoint}': {str(e)}",
-                UiPathErrorCategory.USER,
-            ) from e
+        pass
 
     async def cleanup(self):
-        if hasattr(self, "graph_config") and self.graph_config:
-            await self.graph_config.cleanup()
+        pass
 
-    def _extract_graph_result(
-        self, final_chunk, graph: CompiledStateGraph[Any, Any, Any]
-    ):
+    def _extract_graph_result(self, final_chunk, output_channels: str | Sequence[str]):
         """
         Extract the result from a LangGraph output chunk according to the graph's output channels.
 
@@ -283,11 +200,9 @@ class LangGraphRuntime(UiPathBaseRuntime):
             ]  # Extract data part from (namespace, data) tuple
 
         # If the result isn't a dict or graph doesn't define output channels, return as is
-        if not isinstance(final_chunk, dict) or not hasattr(graph, "output_channels"):
+        if not isinstance(final_chunk, dict):
             return final_chunk
 
-        output_channels = graph.output_channels
-
         # Case 1: Single output channel as string
         if isinstance(output_channels, str):
             if output_channels in final_chunk:
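
A worked illustration of the single-channel case handled here, with a hypothetical final chunk: when output_channels is a string present in the chunk, only that channel's value is returned as the runtime output.

final_chunk = {"messages": ["done"], "score": 0.9}  # hypothetical state update
output_channels = "messages"

assert final_chunk[output_channels] == ["done"]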
@@ -323,57 +238,18 @@ class LangGraphRuntime(UiPathBaseRuntime):
         # Fallback for any other case
         return final_chunk
 
-    def _pretty_print(self, stream_chunk: Union[Tuple[Any, Any], Dict[str, Any], Any]):
-        """
-        Pretty print a chunk from a LangGraph stream with stream_mode="updates" and subgraphs=True.
 
-        Args:
-            stream_chunk: A tuple of (namespace, updates) from graph.astream()
-        """
-        if not isinstance(stream_chunk, tuple) or len(stream_chunk) < 2:
-            return
-
-        node_namespace = ""
-        chunk_namespace = stream_chunk[0]
-        node_updates = stream_chunk[1]
-
-        # Extract namespace if available
-        if chunk_namespace and len(chunk_namespace) > 0:
-            node_namespace = chunk_namespace[0]
-
-        if not isinstance(node_updates, dict):
-            logger.info("Raw update: %s", node_updates)
-            return
-
-        # Process each node's updates
-        for node_name, node_result in node_updates.items():
-            # Log node identifier with appropriate namespace context
-            if node_namespace:
-                logger.info("[%s][%s]", node_namespace, node_name)
-            else:
-                logger.info("[%s]", node_name)
-
-            # Handle non-dict results
-            if not isinstance(node_result, dict):
-                logger.info("%s", node_result)
-                continue
-
-            # Process messages specially
-            messages = node_result.get("messages", [])
-            if isinstance(messages, list):
-                for message in messages:
-                    if isinstance(message, BaseMessage):
-                        message.pretty_print()
-
-            # Exclude "messages" from node_result and pretty-print the rest
-            metadata = {k: v for k, v in node_result.items() if k != "messages"}
-            if metadata:
-                try:
-                    formatted_metadata = json.dumps(
-                        metadata,
-                        indent=2,
-                        ensure_ascii=False,
-                    )
-                    logger.info("%s", formatted_metadata)
-                except (TypeError, ValueError):
-                    pass
+class LangGraphScriptRuntime(LangGraphRuntime):
+    """
+    Resolves the graph from langgraph.json config file and passes it to the base runtime.
+    """
+
+    def __init__(
+        self, context: LangGraphRuntimeContext, entrypoint: Optional[str] = None
+    ):
+        self.resolver = LangGraphJsonResolver(entrypoint=entrypoint)
+        super().__init__(context, self.resolver)
+
+    async def cleanup(self):
+        await super().cleanup()
+        await self.resolver.cleanup()
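
A minimal wiring sketch for the new subclass, mirroring what the CLI middlewares below do through UiPathRuntimeFactory (the entrypoint name is hypothetical, and with_defaults() is assumed to accept no required arguments here):

context = LangGraphRuntimeContext.with_defaults()  # assuming no required kwargs
context.entrypoint = "agent"  # hypothetical graph name from langgraph.json

runtime = LangGraphScriptRuntime(context, context.entrypoint)
# runtime.graph_resolver is the underlying LangGraphJsonResolver;
# runtime.cleanup() also awaits the resolver's cleanup().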
uipath_langchain/_cli/cli_dev.py

@@ -12,7 +12,7 @@ from uipath._cli.middlewares import MiddlewareResult
 
 from .._tracing import _instrument_traceable_attributes
 from ._runtime._context import LangGraphRuntimeContext
-from ._runtime._runtime import LangGraphRuntime
+from ._runtime._runtime import LangGraphScriptRuntime
 
 console = ConsoleLogger()
 
@@ -22,8 +22,14 @@ def langgraph_dev_middleware(interface: Optional[str]) -> MiddlewareResult:
 
     try:
         if interface == "terminal":
+
+            def generate_runtime(
+                ctx: LangGraphRuntimeContext,
+            ) -> LangGraphScriptRuntime:
+                return LangGraphScriptRuntime(ctx, ctx.entrypoint)
+
             runtime_factory = UiPathRuntimeFactory(
-                LangGraphRuntime, LangGraphRuntimeContext
+                LangGraphScriptRuntime, LangGraphRuntimeContext, generate_runtime
             )
 
             _instrument_traceable_attributes()
uipath_langchain/_cli/cli_eval.py

@@ -17,7 +17,7 @@ from uipath._events._event_bus import EventBus
 from uipath.eval._helpers import auto_discover_entrypoint
 
 from uipath_langchain._cli._runtime._context import LangGraphRuntimeContext
-from uipath_langchain._cli._runtime._runtime import LangGraphRuntime
+from uipath_langchain._cli._runtime._runtime import LangGraphScriptRuntime
 from uipath_langchain._cli._utils._graph import LangGraphConfig
 from uipath_langchain._tracing import (
     LangChainExporter,
@@ -48,10 +48,9 @@ def langgraph_eval_middleware(
     asyncio.run(console_reporter.subscribe_to_eval_runtime_events(event_bus))
 
     def generate_runtime_context(
-        context_entrypoint: str, langgraph_config: LangGraphConfig, **context_kwargs
+        context_entrypoint: str, **context_kwargs
     ) -> LangGraphRuntimeContext:
         context = LangGraphRuntimeContext.with_defaults(**context_kwargs)
-        context.langgraph_config = langgraph_config
         context.entrypoint = context_entrypoint
         return context
 
@@ -63,14 +62,17 @@ def langgraph_eval_middleware(
     eval_context.eval_set = eval_set or EvalHelpers.auto_discover_eval_set()
     eval_context.eval_ids = eval_ids
 
+    def generate_runtime(ctx: LangGraphRuntimeContext) -> LangGraphScriptRuntime:
+        return LangGraphScriptRuntime(ctx, ctx.entrypoint)
+
     runtime_factory = UiPathRuntimeFactory(
-        LangGraphRuntime,
+        LangGraphScriptRuntime,
         LangGraphRuntimeContext,
         context_generator=lambda **context_kwargs: generate_runtime_context(
             context_entrypoint=runtime_entrypoint,
-            langgraph_config=config,
             **context_kwargs,
         ),
+        runtime_generator=generate_runtime,
     )
 
     if eval_context.job_id:
uipath_langchain/_cli/cli_run.py

@@ -14,8 +14,8 @@ from uipath._cli.middlewares import MiddlewareResult
 from .._tracing import LangChainExporter, _instrument_traceable_attributes
 from ._runtime._exception import LangGraphRuntimeError
 from ._runtime._runtime import (  # type: ignore[attr-defined]
-    LangGraphRuntime,
     LangGraphRuntimeContext,
+    LangGraphScriptRuntime,
 )
 from ._utils._graph import LangGraphConfig
 
@@ -32,15 +32,14 @@ def langgraph_run_middleware(
 
     try:
         context = LangGraphRuntimeContext.with_defaults(**kwargs)
-        context.langgraph_config = config
         context.entrypoint = entrypoint
         context.input = input
         context.resume = resume
 
         _instrument_traceable_attributes()
 
-        def generate_runtime(ctx: LangGraphRuntimeContext) -> LangGraphRuntime:
-            runtime = LangGraphRuntime(ctx)
+        def generate_runtime(ctx: LangGraphRuntimeContext) -> LangGraphScriptRuntime:
+            runtime = LangGraphScriptRuntime(ctx, ctx.entrypoint)
             # If not resuming and no job id, delete the previous state file
             if not ctx.resume and ctx.job_id is None:
                 if os.path.exists(runtime.state_file_path):
@@ -49,7 +48,7 @@ def langgraph_run_middleware(
 
         async def execute():
             runtime_factory = UiPathRuntimeFactory(
-                LangGraphRuntime,
+                LangGraphScriptRuntime,
                 LangGraphRuntimeContext,
                 runtime_generator=generate_runtime,
             )
uipath_langchain/_utils/_request_mixin.py

@@ -9,7 +9,7 @@ import httpx
 import openai
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models.chat_models import _cleanup_llm_representation
-from pydantic import BaseModel, ConfigDict, Field, SecretStr
+from pydantic import BaseModel, ConfigDict, Field, SecretStr, ValidationError
 from tenacity import (
     AsyncRetrying,
     Retrying,
@@ -17,7 +17,7 @@ from tenacity import (
     stop_after_attempt,
     wait_exponential_jitter,
 )
-from uipath._cli._runtime._contracts import UiPathErrorCategory
+from uipath._cli._runtime._contracts import UiPathErrorCategory, UiPathRuntimeError
 from uipath._utils._ssl_context import get_httpx_client_kwargs
 
 from uipath_langchain._cli._runtime._exception import LangGraphRuntimeError
@@ -36,6 +36,33 @@ def get_from_uipath_url():
     return None
 
 
+def _get_access_token(data):
+    """Get access token from settings, environment variables, or UiPath client factory."""
+    token = (
+        getattr(data["settings"], "access_token", None)
+        or os.getenv("UIPATH_ACCESS_TOKEN")
+        or os.getenv("UIPATH_SERVICE_TOKEN")
+    )
+
+    if token:
+        return token
+
+    try:
+        settings = UiPathClientFactorySettings(
+            UIPATH_BASE_URL=data["base_url"],
+            UIPATH_CLIENT_ID=data["client_id"],
+            UIPATH_CLIENT_SECRET=data["client_secret"],
+        )
+        return get_uipath_token_header(settings)
+    except ValidationError:
+        raise UiPathRuntimeError(
+            code="AUTHENTICATION_REQUIRED",
+            title="Authorization required",
+            detail="Authorization required. Please run uipath auth",
+            category=UiPathErrorCategory.USER,
+        ) from None
+
+
 class UiPathRequestMixin(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
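The refactored helper preserves the old inline lambda's lookup order: explicit settings attribute first, then the UIPATH_ACCESS_TOKEN and UIPATH_SERVICE_TOKEN environment variables, then a token from the client factory. Only the final step can now fail with a friendly AUTHENTICATION_REQUIRED error instead of a raw ValidationError. A small precedence sketch with hypothetical values:

import os

os.environ["UIPATH_ACCESS_TOKEN"] = "token-from-env"  # hypothetical value

data = {"settings": None, "base_url": "", "client_id": "", "client_secret": ""}
# getattr(None, "access_token", None) is None, so the environment variable wins:
assert _get_access_token(data) == "token-from-env"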
@@ -62,19 +89,9 @@ class UiPathRequestMixin(BaseModel):
         alias="azure_endpoint",
     )
     access_token: Optional[str] = Field(
-        default_factory=lambda data: (
-            getattr(data["settings"], "access_token", None)
-            or os.getenv("UIPATH_ACCESS_TOKEN")  # Environment variable
-            or os.getenv("UIPATH_SERVICE_TOKEN")  # Environment variable
-            or get_uipath_token_header(
-                UiPathClientFactorySettings(
-                    UIPATH_BASE_URL=data["base_url"],
-                    UIPATH_CLIENT_ID=data["client_id"],
-                    UIPATH_CLIENT_SECRET=data["client_secret"],
-                )
-            )  # Get service token from UiPath
-        )
+        default_factory=lambda data: _get_access_token(data)
     )
+
     org_id: Any = Field(
         default_factory=lambda data: getattr(data["settings"], "org_id", None)
         or os.getenv("UIPATH_ORGANIZATION_ID", "")
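
The Field keeps Pydantic's data-aware default_factory: a one-argument callable that receives the already validated fields (supported since Pydantic 2.10). A self-contained sketch of the same pattern with hypothetical fields:

from pydantic import BaseModel, Field


class Example(BaseModel):
    base_url: str
    api_url: str = Field(default_factory=lambda data: data["base_url"] + "/api")


print(Example(base_url="https://cloud.uipath.com").api_url)
# -> https://cloud.uipath.com/api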
uipath_langchain/chat/models.py

@@ -1,15 +1,15 @@
 import json
 import logging
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, AsyncIterator, Dict, Iterator, List, Literal, Optional, Union
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import AIMessage, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
 from langchain_core.messages.ai import UsageMetadata
-from langchain_core.outputs import ChatGeneration, ChatResult
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_openai.chat_models import AzureChatOpenAI
 from pydantic import BaseModel
@@ -49,6 +49,54 @@ class UiPathAzureChatOpenAI(UiPathRequestMixin, AzureChatOpenAI):
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = self._call(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+            )
+        )
+        yield chunk
+
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        if "tools" in kwargs and not kwargs["tools"]:
+            del kwargs["tools"]
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = await self._acall(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+            )
+        )
+        yield chunk
+
     def with_structured_output(
         self,
         schema: Optional[Any] = None,
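
Because _stream and _astream wrap a single non-streaming HTTP call, callers using the standard LangChain streaming API receive exactly one chunk. A behavior sketch, assuming credentials are configured in the environment and with a hypothetical model name:

llm = UiPathAzureChatOpenAI(model="gpt-4o-mini")  # hypothetical model

chunks = list(llm.stream("Say hi"))
assert len(chunks) == 1  # the whole completion arrives as one chunk
print(chunks[0].content)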
@@ -217,6 +265,92 @@ class UiPathChat(UiPathRequestMixin, AzureChatOpenAI):
         response = await self._acall(self.url, payload, self.auth_headers)
         return self._create_chat_result(response)
 
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        """Stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
+            kwargs["tool_choice"] = {
+                "type": "tool",
+                "name": kwargs["tool_choice"]["function"]["name"],
+            }
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = self._call(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+                tool_calls=getattr(
+                    chat_result.generations[0].message, "tool_calls", None
+                ),
+            )
+        )
+        yield chunk
+
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        """Async stream the LLM on a given prompt.
+
+        Args:
+            messages: the prompt composed of a list of messages.
+            stop: a list of strings on which the model should stop generating.
+            run_manager: A run manager with callbacks for the LLM.
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An async iterator of ChatGenerationChunk objects.
+        """
+        if kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
+        if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
+            kwargs["tool_choice"] = {
+                "type": "tool",
+                "name": kwargs["tool_choice"]["function"]["name"],
+            }
+        payload = self._get_request_payload(messages, stop=stop, **kwargs)
+        response = await self._acall(self.url, payload, self.auth_headers)
+
+        # For non-streaming response, yield single chunk
+        chat_result = self._create_chat_result(response)
+        chunk = ChatGenerationChunk(
+            message=AIMessageChunk(
+                content=chat_result.generations[0].message.content,
+                additional_kwargs=chat_result.generations[0].message.additional_kwargs,
+                response_metadata=chat_result.generations[0].message.response_metadata,
+                usage_metadata=chat_result.generations[0].message.usage_metadata,  # type: ignore
+                tool_calls=getattr(
+                    chat_result.generations[0].message, "tool_calls", None
+                ),
+            )
+        )
+        yield chunk
+
     def with_structured_output(
         self,
         schema: Optional[Any] = None,
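
Both UiPathChat methods additionally normalize OpenAI-style tool definitions and tool_choice values before building the payload. The tool_choice conversion is plain dict manipulation, shown standalone (tool name hypothetical):

tool_choice = {"type": "function", "function": {"name": "get_weather"}}

if tool_choice["type"] == "function":
    tool_choice = {"type": "tool", "name": tool_choice["function"]["name"]}

assert tool_choice == {"type": "tool", "name": "get_weather"}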
uipath_langchain-0.0.140.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath-langchain
-Version: 0.0.138
+Version: 0.0.140
 Summary: UiPath Langchain
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
uipath_langchain-0.0.140.dist-info/RECORD

@@ -1,17 +1,18 @@
 uipath_langchain/__init__.py,sha256=VBrvQn7d3nuOdN7zEnV2_S-uhmkjgEIlXiFVeZxZakQ,80
 uipath_langchain/middlewares.py,sha256=6ljfbtWekrYc5G9KWDLSaViJ1DVIaNM-4qeB1BfHywE,731
 uipath_langchain/_cli/__init__.py,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24
-uipath_langchain/_cli/cli_dev.py,sha256=3e9RldNGirIk9184NdLK6kDuGeeqZjekTxbSZRtXjBE,1505
-uipath_langchain/_cli/cli_eval.py,sha256=rsa13I3bPxAsc0Gf5KFyvf5AMT8WdhgY7kT7ZAyXrJM,3572
+uipath_langchain/_cli/cli_dev.py,sha256=l3XFHrh-0OUFJq3zLMKuzedJAluGQBIZQTHP1KWOmpw,1725
+uipath_langchain/_cli/cli_eval.py,sha256=r8mGlKh-ymxfKrvrU4n0Hg3pQv36c_NhTNR_eokyQEM,3650
 uipath_langchain/_cli/cli_init.py,sha256=xhxJ8tuMSrVUNHvltgyPpOrvgMA-wq9shHeYYwvHILs,8199
 uipath_langchain/_cli/cli_new.py,sha256=KKLxCzz7cDQ__rRr_a496IHWlSQXhmrBNgmKHnXAnTY,2336
-uipath_langchain/_cli/cli_run.py,sha256=hRcoXJgOIFceCswzTfZKyzqVee3j-oSh-13EfuQmmE8,2614
-uipath_langchain/_cli/_runtime/_context.py,sha256=yyzYJDmk2fkH4T5gm4cLGRyXtjLESrpzHBT9euqluTA,817
-uipath_langchain/_cli/_runtime/_conversation.py,sha256=S1KTx_q-La7ikPRT3nBcIp8t-J9CF0QB0DCduQIIB28,11149
+uipath_langchain/_cli/cli_run.py,sha256=X_DI3VZ2RtID0aC5s-RiNy2_XD6qtJqxz6atX61oukM,2612
+uipath_langchain/_cli/_runtime/_context.py,sha256=UnELD9qmW6zEPIYJ3nXjP6EI88-DEJZ2_t1CRPvfypQ,623
+uipath_langchain/_cli/_runtime/_conversation.py,sha256=ayghRqhyLeVUZg1WHnpeOYtPNhRwDOl4z8OSYiJkWSU,11529
 uipath_langchain/_cli/_runtime/_exception.py,sha256=USKkLYkG-dzjX3fEiMMOHnVUpiXJs_xF0OQXCCOvbYM,546
+uipath_langchain/_cli/_runtime/_graph_resolver.py,sha256=5SmYr3KJ_Iy13QtN8XPOOmoSrdysDGlLsgfiebHDXfs,5296
 uipath_langchain/_cli/_runtime/_input.py,sha256=Zx-8ZEr5Z796gdd3NnrlNObMIuXJobAV9mZwOql67Lo,5658
 uipath_langchain/_cli/_runtime/_output.py,sha256=yJOZPWv2FRUJWv1NRs9JmpB4QMTDXu8jrxoaKrfJvzw,9078
-uipath_langchain/_cli/_runtime/_runtime.py,sha256=cVt0TlsEBmKv6EcFTtCOT9ITsguzLDqFGMdhPOiBPRc,14318
+uipath_langchain/_cli/_runtime/_runtime.py,sha256=HTLT9fDSZuAS1e0cAeFhBWxG1iJC0ISjGYgSOuCnfRM,9801
 uipath_langchain/_cli/_templates/langgraph.json.template,sha256=eeh391Gta_hoRgaNaZ58nW1LNvCVXA7hlAH6l7Veous,107
 uipath_langchain/_cli/_templates/main.py.template,sha256=GpSblGH2hwS9ibqQmX2iB2nsmOA5zDfEEF4ChLiMxbQ,875
 uipath_langchain/_cli/_utils/_graph.py,sha256=nMJWy8FmaD9rqPUY2lHc5uVpUzbXD1RO12uJnhe0kdo,6803
@@ -20,12 +21,11 @@ uipath_langchain/_tracing/_instrument_traceable.py,sha256=8f9FyAKWE6kH1N8ErbpwqZ
 uipath_langchain/_tracing/_oteladapter.py,sha256=PD0gsC39ZNvrm0gsfnt1ti6DEy56sBA9sIoxaAbHFFM,8887
 uipath_langchain/_tracing/_utils.py,sha256=r_fiSk3HDDAcePY_UbbEYiSbNqzn5gFeMPYBDvGrFx0,902
 uipath_langchain/_utils/__init__.py,sha256=-w-4TD9ZnJDCpj4VIPXhJciukrmDJJbmnOFnhAkAaEU,81
-uipath_langchain/_utils/_request_mixin.py,sha256=ddKFs_0mjoFCmvPTiOTPJh1IIqYUo5CUka-B7zAZphE,19695
+uipath_langchain/_utils/_request_mixin.py,sha256=sYvvn3_fUJxtF893xFpVGwJx2YoEbw1m5gp_U_lWjR8,20092
 uipath_langchain/_utils/_settings.py,sha256=2fExMQJ88YptfldmzMfZIpsx-m1gfMkeYGf5t6KIe0A,3084
 uipath_langchain/_utils/_sleep_policy.py,sha256=e9pHdjmcCj4CVoFM1jMyZFelH11YatsgWfpyrfXzKBQ,1251
-uipath_langchain/builder/agent_config.py,sha256=b9WODKPjvufj41Ow_dQn5CnaTAjAZyQoNhuAl8vfiso,5809
 uipath_langchain/chat/__init__.py,sha256=WDcvy91ixvZ3Mq7Ae94g5CjyQwXovDBnEv1NlD5SXBE,116
-uipath_langchain/chat/models.py,sha256=m5PRAFXzUamt6-1K9uSlWUvZg_NfVyYHkgoQDJ-1rGs,10527
+uipath_langchain/chat/models.py,sha256=PifcbDURqfttqVYKSnzdbOdbSiLiwHfQ6lWgVAtoLj8,16407
 uipath_langchain/embeddings/__init__.py,sha256=QICtYB58ZyqFfDQrEaO8lTEgAU5NuEKlR7iIrS0OBtc,156
 uipath_langchain/embeddings/embeddings.py,sha256=45gKyb6HVKigwE-0CXeZcAk33c0mteaEdPGa8hviqcw,4339
 uipath_langchain/retrievers/__init__.py,sha256=rOn7PyyHgZ4pMnXWPkGqmuBmx8eGuo-Oyndo7Wm9IUU,108
@@ -34,8 +34,8 @@ uipath_langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 uipath_langchain/tools/preconfigured.py,sha256=xCP0hiQuFKIv45PTvMsoWlwsxJDs7goyZujKflYBngY,7476
 uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
 uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=TncIXG-YsUlO0R5ZYzWsM-Dj1SVCZbzmo2LraVxXelc,9559
-uipath_langchain-0.0.138.dist-info/METADATA,sha256=aM1VS1o-g7Fc7KBltSpLGugqNLi3b3qAbzHE2cy_xs8,4275
-uipath_langchain-0.0.138.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath_langchain-0.0.138.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
-uipath_langchain-0.0.138.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
-uipath_langchain-0.0.138.dist-info/RECORD,,
+uipath_langchain-0.0.140.dist-info/METADATA,sha256=RD2Nx8F1T9nL3QEtgGWFRrtVgJN6rnVSdzMH1QtXYJ0,4275
+uipath_langchain-0.0.140.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath_langchain-0.0.140.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+uipath_langchain-0.0.140.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+uipath_langchain-0.0.140.dist-info/RECORD,,
uipath_langchain/builder/agent_config.py (deleted)

@@ -1,191 +0,0 @@
-from enum import Enum
-from typing import Annotated, Any, Dict, List, Literal, Optional, Union
-
-from pydantic import BaseModel, ConfigDict, Field
-
-
-class AgentMessageRole(str, Enum):
-    """Enum for message roles"""
-
-    SYSTEM = "System"
-    USER = "User"
-
-
-class AgentMessage(BaseModel):
-    """Message model for agent conversations"""
-
-    role: AgentMessageRole
-    content: str
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentSettings(BaseModel):
-    """Settings for agent configuration"""
-
-    engine: str = Field(..., description="Engine type, e.g., 'basic-v1'")
-    model: str = Field(..., description="LLM model identifier")
-    max_tokens: int = Field(
-        ..., alias="maxTokens", description="Maximum number of tokens"
-    )
-    temperature: float = Field(..., description="Temperature for response generation")
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentResourceType(str, Enum):
-    """Enum for resource types"""
-
-    TOOL = "tool"
-    CONTEXT = "context"
-    ESCALATION = "escalation"
-
-
-class AgentBaseResourceConfig(BaseModel):
-    """Base resource model with common properties"""
-
-    name: str
-    description: str
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentUnknownResourceConfig(AgentBaseResourceConfig):
-    """Fallback for unknown or future resource types"""
-
-    resource_type: str = Field(alias="$resourceType")
-
-    model_config = ConfigDict(extra="allow")
-
-
-class AgentToolSettings(BaseModel):
-    """Settings for tool configuration"""
-
-    max_attempts: int = Field(0, alias="maxAttempts")
-    retry_delay: int = Field(0, alias="retryDelay")
-    timeout: int = Field(0)
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentToolProperties(BaseModel):
-    """Properties specific to tool configuration"""
-
-    folder_path: Optional[str] = Field(None, alias="folderPath")
-    process_name: Optional[str] = Field(None, alias="processName")
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentToolResourceConfig(AgentBaseResourceConfig):
-    """Tool resource with tool-specific properties"""
-
-    resource_type: Literal[AgentResourceType.TOOL] = Field(alias="$resourceType")
-    type: str = Field(..., description="Tool type")
-    arguments: Dict[str, Any] = Field(
-        default_factory=dict, description="Tool arguments"
-    )
-    input_schema: Dict[str, Any] = Field(
-        ..., alias="inputSchema", description="Input schema for the tool"
-    )
-    output_schema: Dict[str, Any] = Field(
-        ..., alias="outputSchema", description="Output schema for the tool"
-    )
-    properties: AgentToolProperties = Field(..., description="Tool-specific properties")
-    settings: AgentToolSettings = Field(
-        default_factory=AgentToolSettings, description="Tool settings"
-    )
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentContextSettings(BaseModel):
-    """Settings for context configuration"""
-
-    result_count: int = Field(alias="resultCount")
-    retrieval_mode: Literal["Semantic", "Structured"] = Field(alias="retrievalMode")
-    threshold: float = Field(default=0)
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentContextResourceConfig(AgentBaseResourceConfig):
-    """Context resource with context-specific properties"""
-
-    resource_type: Literal[AgentResourceType.CONTEXT] = Field(alias="$resourceType")
-    folder_path: str = Field(alias="folderPath")
-    index_name: str = Field(alias="indexName")
-    settings: AgentContextSettings = Field(..., description="Context settings")
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-class AgentEscalationResourceConfig(AgentBaseResourceConfig):
-    """Escalation resource with escalation-specific properties"""
-
-    resource_type: Literal[AgentResourceType.ESCALATION] = Field(alias="$resourceType")
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )
-
-
-# Discriminated union for known types
-KnownAgentResourceConfig = Annotated[
-    Union[
-        AgentToolResourceConfig,
-        AgentContextResourceConfig,
-        AgentEscalationResourceConfig,
-    ],
-    Field(discriminator="resource_type"),
-]
-
-# Final union includes unknowns as a catch-all
-AgentResourceConfig = Union[
-    KnownAgentResourceConfig,
-    AgentUnknownResourceConfig,
-]
-
-
-class AgentConfig(BaseModel):
-    """Main agent model"""
-
-    id: str = Field(..., description="Agent id or project name")
-    name: str = Field(..., description="Agent name or project name")
-    input_schema: Dict[str, Any] = Field(
-        ..., alias="inputSchema", description="JSON schema for input arguments"
-    )
-    output_schema: Dict[str, Any] = Field(
-        ..., alias="outputSchema", description="JSON schema for output arguments"
-    )
-    messages: List[AgentMessage] = Field(
-        ..., description="List of system and user messages"
-    )
-    features: List[Any] = Field(
-        default_factory=list, description="Currently empty feature list"
-    )
-    version: str = Field("1.0.0", description="Agent version")
-    settings: AgentSettings = Field(..., description="Agent settings configuration")
-    resources: List[AgentResourceConfig] = Field(
-        ..., description="List of tools, context, and escalation resources"
-    )
-
-    model_config = ConfigDict(
-        validate_by_name=True, validate_by_alias=True, extra="allow"
-    )