langchain 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.

Potentially problematic release: this version of langchain might be problematic.

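The hunk below replaces this module's full ToolNode implementation with a thin backwards-compatibility shim that re-imports its public names from langgraph.prebuilt. A minimal sketch of importing those names from their canonical langgraph location follows; it is assembled only from the import lines visible in the new file and the usage example in the removed docstring, assumes a langgraph version that actually provides these exports, and is an illustrative sketch rather than documentation of the 1.0.3 API.

```python
# Sketch based on the `+` lines in the hunk below: the 1.0.3 shim re-imports
# these names from langgraph.prebuilt, so they can also be imported directly
# from there (assuming a compatible langgraph release is installed).
from langchain_core.tools import tool
from langgraph.prebuilt import InjectedState, InjectedStore, ToolRuntime
from langgraph.prebuilt.tool_node import (
    ToolCallRequest,
    ToolCallWithContext,
    ToolCallWrapper,
    ToolNode,
)


@tool
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b


# Mirrors the usage example from the removed module docstring below.
tool_node = ToolNode([add])
```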
@@ -1,1786 +1,20 @@
1
- """Tool execution node for LangGraph workflows.
1
+ """Utils file included for backwards compat imports."""
2
2
 
3
- This module provides prebuilt functionality for executing tools in LangGraph.
4
-
5
- Tools are functions that models can call to interact with external systems,
6
- APIs, databases, or perform computations.
7
-
8
- The module implements design patterns for:
9
- - Parallel execution of multiple tool calls for efficiency
10
- - Robust error handling with customizable error messages
11
- - State injection for tools that need access to graph state
12
- - Store injection for tools that need persistent storage
13
- - Command-based state updates for advanced control flow
14
-
15
- Key Components:
16
- `ToolNode`: Main class for executing tools in LangGraph workflows
17
- `InjectedState`: Annotation for injecting graph state into tools
18
- `InjectedStore`: Annotation for injecting persistent store into tools
19
- `tools_condition`: Utility function for conditional routing based on tool calls
20
-
21
- Typical Usage:
22
- ```python
23
- from langchain_core.tools import tool
24
- from langchain.tools import ToolNode
25
-
26
-
27
- @tool
28
- def my_tool(x: int) -> str:
29
- return f"Result: {x}"
30
-
31
-
32
- tool_node = ToolNode([my_tool])
33
- ```
34
- """
35
-
36
- from __future__ import annotations
37
-
38
- import asyncio
39
- import inspect
40
- import json
41
- from collections.abc import Awaitable, Callable
42
- from copy import copy, deepcopy
43
- from dataclasses import dataclass, replace
44
- from types import UnionType
45
- from typing import (
46
- TYPE_CHECKING,
47
- Annotated,
48
- Any,
49
- Generic,
50
- Literal,
51
- TypedDict,
52
- Union,
53
- cast,
54
- get_args,
55
- get_origin,
56
- get_type_hints,
57
- )
58
-
59
- from langchain_core.messages import (
60
- AIMessage,
61
- AnyMessage,
62
- RemoveMessage,
63
- ToolCall,
64
- ToolMessage,
65
- convert_to_messages,
66
- )
67
- from langchain_core.runnables.config import (
68
- RunnableConfig,
69
- get_config_list,
70
- get_executor_for_config,
71
- )
72
- from langchain_core.tools import BaseTool, InjectedToolArg
73
- from langchain_core.tools import tool as create_tool
74
- from langchain_core.tools.base import (
75
- TOOL_MESSAGE_BLOCK_TYPES,
76
- ToolException,
77
- _DirectlyInjectedToolArg,
78
- get_all_basemodel_annotations,
79
- )
80
- from langgraph._internal._runnable import RunnableCallable
81
- from langgraph.errors import GraphBubbleUp
82
- from langgraph.graph.message import REMOVE_ALL_MESSAGES
83
- from langgraph.store.base import BaseStore # noqa: TC002
84
- from langgraph.types import Command, Send, StreamWriter
85
- from pydantic import BaseModel, ValidationError
86
- from typing_extensions import TypeVar, Unpack
87
-
88
- if TYPE_CHECKING:
89
- from collections.abc import Sequence
90
-
91
- from langgraph.runtime import Runtime
92
- from pydantic_core import ErrorDetails
93
-
94
- # right now we use a dict as the default, can change this to AgentState, but depends
95
- # on if this lives in LangChain or LangGraph... ideally would have some typed
96
- # messages key
97
- StateT = TypeVar("StateT", default=dict)
98
- ContextT = TypeVar("ContextT", default=None)
99
-
100
- INVALID_TOOL_NAME_ERROR_TEMPLATE = (
101
- "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
3
+ from langgraph.prebuilt import InjectedState, InjectedStore, ToolRuntime
4
+ from langgraph.prebuilt.tool_node import (
5
+ ToolCallRequest,
6
+ ToolCallWithContext,
7
+ ToolCallWrapper,
102
8
  )
103
- TOOL_CALL_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
104
- TOOL_EXECUTION_ERROR_TEMPLATE = (
105
- "Error executing tool '{tool_name}' with kwargs {tool_kwargs} with error:\n"
106
- " {error}\n"
107
- " Please fix the error and try again."
9
+ from langgraph.prebuilt.tool_node import (
10
+ ToolNode as _ToolNode, # noqa: F401
108
11
  )
109
- TOOL_INVOCATION_ERROR_TEMPLATE = (
110
- "Error invoking tool '{tool_name}' with kwargs {tool_kwargs} with error:\n"
111
- " {error}\n"
112
- " Please fix the error and try again."
113
- )
114
-
115
-
116
- class _ToolCallRequestOverrides(TypedDict, total=False):
117
- """Possible overrides for ToolCallRequest.override() method."""
118
-
119
- tool_call: ToolCall
120
-
121
-
122
- @dataclass()
123
- class ToolCallRequest:
124
- """Tool execution request passed to tool call interceptors.
125
-
126
- Attributes:
127
- tool_call: Tool call dict with name, args, and id from model output.
128
- tool: BaseTool instance to be invoked, or None if tool is not
129
- registered with the `ToolNode`. When tool is `None`, interceptors can
130
- handle the request without validation. If the interceptor calls `execute()`,
131
- validation will occur and raise an error for unregistered tools.
132
- state: Agent state (`dict`, `list`, or `BaseModel`).
133
- runtime: LangGraph runtime context (optional, `None` if outside graph).
134
- """
135
-
136
- tool_call: ToolCall
137
- tool: BaseTool | None
138
- state: Any
139
- runtime: ToolRuntime
140
-
141
- def override(self, **overrides: Unpack[_ToolCallRequestOverrides]) -> ToolCallRequest:
142
- """Replace the request with a new request with the given overrides.
143
-
144
- Returns a new `ToolCallRequest` instance with the specified attributes replaced.
145
- This follows an immutable pattern, leaving the original request unchanged.
146
-
147
- Args:
148
- **overrides: Keyword arguments for attributes to override. Supported keys:
149
- - tool_call: Tool call dict with name, args, and id
150
-
151
- Returns:
152
- New ToolCallRequest instance with specified overrides applied.
153
-
154
- Examples:
155
- ```python
156
- # Modify tool call arguments without mutating original
157
- modified_call = {**request.tool_call, "args": {"value": 10}}
158
- new_request = request.override(tool_call=modified_call)
159
-
160
- # Override multiple attributes
161
- new_request = request.override(tool_call=modified_call, state=new_state)
162
- ```
163
- """
164
- return replace(self, **overrides)
165
-
166
-
167
- ToolCallWrapper = Callable[
168
- [ToolCallRequest, Callable[[ToolCallRequest], ToolMessage | Command]],
169
- ToolMessage | Command,
170
- ]
171
- """Wrapper for tool call execution with multi-call support.
172
-
173
- Wrapper receives:
174
- request: ToolCallRequest with tool_call, tool, state, and runtime.
175
- execute: Callable to execute the tool (CAN BE CALLED MULTIPLE TIMES).
176
-
177
- Returns:
178
- ToolMessage or Command (the final result).
179
12
 
180
- The execute callable can be invoked multiple times for retry logic,
181
- with potentially modified requests each time. Each call to execute
182
- is independent and stateless.
183
-
184
- !!! note
185
- When implementing middleware for `create_agent`, use
186
- `AgentMiddleware.wrap_tool_call` which provides properly typed
187
- state parameter for better type safety.
188
-
189
- Examples:
190
- Passthrough (execute once):
191
-
192
- def handler(request, execute):
193
- return execute(request)
194
-
195
- Modify request before execution:
196
-
197
- ```python
198
- def handler(request, execute):
199
- request.tool_call["args"]["value"] *= 2
200
- return execute(request)
201
- ```
202
-
203
- Retry on error (execute multiple times):
204
-
205
- ```python
206
- def handler(request, execute):
207
- for attempt in range(3):
208
- try:
209
- result = execute(request)
210
- if is_valid(result):
211
- return result
212
- except Exception:
213
- if attempt == 2:
214
- raise
215
- return result
216
- ```
217
-
218
- Conditional retry based on response:
219
-
220
- ```python
221
- def handler(request, execute):
222
- for attempt in range(3):
223
- result = execute(request)
224
- if isinstance(result, ToolMessage) and result.status != "error":
225
- return result
226
- if attempt < 2:
227
- continue
228
- return result
229
- ```
230
-
231
- Cache/short-circuit without calling execute:
232
-
233
- ```python
234
- def handler(request, execute):
235
- if cached := get_cache(request):
236
- return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
237
- result = execute(request)
238
- save_cache(request, result)
239
- return result
240
- ```
241
- """
242
-
243
- AsyncToolCallWrapper = Callable[
244
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
245
- Awaitable[ToolMessage | Command],
13
+ __all__ = [
14
+ "InjectedState",
15
+ "InjectedStore",
16
+ "ToolCallRequest",
17
+ "ToolCallWithContext",
18
+ "ToolCallWrapper",
19
+ "ToolRuntime",
246
20
  ]
247
- """Async wrapper for tool call execution with multi-call support."""
248
-
249
-
250
- class ToolCallWithContext(TypedDict):
251
- """ToolCall with additional context for graph state.
252
-
253
- This is an internal data structure meant to help the `ToolNode` accept
254
- tool calls with additional context (e.g. state) when dispatched using the
255
- Send API.
256
-
257
- The Send API is used in create_agent to distribute tool calls in parallel
258
- and support human-in-the-loop workflows where graph execution may be paused
259
- for an indefinite time.
260
- """
261
-
262
- tool_call: ToolCall
263
- __type: Literal["tool_call_with_context"]
264
- """Type to parameterize the payload.
265
-
266
- Using "__" as a prefix to be defensive against potential name collisions with
267
- regular user state.
268
- """
269
- state: Any
270
- """The state is provided as additional context."""
271
-
272
-
273
- def msg_content_output(output: Any) -> str | list[dict]:
274
- """Convert tool output to `ToolMessage` content format.
275
-
276
- Handles `str`, `list[dict]` (content blocks), and arbitrary objects by attempting
277
- JSON serialization with fallback to str().
278
-
279
- Args:
280
- output: Tool execution output of any type.
281
-
282
- Returns:
283
- String or list of content blocks suitable for `ToolMessage.content`.
284
- """
285
- if isinstance(output, str) or (
286
- isinstance(output, list)
287
- and all(isinstance(x, dict) and x.get("type") in TOOL_MESSAGE_BLOCK_TYPES for x in output)
288
- ):
289
- return output
290
- # Technically a list of strings is also valid message content, but it's
291
- # not currently well tested that all chat models support this.
292
- # And for backwards compatibility we want to make sure we don't break
293
- # any existing ToolNode usage.
294
- try:
295
- return json.dumps(output, ensure_ascii=False)
296
- except Exception: # noqa: BLE001
297
- return str(output)
298
-
299
-
300
- class ToolInvocationError(ToolException):
301
- """An error occurred while invoking a tool due to invalid arguments.
302
-
303
- This exception is only raised when invoking a tool using the `ToolNode`!
304
- """
305
-
306
- def __init__(
307
- self,
308
- tool_name: str,
309
- source: ValidationError,
310
- tool_kwargs: dict[str, Any],
311
- filtered_errors: list[ErrorDetails] | None = None,
312
- ) -> None:
313
- """Initialize the ToolInvocationError.
314
-
315
- Args:
316
- tool_name: The name of the tool that failed.
317
- source: The exception that occurred.
318
- tool_kwargs: The keyword arguments that were passed to the tool.
319
- filtered_errors: Optional list of filtered validation errors excluding
320
- injected arguments.
321
- """
322
- # Format error display based on filtered errors if provided
323
- if filtered_errors is not None:
324
- # Manually format the filtered errors without URLs or fancy formatting
325
- error_str_parts = []
326
- for error in filtered_errors:
327
- loc_str = ".".join(str(loc) for loc in error.get("loc", ()))
328
- msg = error.get("msg", "Unknown error")
329
- error_str_parts.append(f"{loc_str}: {msg}")
330
- error_display_str = "\n".join(error_str_parts)
331
- else:
332
- error_display_str = str(source)
333
-
334
- self.message = TOOL_INVOCATION_ERROR_TEMPLATE.format(
335
- tool_name=tool_name, tool_kwargs=tool_kwargs, error=error_display_str
336
- )
337
- self.tool_name = tool_name
338
- self.tool_kwargs = tool_kwargs
339
- self.source = source
340
- self.filtered_errors = filtered_errors
341
- super().__init__(self.message)
342
-
343
-
344
- def _default_handle_tool_errors(e: Exception) -> str:
345
- """Default error handler for tool errors.
346
-
347
- If the tool is a tool invocation error, return its message.
348
- Otherwise, raise the error.
349
- """
350
- if isinstance(e, ToolInvocationError):
351
- return e.message
352
- raise e
353
-
354
-
355
- def _handle_tool_error(
356
- e: Exception,
357
- *,
358
- flag: bool | str | Callable[..., str] | type[Exception] | tuple[type[Exception], ...],
359
- ) -> str:
360
- """Generate error message content based on exception handling configuration.
361
-
362
- This function centralizes error message generation logic, supporting different
363
- error handling strategies configured via the `ToolNode`'s `handle_tool_errors`
364
- parameter.
365
-
366
- Args:
367
- e: The exception that occurred during tool execution.
368
- flag: Configuration for how to handle the error. Can be:
369
- - bool: If `True`, use default error template
370
- - str: Use this string as the error message
371
- - Callable: Call this function with the exception to get error message
372
- - tuple: Not used in this context (handled by caller)
373
-
374
- Returns:
375
- A string containing the error message to include in the `ToolMessage`.
376
-
377
- Raises:
378
- ValueError: If flag is not one of the supported types.
379
-
380
- !!! note
381
- The tuple case is handled by the caller through exception type checking,
382
- not by this function directly.
383
- """
384
- if isinstance(flag, (bool, tuple)) or (isinstance(flag, type) and issubclass(flag, Exception)):
385
- content = TOOL_CALL_ERROR_TEMPLATE.format(error=repr(e))
386
- elif isinstance(flag, str):
387
- content = flag
388
- elif callable(flag):
389
- content = flag(e) # type: ignore [assignment, call-arg]
390
- else:
391
- msg = (
392
- f"Got unexpected type of `handle_tool_error`. Expected bool, str "
393
- f"or callable. Received: {flag}"
394
- )
395
- raise ValueError(msg)
396
- return content
397
-
398
-
399
- def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception], ...]:
400
- """Infer exception types handled by a custom error handler function.
401
-
402
- This function analyzes the type annotations of a custom error handler to determine
403
- which exception types it's designed to handle. This enables type-safe error handling
404
- where only specific exceptions are caught and processed by the handler.
405
-
406
- Args:
407
- handler: A callable that takes an exception and returns an error message string.
408
- The first parameter (after self/cls if present) should be type-annotated
409
- with the exception type(s) to handle.
410
-
411
- Returns:
412
- A tuple of exception types that the handler can process. Returns (Exception,)
413
- if no specific type information is available for backward compatibility.
414
-
415
- Raises:
416
- ValueError: If the handler's annotation contains non-Exception types or
417
- if Union types contain non-Exception types.
418
-
419
- !!! note
420
- This function supports both single exception types and Union types for
421
- handlers that need to handle multiple exception types differently.
422
- """
423
- sig = inspect.signature(handler)
424
- params = list(sig.parameters.values())
425
- if params:
426
- # If it's a method, the first argument is typically 'self' or 'cls'
427
- if params[0].name in ["self", "cls"] and len(params) == 2:
428
- first_param = params[1]
429
- else:
430
- first_param = params[0]
431
-
432
- type_hints = get_type_hints(handler)
433
- if first_param.name in type_hints:
434
- origin = get_origin(first_param.annotation)
435
- if origin in [Union, UnionType]:
436
- args = get_args(first_param.annotation)
437
- if all(issubclass(arg, Exception) for arg in args):
438
- return tuple(args)
439
- msg = (
440
- "All types in the error handler error annotation must be "
441
- "Exception types. For example, "
442
- "`def custom_handler(e: Union[ValueError, TypeError])`. "
443
- f"Got '{first_param.annotation}' instead."
444
- )
445
- raise ValueError(msg)
446
-
447
- exception_type = type_hints[first_param.name]
448
- if Exception in exception_type.__mro__:
449
- return (exception_type,)
450
- msg = (
451
- f"Arbitrary types are not supported in the error handler "
452
- f"signature. Please annotate the error with either a "
453
- f"specific Exception type or a union of Exception types. "
454
- "For example, `def custom_handler(e: ValueError)` or "
455
- "`def custom_handler(e: Union[ValueError, TypeError])`. "
456
- f"Got '{exception_type}' instead."
457
- )
458
- raise ValueError(msg)
459
-
460
- # If no type information is available, return (Exception,)
461
- # for backwards compatibility.
462
- return (Exception,)
463
-
464
-
465
- def _filter_validation_errors(
466
- validation_error: ValidationError,
467
- tool_to_state_args: dict[str, str | None],
468
- tool_to_store_arg: str | None,
469
- tool_to_runtime_arg: str | None,
470
- ) -> list[ErrorDetails]:
471
- """Filter validation errors to only include LLM-controlled arguments.
472
-
473
- When a tool invocation fails validation, only errors for arguments that the LLM
474
- controls should be included in error messages. This ensures the LLM receives
475
- focused, actionable feedback about parameters it can actually fix. System-injected
476
- arguments (state, store, runtime) are filtered out since the LLM has no control
477
- over them.
478
-
479
- This function also removes injected argument values from the `input` field in error
480
- details, ensuring that only LLM-provided arguments appear in error messages.
481
-
482
- Args:
483
- validation_error: The Pydantic ValidationError raised during tool invocation.
484
- tool_to_state_args: Mapping of state argument names to state field names.
485
- tool_to_store_arg: Name of the store argument, if any.
486
- tool_to_runtime_arg: Name of the runtime argument, if any.
487
-
488
- Returns:
489
- List of ErrorDetails containing only errors for LLM-controlled arguments,
490
- with system-injected argument values removed from the input field.
491
- """
492
- injected_args = set(tool_to_state_args.keys())
493
- if tool_to_store_arg:
494
- injected_args.add(tool_to_store_arg)
495
- if tool_to_runtime_arg:
496
- injected_args.add(tool_to_runtime_arg)
497
-
498
- filtered_errors: list[ErrorDetails] = []
499
- for error in validation_error.errors():
500
- # Check if error location contains any injected argument
501
- # error['loc'] is a tuple like ('field_name',) or ('field_name', 'nested_field')
502
- if error["loc"] and error["loc"][0] not in injected_args:
503
- # Create a copy of the error dict to avoid mutating the original
504
- error_copy: dict[str, Any] = {**error}
505
-
506
- # Remove injected arguments from input_value if it's a dict
507
- if isinstance(error_copy.get("input"), dict):
508
- input_dict = error_copy["input"]
509
- input_copy = {k: v for k, v in input_dict.items() if k not in injected_args}
510
- error_copy["input"] = input_copy
511
-
512
- # Cast is safe because ErrorDetails is a TypedDict compatible with this structure
513
- filtered_errors.append(error_copy) # type: ignore[arg-type]
514
-
515
- return filtered_errors
516
-
517
-
518
- class _ToolNode(RunnableCallable):
519
- """A node for executing tools in LangGraph workflows.
520
-
521
- Handles tool execution patterns including function calls, state injection,
522
- persistent storage, and control flow. Manages parallel execution,
523
- error handling.
524
-
525
- Input Formats:
526
- 1. Graph state with `messages` key that has a list of messages:
527
- - Common representation for agentic workflows
528
- - Supports custom messages key via `messages_key` parameter
529
-
530
- 2. **Message List**: `[AIMessage(..., tool_calls=[...])]`
531
- - List of messages with tool calls in the last AIMessage
532
-
533
- 3. **Direct Tool Calls**: `[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]`
534
- - Bypasses message parsing for direct tool execution
535
- - For programmatic tool invocation and testing
536
-
537
- Output Formats:
538
- Output format depends on input type and tool behavior:
539
-
540
- **For Regular tools**:
541
- - Dict input → `{"messages": [ToolMessage(...)]}`
542
- - List input → `[ToolMessage(...)]`
543
-
544
- **For Command tools**:
545
- - Returns `[Command(...)]` or mixed list with regular tool outputs
546
- - Commands can update state, trigger navigation, or send messages
547
-
548
- Args:
549
- tools: A sequence of tools that can be invoked by this node. Supports:
550
- - **BaseTool instances**: Tools with schemas and metadata
551
- - **Plain functions**: Automatically converted to tools with inferred schemas
552
- name: The name identifier for this node in the graph. Used for debugging
553
- and visualization. Defaults to "tools".
554
- tags: Optional metadata tags to associate with the node for filtering
555
- and organization. Defaults to `None`.
556
- handle_tool_errors: Configuration for error handling during tool execution.
557
- Supports multiple strategies:
558
-
559
- - **True**: Catch all errors and return a ToolMessage with the default
560
- error template containing the exception details.
561
- - **str**: Catch all errors and return a ToolMessage with this custom
562
- error message string.
563
- - **type[Exception]**: Only catch exceptions with the specified type and
564
- return the default error message for it.
565
- - **tuple[type[Exception], ...]**: Only catch exceptions with the specified
566
- types and return default error messages for them.
567
- - **Callable[..., str]**: Catch exceptions matching the callable's signature
568
- and return the string result of calling it with the exception.
569
- - **False**: Disable error handling entirely, allowing exceptions to
570
- propagate.
571
-
572
- Defaults to a callable that:
573
- - catches tool invocation errors (due to invalid arguments provided by the model) and returns a descriptive error message
574
- - ignores tool execution errors (they will be re-raised)
575
-
576
- messages_key: The key in the state dictionary that contains the message list.
577
- This same key will be used for the output `ToolMessage` objects.
578
- Defaults to "messages".
579
- Allows custom state schemas with different message field names.
580
-
581
- Examples:
582
- Basic usage:
583
-
584
- ```python
585
- from langchain.tools import ToolNode
586
- from langchain_core.tools import tool
587
-
588
- @tool
589
- def calculator(a: int, b: int) -> int:
590
- \"\"\"Add two numbers.\"\"\"
591
- return a + b
592
-
593
- tool_node = ToolNode([calculator])
594
- ```
595
-
596
- State injection:
597
-
598
- ```python
599
- from typing_extensions import Annotated
600
- from langchain.tools import InjectedState
601
-
602
- @tool
603
- def context_tool(query: str, state: Annotated[dict, InjectedState]) -> str:
604
- \"\"\"Some tool that uses state.\"\"\"
605
- return f"Query: {query}, Messages: {len(state['messages'])}"
606
-
607
- tool_node = ToolNode([context_tool])
608
- ```
609
-
610
- Error handling:
611
-
612
- ```python
613
- def handle_errors(e: ValueError) -> str:
614
- return "Invalid input provided"
615
-
616
-
617
- tool_node = ToolNode([my_tool], handle_tool_errors=handle_errors)
618
- ```
619
- """ # noqa: E501
620
-
621
- name: str = "tools"
622
-
623
- def __init__(
624
- self,
625
- tools: Sequence[BaseTool | Callable],
626
- *,
627
- name: str = "tools",
628
- tags: list[str] | None = None,
629
- handle_tool_errors: bool
630
- | str
631
- | Callable[..., str]
632
- | type[Exception]
633
- | tuple[type[Exception], ...] = _default_handle_tool_errors,
634
- messages_key: str = "messages",
635
- wrap_tool_call: ToolCallWrapper | None = None,
636
- awrap_tool_call: AsyncToolCallWrapper | None = None,
637
- ) -> None:
638
- """Initialize `ToolNode` with tools and configuration.
639
-
640
- Args:
641
- tools: Sequence of tools to make available for execution.
642
- name: Node name for graph identification.
643
- tags: Optional metadata tags.
644
- handle_tool_errors: Error handling configuration.
645
- messages_key: State key containing messages.
646
- wrap_tool_call: Sync wrapper function to intercept tool execution. Receives
647
- ToolCallRequest and execute callable, returns ToolMessage or Command.
648
- Enables retries, caching, request modification, and control flow.
649
- awrap_tool_call: Async wrapper function to intercept tool execution.
650
- If not provided, falls back to wrap_tool_call for async execution.
651
- """
652
- super().__init__(self._func, self._afunc, name=name, tags=tags, trace=False)
653
- self._tools_by_name: dict[str, BaseTool] = {}
654
- self._tool_to_state_args: dict[str, dict[str, str | None]] = {}
655
- self._tool_to_store_arg: dict[str, str | None] = {}
656
- self._tool_to_runtime_arg: dict[str, str | None] = {}
657
- self._handle_tool_errors = handle_tool_errors
658
- self._messages_key = messages_key
659
- self._wrap_tool_call = wrap_tool_call
660
- self._awrap_tool_call = awrap_tool_call
661
- for tool in tools:
662
- if not isinstance(tool, BaseTool):
663
- tool_ = create_tool(cast("type[BaseTool]", tool))
664
- else:
665
- tool_ = tool
666
- self._tools_by_name[tool_.name] = tool_
667
- self._tool_to_state_args[tool_.name] = _get_state_args(tool_)
668
- self._tool_to_store_arg[tool_.name] = _get_store_arg(tool_)
669
- self._tool_to_runtime_arg[tool_.name] = _get_runtime_arg(tool_)
670
-
671
- @property
672
- def tools_by_name(self) -> dict[str, BaseTool]:
673
- """Mapping from tool name to BaseTool instance."""
674
- return self._tools_by_name
675
-
676
- def _func(
677
- self,
678
- input: list[AnyMessage] | dict[str, Any] | BaseModel,
679
- config: RunnableConfig,
680
- runtime: Runtime,
681
- ) -> Any:
682
- tool_calls, input_type = self._parse_input(input)
683
- config_list = get_config_list(config, len(tool_calls))
684
-
685
- # Construct ToolRuntime instances at the top level for each tool call
686
- tool_runtimes = []
687
- for call, cfg in zip(tool_calls, config_list, strict=False):
688
- state = self._extract_state(input)
689
- tool_runtime = ToolRuntime(
690
- state=state,
691
- tool_call_id=call["id"],
692
- config=cfg,
693
- context=runtime.context,
694
- store=runtime.store,
695
- stream_writer=runtime.stream_writer,
696
- )
697
- tool_runtimes.append(tool_runtime)
698
-
699
- # Pass original tool calls without injection
700
- input_types = [input_type] * len(tool_calls)
701
- with get_executor_for_config(config) as executor:
702
- outputs = list(executor.map(self._run_one, tool_calls, input_types, tool_runtimes))
703
-
704
- return self._combine_tool_outputs(outputs, input_type)
705
-
706
- async def _afunc(
707
- self,
708
- input: list[AnyMessage] | dict[str, Any] | BaseModel,
709
- config: RunnableConfig,
710
- runtime: Runtime,
711
- ) -> Any:
712
- tool_calls, input_type = self._parse_input(input)
713
- config_list = get_config_list(config, len(tool_calls))
714
-
715
- # Construct ToolRuntime instances at the top level for each tool call
716
- tool_runtimes = []
717
- for call, cfg in zip(tool_calls, config_list, strict=False):
718
- state = self._extract_state(input)
719
- tool_runtime = ToolRuntime(
720
- state=state,
721
- tool_call_id=call["id"],
722
- config=cfg,
723
- context=runtime.context,
724
- store=runtime.store,
725
- stream_writer=runtime.stream_writer,
726
- )
727
- tool_runtimes.append(tool_runtime)
728
-
729
- # Pass original tool calls without injection
730
- coros = []
731
- for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
732
- coros.append(self._arun_one(call, input_type, tool_runtime)) # type: ignore[arg-type]
733
- outputs = await asyncio.gather(*coros)
734
-
735
- return self._combine_tool_outputs(outputs, input_type)
736
-
737
- def _combine_tool_outputs(
738
- self,
739
- outputs: list[ToolMessage | Command],
740
- input_type: Literal["list", "dict", "tool_calls"],
741
- ) -> list[Command | list[ToolMessage] | dict[str, list[ToolMessage]]]:
742
- # preserve existing behavior for non-command tool outputs for backwards
743
- # compatibility
744
- if not any(isinstance(output, Command) for output in outputs):
745
- # TypedDict, pydantic, dataclass, etc. should all be able to load from dict
746
- return outputs if input_type == "list" else {self._messages_key: outputs} # type: ignore[return-value, return-value]
747
-
748
- # LangGraph will automatically handle list of Command and non-command node
749
- # updates
750
- combined_outputs: list[Command | list[ToolMessage] | dict[str, list[ToolMessage]]] = []
751
-
752
- # combine all parent commands with goto into a single parent command
753
- parent_command: Command | None = None
754
- for output in outputs:
755
- if isinstance(output, Command):
756
- if (
757
- output.graph is Command.PARENT
758
- and isinstance(output.goto, list)
759
- and all(isinstance(send, Send) for send in output.goto)
760
- ):
761
- if parent_command:
762
- parent_command = replace(
763
- parent_command,
764
- goto=cast("list[Send]", parent_command.goto) + output.goto,
765
- )
766
- else:
767
- parent_command = Command(graph=Command.PARENT, goto=output.goto)
768
- else:
769
- combined_outputs.append(output)
770
- else:
771
- combined_outputs.append(
772
- [output] if input_type == "list" else {self._messages_key: [output]}
773
- )
774
-
775
- if parent_command:
776
- combined_outputs.append(parent_command)
777
- return combined_outputs
778
-
779
- def _execute_tool_sync(
780
- self,
781
- request: ToolCallRequest,
782
- input_type: Literal["list", "dict", "tool_calls"],
783
- config: RunnableConfig,
784
- ) -> ToolMessage | Command:
785
- """Execute tool call with configured error handling.
786
-
787
- Args:
788
- request: Tool execution request.
789
- input_type: Input format.
790
- config: Runnable configuration.
791
-
792
- Returns:
793
- ToolMessage or Command.
794
-
795
- Raises:
796
- Exception: If tool fails and handle_tool_errors is False.
797
- """
798
- call = request.tool_call
799
- tool = request.tool
800
-
801
- # Validate tool exists when we actually need to execute it
802
- if tool is None:
803
- if invalid_tool_message := self._validate_tool_call(call):
804
- return invalid_tool_message
805
- # This should never happen if validation works correctly
806
- msg = f"Tool {call['name']} is not registered with ToolNode"
807
- raise TypeError(msg)
808
-
809
- # Inject state, store, and runtime right before invocation
810
- injected_call = self._inject_tool_args(call, request.runtime)
811
- call_args = {**injected_call, "type": "tool_call"}
812
-
813
- try:
814
- try:
815
- response = tool.invoke(call_args, config)
816
- except ValidationError as exc:
817
- # Filter out errors for injected arguments
818
- filtered_errors = _filter_validation_errors(
819
- exc,
820
- self._tool_to_state_args.get(call["name"], {}),
821
- self._tool_to_store_arg.get(call["name"]),
822
- self._tool_to_runtime_arg.get(call["name"]),
823
- )
824
- # Use original call["args"] without injected values for error reporting
825
- raise ToolInvocationError(call["name"], exc, call["args"], filtered_errors) from exc
826
-
827
- # GraphInterrupt is a special exception that will always be raised.
828
- # It can be triggered in the following scenarios,
829
- # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation
830
- # most commonly:
831
- # (1) a GraphInterrupt is raised inside a tool
832
- # (2) a GraphInterrupt is raised inside a graph node for a graph called as a tool
833
- # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph
834
- # called as a tool
835
- # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
836
- except GraphBubbleUp:
837
- raise
838
- except Exception as e:
839
- # Determine which exception types are handled
840
- handled_types: tuple[type[Exception], ...]
841
- if isinstance(self._handle_tool_errors, type) and issubclass(
842
- self._handle_tool_errors, Exception
843
- ):
844
- handled_types = (self._handle_tool_errors,)
845
- elif isinstance(self._handle_tool_errors, tuple):
846
- handled_types = self._handle_tool_errors
847
- elif callable(self._handle_tool_errors) and not isinstance(
848
- self._handle_tool_errors, type
849
- ):
850
- handled_types = _infer_handled_types(self._handle_tool_errors)
851
- else:
852
- # default behavior is catching all exceptions
853
- handled_types = (Exception,)
854
-
855
- # Check if this error should be handled
856
- if not self._handle_tool_errors or not isinstance(e, handled_types):
857
- raise
858
-
859
- # Error is handled - create error ToolMessage
860
- content = _handle_tool_error(e, flag=self._handle_tool_errors)
861
- return ToolMessage(
862
- content=content,
863
- name=call["name"],
864
- tool_call_id=call["id"],
865
- status="error",
866
- )
867
-
868
- # Process successful response
869
- if isinstance(response, Command):
870
- # Validate Command before returning to handler
871
- return self._validate_tool_command(response, request.tool_call, input_type)
872
- if isinstance(response, ToolMessage):
873
- response.content = cast("str | list", msg_content_output(response.content))
874
- return response
875
-
876
- msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
877
- raise TypeError(msg)
878
-
879
- def _run_one(
880
- self,
881
- call: ToolCall,
882
- input_type: Literal["list", "dict", "tool_calls"],
883
- tool_runtime: ToolRuntime,
884
- ) -> ToolMessage | Command:
885
- """Execute single tool call with wrap_tool_call wrapper if configured.
886
-
887
- Args:
888
- call: Tool call dict.
889
- input_type: Input format.
890
- tool_runtime: Tool runtime.
891
-
892
- Returns:
893
- ToolMessage or Command.
894
- """
895
- # Validation is deferred to _execute_tool_sync to allow interceptors
896
- # to short-circuit requests for unregistered tools
897
- tool = self.tools_by_name.get(call["name"])
898
-
899
- # Create the tool request with state and runtime
900
- tool_request = ToolCallRequest(
901
- tool_call=call,
902
- tool=tool,
903
- state=tool_runtime.state,
904
- runtime=tool_runtime,
905
- )
906
-
907
- config = tool_runtime.config
908
-
909
- if self._wrap_tool_call is None:
910
- # No wrapper - execute directly
911
- return self._execute_tool_sync(tool_request, input_type, config)
912
-
913
- # Define execute callable that can be called multiple times
914
- def execute(req: ToolCallRequest) -> ToolMessage | Command:
915
- """Execute tool with given request. Can be called multiple times."""
916
- return self._execute_tool_sync(req, input_type, config)
917
-
918
- # Call wrapper with request and execute callable
919
- try:
920
- return self._wrap_tool_call(tool_request, execute)
921
- except Exception as e:
922
- # Wrapper threw an exception
923
- if not self._handle_tool_errors:
924
- raise
925
- # Convert to error message
926
- content = _handle_tool_error(e, flag=self._handle_tool_errors)
927
- return ToolMessage(
928
- content=content,
929
- name=tool_request.tool_call["name"],
930
- tool_call_id=tool_request.tool_call["id"],
931
- status="error",
932
- )
933
-
934
- async def _execute_tool_async(
935
- self,
936
- request: ToolCallRequest,
937
- input_type: Literal["list", "dict", "tool_calls"],
938
- config: RunnableConfig,
939
- ) -> ToolMessage | Command:
940
- """Execute tool call asynchronously with configured error handling.
941
-
942
- Args:
943
- request: Tool execution request.
944
- input_type: Input format.
945
- config: Runnable configuration.
946
-
947
- Returns:
948
- ToolMessage or Command.
949
-
950
- Raises:
951
- Exception: If tool fails and handle_tool_errors is False.
952
- """
953
- call = request.tool_call
954
- tool = request.tool
955
-
956
- # Validate tool exists when we actually need to execute it
957
- if tool is None:
958
- if invalid_tool_message := self._validate_tool_call(call):
959
- return invalid_tool_message
960
- # This should never happen if validation works correctly
961
- msg = f"Tool {call['name']} is not registered with ToolNode"
962
- raise TypeError(msg)
963
-
964
- # Inject state, store, and runtime right before invocation
965
- injected_call = self._inject_tool_args(call, request.runtime)
966
- call_args = {**injected_call, "type": "tool_call"}
967
-
968
- try:
969
- try:
970
- response = await tool.ainvoke(call_args, config)
971
- except ValidationError as exc:
972
- # Filter out errors for injected arguments
973
- filtered_errors = _filter_validation_errors(
974
- exc,
975
- self._tool_to_state_args.get(call["name"], {}),
976
- self._tool_to_store_arg.get(call["name"]),
977
- self._tool_to_runtime_arg.get(call["name"]),
978
- )
979
- # Use original call["args"] without injected values for error reporting
980
- raise ToolInvocationError(call["name"], exc, call["args"], filtered_errors) from exc
981
-
982
- # GraphInterrupt is a special exception that will always be raised.
983
- # It can be triggered in the following scenarios,
984
- # Where GraphInterrupt(GraphBubbleUp) is raised from an `interrupt` invocation
985
- # most commonly:
986
- # (1) a GraphInterrupt is raised inside a tool
987
- # (2) a GraphInterrupt is raised inside a graph node for a graph called as a tool
988
- # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph
989
- # called as a tool
990
- # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
991
- except GraphBubbleUp:
992
- raise
993
- except Exception as e:
994
- # Determine which exception types are handled
995
- handled_types: tuple[type[Exception], ...]
996
- if isinstance(self._handle_tool_errors, type) and issubclass(
997
- self._handle_tool_errors, Exception
998
- ):
999
- handled_types = (self._handle_tool_errors,)
1000
- elif isinstance(self._handle_tool_errors, tuple):
1001
- handled_types = self._handle_tool_errors
1002
- elif callable(self._handle_tool_errors) and not isinstance(
1003
- self._handle_tool_errors, type
1004
- ):
1005
- handled_types = _infer_handled_types(self._handle_tool_errors)
1006
- else:
1007
- # default behavior is catching all exceptions
1008
- handled_types = (Exception,)
1009
-
1010
- # Check if this error should be handled
1011
- if not self._handle_tool_errors or not isinstance(e, handled_types):
1012
- raise
1013
-
1014
- # Error is handled - create error ToolMessage
1015
- content = _handle_tool_error(e, flag=self._handle_tool_errors)
1016
- return ToolMessage(
1017
- content=content,
1018
- name=call["name"],
1019
- tool_call_id=call["id"],
1020
- status="error",
1021
- )
1022
-
1023
- # Process successful response
1024
- if isinstance(response, Command):
1025
- # Validate Command before returning to handler
1026
- return self._validate_tool_command(response, request.tool_call, input_type)
1027
- if isinstance(response, ToolMessage):
1028
- response.content = cast("str | list", msg_content_output(response.content))
1029
- return response
1030
-
1031
- msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
1032
- raise TypeError(msg)
1033
-
1034
- async def _arun_one(
1035
- self,
1036
- call: ToolCall,
1037
- input_type: Literal["list", "dict", "tool_calls"],
1038
- tool_runtime: ToolRuntime,
1039
- ) -> ToolMessage | Command:
1040
- """Execute single tool call asynchronously with awrap_tool_call wrapper if configured.
1041
-
1042
- Args:
1043
- call: Tool call dict.
1044
- input_type: Input format.
1045
- tool_runtime: Tool runtime.
1046
-
1047
- Returns:
1048
- ToolMessage or Command.
1049
- """
1050
- # Validation is deferred to _execute_tool_async to allow interceptors
1051
- # to short-circuit requests for unregistered tools
1052
- tool = self.tools_by_name.get(call["name"])
1053
-
1054
- # Create the tool request with state and runtime
1055
- tool_request = ToolCallRequest(
1056
- tool_call=call,
1057
- tool=tool,
1058
- state=tool_runtime.state,
1059
- runtime=tool_runtime,
1060
- )
1061
-
1062
- config = tool_runtime.config
1063
-
1064
- if self._awrap_tool_call is None and self._wrap_tool_call is None:
1065
- # No wrapper - execute directly
1066
- return await self._execute_tool_async(tool_request, input_type, config)
1067
-
1068
- # Define async execute callable that can be called multiple times
1069
- async def execute(req: ToolCallRequest) -> ToolMessage | Command:
1070
- """Execute tool with given request. Can be called multiple times."""
1071
- return await self._execute_tool_async(req, input_type, config)
1072
-
1073
- def _sync_execute(req: ToolCallRequest) -> ToolMessage | Command:
1074
- """Sync execute fallback for sync wrapper."""
1075
- return self._execute_tool_sync(req, input_type, config)
1076
-
1077
- # Call wrapper with request and execute callable
1078
- try:
1079
- if self._awrap_tool_call is not None:
1080
- return await self._awrap_tool_call(tool_request, execute)
1081
- # None check was performed above already
1082
- self._wrap_tool_call = cast("ToolCallWrapper", self._wrap_tool_call)
1083
- return self._wrap_tool_call(tool_request, _sync_execute)
1084
- except Exception as e:
1085
- # Wrapper threw an exception
1086
- if not self._handle_tool_errors:
1087
- raise
1088
- # Convert to error message
1089
- content = _handle_tool_error(e, flag=self._handle_tool_errors)
1090
- return ToolMessage(
1091
- content=content,
1092
- name=tool_request.tool_call["name"],
1093
- tool_call_id=tool_request.tool_call["id"],
1094
- status="error",
1095
- )
1096
-
1097
- def _parse_input(
1098
- self,
1099
- input: list[AnyMessage] | dict[str, Any] | BaseModel,
1100
- ) -> tuple[list[ToolCall], Literal["list", "dict", "tool_calls"]]:
1101
- input_type: Literal["list", "dict", "tool_calls"]
1102
- if isinstance(input, list):
1103
- if isinstance(input[-1], dict) and input[-1].get("type") == "tool_call":
1104
- input_type = "tool_calls"
1105
- tool_calls = cast("list[ToolCall]", input)
1106
- return tool_calls, input_type
1107
- input_type = "list"
1108
- messages = input
1109
- elif isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
1110
- # Handle ToolCallWithContext from Send API
1111
- # mypy will not be able to type narrow correctly since the signature
1112
- # for input contains dict[str, Any]. We'd need to narrow dict[str, Any]
1113
- # before we can apply correct typing.
1114
- input_with_ctx = cast("ToolCallWithContext", input)
1115
- input_type = "tool_calls"
1116
- return [input_with_ctx["tool_call"]], input_type
1117
- elif isinstance(input, dict) and (messages := input.get(self._messages_key, [])):
1118
- input_type = "dict"
1119
- elif messages := getattr(input, self._messages_key, []):
1120
- # Assume dataclass-like state that can coerce from dict
1121
- input_type = "dict"
1122
- else:
1123
- msg = "No message found in input"
1124
- raise ValueError(msg)
1125
-
1126
- try:
1127
- latest_ai_message = next(m for m in reversed(messages) if isinstance(m, AIMessage))
1128
- except StopIteration:
1129
- msg = "No AIMessage found in input"
1130
- raise ValueError(msg)
1131
-
1132
- tool_calls = list(latest_ai_message.tool_calls)
1133
- return tool_calls, input_type
1134
-
1135
- def _validate_tool_call(self, call: ToolCall) -> ToolMessage | None:
1136
- requested_tool = call["name"]
1137
- if requested_tool not in self.tools_by_name:
1138
- all_tool_names = list(self.tools_by_name.keys())
1139
- content = INVALID_TOOL_NAME_ERROR_TEMPLATE.format(
1140
- requested_tool=requested_tool,
1141
- available_tools=", ".join(all_tool_names),
1142
- )
1143
- return ToolMessage(
1144
- content, name=requested_tool, tool_call_id=call["id"], status="error"
1145
- )
1146
- return None
1147
-
1148
- def _extract_state(
1149
- self, input: list[AnyMessage] | dict[str, Any] | BaseModel
1150
- ) -> list[AnyMessage] | dict[str, Any] | BaseModel:
1151
- """Extract state from input, handling ToolCallWithContext if present.
1152
-
1153
- Args:
1154
- input: The input which may be raw state or ToolCallWithContext.
1155
-
1156
- Returns:
1157
- The actual state to pass to wrap_tool_call wrappers.
1158
- """
1159
- if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
1160
- return input["state"]
1161
- return input
1162
-
1163
- def _inject_state(
1164
- self,
1165
- tool_call: ToolCall,
1166
- state: list[AnyMessage] | dict[str, Any] | BaseModel,
1167
- ) -> ToolCall:
1168
- state_args = self._tool_to_state_args[tool_call["name"]]
1169
-
1170
- if state_args and isinstance(state, list):
1171
- required_fields = list(state_args.values())
1172
- if (
1173
- len(required_fields) == 1 and required_fields[0] == self._messages_key
1174
- ) or required_fields[0] is None:
1175
- state = {self._messages_key: state}
1176
- else:
1177
- err_msg = (
1178
- f"Invalid input to ToolNode. Tool {tool_call['name']} requires "
1179
- f"graph state dict as input."
1180
- )
1181
- if any(state_field for state_field in state_args.values()):
1182
- required_fields_str = ", ".join(f for f in required_fields if f)
1183
- err_msg += f" State should contain fields {required_fields_str}."
1184
- raise ValueError(err_msg)
1185
-
1186
- if isinstance(state, dict):
1187
- tool_state_args = {
1188
- tool_arg: state[state_field] if state_field else state
1189
- for tool_arg, state_field in state_args.items()
1190
- }
1191
- else:
1192
- tool_state_args = {
1193
- tool_arg: getattr(state, state_field) if state_field else state
1194
- for tool_arg, state_field in state_args.items()
1195
- }
1196
-
1197
- tool_call["args"] = {
1198
- **tool_call["args"],
1199
- **tool_state_args,
1200
- }
1201
- return tool_call
1202
-
1203
- def _inject_store(self, tool_call: ToolCall, store: BaseStore | None) -> ToolCall:
1204
- store_arg = self._tool_to_store_arg[tool_call["name"]]
1205
- if not store_arg:
1206
- return tool_call
1207
-
1208
- if store is None:
1209
- msg = (
1210
- "Cannot inject store into tools with InjectedStore annotations - "
1211
- "please compile your graph with a store."
1212
- )
1213
- raise ValueError(msg)
1214
-
1215
- tool_call["args"] = {
1216
- **tool_call["args"],
1217
- store_arg: store,
1218
- }
1219
- return tool_call
1220
-
1221
- def _inject_runtime(self, tool_call: ToolCall, tool_runtime: ToolRuntime) -> ToolCall:
1222
- """Inject ToolRuntime into tool call arguments.
1223
-
1224
- Args:
1225
- tool_call: The tool call to inject runtime into.
1226
- tool_runtime: The ToolRuntime instance to inject.
1227
-
1228
- Returns:
1229
- The tool call with runtime injected if needed.
1230
- """
1231
- runtime_arg = self._tool_to_runtime_arg.get(tool_call["name"])
1232
- if not runtime_arg:
1233
- return tool_call
1234
-
1235
- tool_call["args"] = {
1236
- **tool_call["args"],
1237
- runtime_arg: tool_runtime,
1238
- }
1239
- return tool_call
1240
-
1241
- def _inject_tool_args(
1242
- self,
1243
- tool_call: ToolCall,
1244
- tool_runtime: ToolRuntime,
1245
- ) -> ToolCall:
1246
- """Inject graph state, store, and runtime into tool call arguments.
1247
-
1248
- This is an internal method that enables tools to access graph context that
1249
- should not be controlled by the model. Tools can declare dependencies on graph
1250
- state, persistent storage, or runtime context using InjectedState, InjectedStore,
1251
- and ToolRuntime annotations. This method automatically identifies these
1252
- dependencies and injects the appropriate values.
1253
-
1254
- The injection process preserves the original tool call structure while adding
1255
- the necessary context arguments. This allows tools to be both model-callable
1256
- and context-aware without exposing internal state management to the model.
1257
-
1258
- Args:
1259
- tool_call: The tool call dictionary to augment with injected arguments.
1260
- Must contain 'name', 'args', 'id', and 'type' fields.
1261
- tool_runtime: The ToolRuntime instance containing all runtime context
1262
- (state, config, store, context, stream_writer) to inject into tools.
1263
-
1264
- Returns:
1265
- A new ToolCall dictionary with the same structure as the input but with
1266
- additional arguments injected based on the tool's annotation requirements.
1267
-
1268
- Raises:
1269
- ValueError: If a tool requires store injection but no store is provided,
1270
- or if state injection requirements cannot be satisfied.
1271
-
1272
- !!! note
1273
- This method is called automatically during tool execution. It should not
1274
- be called from outside the `ToolNode`.
1275
- """
1276
- if tool_call["name"] not in self.tools_by_name:
1277
- return tool_call
1278
-
1279
- tool_call_copy: ToolCall = copy(tool_call)
1280
- tool_call_with_state = self._inject_state(tool_call_copy, tool_runtime.state)
1281
- tool_call_with_store = self._inject_store(tool_call_with_state, tool_runtime.store)
1282
- return self._inject_runtime(tool_call_with_store, tool_runtime)
1283
-
1284
- def _validate_tool_command(
1285
- self,
1286
- command: Command,
1287
- call: ToolCall,
1288
- input_type: Literal["list", "dict", "tool_calls"],
1289
- ) -> Command:
1290
- if isinstance(command.update, dict):
1291
- # input type is dict when ToolNode is invoked with a dict input
1292
- # (e.g. {"messages": [AIMessage(..., tool_calls=[...])]})
1293
- if input_type not in ("dict", "tool_calls"):
1294
- msg = (
1295
- "Tools can provide a dict in Command.update only when using dict "
1296
- f"with '{self._messages_key}' key as ToolNode input, "
1297
- f"got: {command.update} for tool '{call['name']}'"
1298
- )
1299
- raise ValueError(msg)
1300
-
1301
- updated_command = deepcopy(command)
1302
- state_update = cast("dict[str, Any]", updated_command.update) or {}
1303
- messages_update = state_update.get(self._messages_key, [])
1304
- elif isinstance(command.update, list):
1305
- # Input type is list when ToolNode is invoked with a list input
1306
- # (e.g. [AIMessage(..., tool_calls=[...])])
1307
- if input_type != "list":
1308
- msg = (
1309
- "Tools can provide a list of messages in Command.update "
1310
- "only when using list of messages as ToolNode input, "
1311
- f"got: {command.update} for tool '{call['name']}'"
1312
- )
1313
- raise ValueError(msg)
1314
-
1315
- updated_command = deepcopy(command)
1316
- messages_update = updated_command.update
1317
- else:
1318
- return command
1319
-
1320
- # convert to message objects if updates are in a dict format
1321
- messages_update = convert_to_messages(messages_update)
1322
-
1323
- # no validation needed if all messages are being removed
1324
- if messages_update == [RemoveMessage(id=REMOVE_ALL_MESSAGES)]:
1325
- return updated_command
1326
-
1327
- has_matching_tool_message = False
1328
- for message in messages_update:
1329
- if not isinstance(message, ToolMessage):
1330
- continue
1331
-
1332
- if message.tool_call_id == call["id"]:
1333
- message.name = call["name"]
1334
- has_matching_tool_message = True
1335
-
1336
- # validate that we always have a ToolMessage matching the tool call in
1337
- # Command.update if command is sent to the CURRENT graph
1338
- if updated_command.graph is None and not has_matching_tool_message:
1339
- example_update = (
1340
- '`Command(update={"messages": '
1341
- '[ToolMessage("Success", tool_call_id=tool_call_id), ...]}, ...)`'
1342
- if input_type == "dict"
1343
- else "`Command(update="
1344
- '[ToolMessage("Success", tool_call_id=tool_call_id), ...], ...)`'
1345
- )
1346
- msg = (
1347
- "Expected to have a matching ToolMessage in Command.update "
1348
- f"for tool '{call['name']}', got: {messages_update}. "
1349
- "Every tool call (LLM requesting to call a tool) "
1350
- "in the message history MUST have a corresponding ToolMessage. "
1351
- f"You can fix it by modifying the tool to return {example_update}."
1352
- )
1353
- raise ValueError(msg)
1354
- return updated_command
1355
-
1356
-
1357
- def tools_condition(
1358
- state: list[AnyMessage] | dict[str, Any] | BaseModel,
1359
- messages_key: str = "messages",
1360
- ) -> Literal["tools", "__end__"]:
1361
- """Conditional routing function for tool-calling workflows.
1362
-
1363
- This utility function implements the standard conditional logic for ReAct-style
1364
- agents: if the last AI message contains tool calls, route to the tool execution
1365
- node; otherwise, end the workflow. This pattern is fundamental to most tool-calling
1366
- agent architectures.
1367
-
1368
- The function handles multiple state formats commonly used in LangGraph applications,
1369
- making it flexible for different graph designs while maintaining consistent behavior.
1370
-
1371
- Args:
1372
- state: The current graph state to examine for tool calls. Supported formats:
1373
- - Dictionary containing a messages key (for StateGraph)
1374
- - BaseModel instance with a messages attribute
1375
- messages_key: The key or attribute name containing the message list in the state.
1376
- This allows customization for graphs using different state schemas.
1377
- Defaults to "messages".
1378
-
1379
- Returns:
1380
- Either "tools" if tool calls are present in the last AI message, or "__end__"
1381
- to terminate the workflow. These are the standard routing destinations for
1382
- tool-calling conditional edges.
1383
-
1384
- Raises:
1385
- ValueError: If no messages can be found in the provided state format.
1386
-
1387
- Example:
1388
- Basic usage in a ReAct agent:
1389
-
1390
- ```python
1391
- from langgraph.graph import StateGraph
1392
- from langchain.tools import ToolNode
1393
- from langchain.tools.tool_node import tools_condition
1394
- from typing_extensions import TypedDict
1395
-
1396
-
1397
- class State(TypedDict):
1398
- messages: list
1399
-
1400
-
1401
- graph = StateGraph(State)
1402
- graph.add_node("llm", call_model)
1403
- graph.add_node("tools", ToolNode([my_tool]))
1404
- graph.add_conditional_edges(
1405
- "llm",
1406
- tools_condition, # Routes to "tools" or "__end__"
1407
- {"tools": "tools", "__end__": "__end__"},
1408
- )
1409
- ```
1410
-
1411
- Custom messages key:
1412
-
1413
- ```python
1414
- def custom_condition(state):
1415
- return tools_condition(state, messages_key="chat_history")
1416
- ```
1417
-
1418
- !!! note
1419
- This function is designed to work seamlessly with `ToolNode` and standard
1420
- LangGraph patterns. It expects the last message to be an `AIMessage` when
1421
- tool calls are present, which is the standard output format for tool-calling
1422
- language models.
1423
- """
1424
- if isinstance(state, list):
1425
- ai_message = state[-1]
1426
- elif (isinstance(state, dict) and (messages := state.get(messages_key, []))) or (
1427
- messages := getattr(state, messages_key, [])
1428
- ):
1429
- ai_message = messages[-1]
1430
- else:
1431
- msg = f"No messages found in input state to tool_edge: {state}"
1432
- raise ValueError(msg)
1433
- if hasattr(ai_message, "tool_calls") and len(ai_message.tool_calls) > 0:
1434
- return "tools"
1435
- return "__end__"
1436
-
1437
-
1438
- @dataclass
1439
- class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1440
- """Runtime context automatically injected into tools.
1441
-
1442
- When a tool function has a parameter named `tool_runtime` with type hint
1443
- `ToolRuntime`, the tool execution system will automatically inject an instance
1444
- containing:
1445
-
1446
- - `state`: The current graph state
1447
- - `tool_call_id`: The ID of the current tool call
1448
- - `config`: `RunnableConfig` for the current execution
1449
- - `context`: Runtime context (from langgraph `Runtime`)
1450
- - `store`: `BaseStore` instance for persistent storage (from langgraph `Runtime`)
1451
- - `stream_writer`: `StreamWriter` for streaming output (from langgraph `Runtime`)
1452
-
1453
- No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
1454
- as a parameter.
1455
-
1456
- Example:
1457
- ```python
1458
- from langchain_core.tools import tool
1459
- from langchain.tools import ToolRuntime
1460
-
1461
- @tool
1462
- def my_tool(x: int, runtime: ToolRuntime) -> str:
1463
- \"\"\"Tool that accesses runtime context.\"\"\"
1464
- # Access state
1465
- messages = tool_runtime.state["messages"]
1466
-
1467
- # Access tool_call_id
1468
- print(f"Tool call ID: {tool_runtime.tool_call_id}")
1469
-
1470
- # Access config
1471
- print(f"Run ID: {tool_runtime.config.get('run_id')}")
1472
-
1473
- # Access runtime context
1474
- user_id = runtime.context.get("user_id")
1475
-
1476
- # Access store
1477
- runtime.store.put(("metrics",), "count", 1)
1478
-
1479
- # Stream output
1480
- runtime.stream_writer("Processing...")
1481
-
1482
- return f"Processed {x}"
1483
- ```
1484
-
1485
- !!! note
1486
- This is a marker class used for type checking and detection.
1487
- The actual runtime object will be constructed during tool execution.
1488
- """
1489
-
1490
- state: StateT
1491
- context: ContextT
1492
- config: RunnableConfig
1493
- stream_writer: StreamWriter
1494
- tool_call_id: str | None
1495
- store: BaseStore | None
1496
-
1497
-
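A minimal sketch of exercising a `ToolRuntime`-aware tool through `ToolNode`, mirroring the `InjectedState` invocation example below. The tool name `echo_call_id` is a hypothetical example, and when invoked outside a compiled graph some runtime fields (context, store, stream writer) may be unavailable; only the tool call ID is used here.

```python
from langchain_core.messages import AIMessage
from langchain_core.tools import tool

from langchain.tools import ToolNode, ToolRuntime


@tool
def echo_call_id(x: int, runtime: ToolRuntime) -> str:
    """Hypothetical tool that reads the injected tool call id."""
    return f"x={x}, call_id={runtime.tool_call_id}"


node = ToolNode([echo_call_id])
tool_call = {"name": "echo_call_id", "args": {"x": 7}, "id": "call_1", "type": "tool_call"}
state = {"messages": [AIMessage("", tool_calls=[tool_call])]}

# `runtime` is injected by ToolNode at execution time and never appears in
# the argument schema shown to the model.
result = node.invoke(state)
```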
1498
- class InjectedState(InjectedToolArg):
1499
- """Annotation for injecting graph state into tool arguments.
1500
-
1501
- This annotation enables tools to access graph state without exposing state
1502
- management details to the language model. Tools annotated with `InjectedState`
1503
- receive state data automatically during execution while remaining invisible
1504
- to the model's tool-calling interface.
1505
-
1506
- Args:
1507
- field: Optional key to extract from the state dictionary. If `None`, the entire
1508
- state is injected. If specified, only that field's value is injected.
1509
- This allows tools to request specific state components rather than
1510
- processing the full state structure.
1511
-
1512
- Example:
1513
- ```python
1514
- from typing import List
1515
- from typing_extensions import Annotated, TypedDict
1516
-
1517
- from langchain_core.messages import BaseMessage, AIMessage
1518
- from langchain.tools import InjectedState, ToolNode, tool
1519
-
1520
-
1521
- class AgentState(TypedDict):
1522
- messages: List[BaseMessage]
1523
- foo: str
1524
-
1525
-
1526
- @tool
1527
- def state_tool(x: int, state: Annotated[dict, InjectedState]) -> str:
1528
- '''Do something with state.'''
1529
- if len(state["messages"]) > 2:
1530
- return state["foo"] + str(x)
1531
- else:
1532
- return "not enough messages"
1533
-
1534
-
1535
- @tool
1536
- def foo_tool(x: int, foo: Annotated[str, InjectedState("foo")]) -> str:
1537
- '''Do something else with state.'''
1538
- return foo + str(x + 1)
1539
-
1540
-
1541
- node = ToolNode([state_tool, foo_tool])
1542
-
1543
- tool_call1 = {"name": "state_tool", "args": {"x": 1}, "id": "1", "type": "tool_call"}
1544
- tool_call2 = {"name": "foo_tool", "args": {"x": 1}, "id": "2", "type": "tool_call"}
1545
- state = {
1546
- "messages": [AIMessage("", tool_calls=[tool_call1, tool_call2])],
1547
- "foo": "bar",
1548
- }
1549
- node.invoke(state)
1550
- ```
1551
-
1552
- ```python
1553
- [
1554
- ToolMessage(content="not enough messages", name="state_tool", tool_call_id="1"),
1555
- ToolMessage(content="bar2", name="foo_tool", tool_call_id="2"),
1556
- ]
1557
- ```
1558
-
1559
- !!! note
1560
- - `InjectedState` arguments are automatically excluded from tool schemas
1561
- presented to language models
1562
- - `ToolNode` handles the injection process during execution
1563
- - Tools can mix regular arguments (controlled by the model) with injected
1564
- arguments (controlled by the system)
1565
- - State injection occurs after the model generates tool calls but before
1566
- tool execution
1567
- """
1568
-
1569
- def __init__(self, field: str | None = None) -> None:
1570
- """Initialize the `InjectedState` annotation."""
1571
- self.field = field
1572
-
1573
-
1574
- class InjectedStore(InjectedToolArg):
1575
- """Annotation for injecting persistent store into tool arguments.
1576
-
1577
- This annotation enables tools to access LangGraph's persistent storage system
1578
- without exposing storage details to the language model. Tools annotated with
1579
- InjectedStore receive the store instance automatically during execution while
1580
- remaining invisible to the model's tool-calling interface.
1581
-
1582
- The store provides persistent, cross-session data storage that tools can use
1583
- for maintaining context, user preferences, or any other data that needs to
1584
- persist beyond individual workflow executions.
1585
-
1586
- !!! warning
1587
- `InjectedStore` annotation requires `langchain-core >= 0.3.8`
1588
-
1589
- Example:
1590
- ```python
1591
- from typing import Any
- from typing_extensions import Annotated
1592
- from langgraph.store.memory import InMemoryStore
1593
- from langchain.tools import InjectedStore, ToolNode, tool
1594
-
1595
- @tool
1596
- def save_preference(
1597
- key: str,
1598
- value: str,
1599
- store: Annotated[Any, InjectedStore()]
1600
- ) -> str:
1601
- \"\"\"Save user preference to persistent storage.\"\"\"
1602
- store.put(("preferences",), key, value)
1603
- return f"Saved {key} = {value}"
1604
-
1605
- @tool
1606
- def get_preference(
1607
- key: str,
1608
- store: Annotated[Any, InjectedStore()]
1609
- ) -> str:
1610
- \"\"\"Retrieve user preference from persistent storage.\"\"\"
1611
- result = store.get(("preferences",), key)
1612
- return result.value if result else "Not found"
1613
- ```
1614
-
1615
- Usage with `ToolNode` and graph compilation:
1616
-
1617
- ```python
1618
- from langgraph.graph import StateGraph
1619
- from langgraph.store.memory import InMemoryStore
1620
-
1621
- store = InMemoryStore()
1622
- tool_node = ToolNode([save_preference, get_preference])
1623
-
1624
- graph = StateGraph(State)
1625
- graph.add_node("tools", tool_node)
1626
- compiled_graph = graph.compile(store=store) # Store is injected automatically
1627
- ```
1628
-
1629
- Cross-session persistence:
1630
-
1631
- ```python
1632
- # First session
1633
- result1 = compiled_graph.invoke({"messages": [HumanMessage("Save my favorite color as blue")]})
1634
-
1635
- # Later session - data persists
1636
- result2 = compiled_graph.invoke({"messages": [HumanMessage("What's my favorite color?")]})
1637
- ```
1638
-
1639
- !!! note
1640
- - `InjectedStore` arguments are automatically excluded from tool schemas
1641
- presented to language models
1642
- - The store instance is automatically injected by `ToolNode` during execution
1643
- - Tools can access namespaced storage using the store's get/put methods
1644
- - Store injection requires the graph to be compiled with a store instance
1645
- - Multiple tools can share the same store instance for data consistency
1646
- """
1647
-
1648
-
1649
- def _is_injection(
1650
- type_arg: Any,
1651
- injection_type: type[InjectedState | InjectedStore | ToolRuntime],
1652
- ) -> bool:
1653
- """Check if a type argument represents an injection annotation.
1654
-
1655
- This utility function determines whether a type annotation indicates that
1656
- an argument should be injected with state, store, or runtime data. It handles both
1657
- direct annotations and nested annotations within Union or Annotated types.
1658
-
1659
- Args:
1660
- type_arg: The type argument to check for injection annotations.
1661
- injection_type: The injection type to look for (InjectedState, InjectedStore, or ToolRuntime).
1662
-
1663
- Returns:
1664
- True if the type argument contains the specified injection annotation.
1665
- """
1666
- if isinstance(type_arg, injection_type) or (
1667
- isinstance(type_arg, type) and issubclass(type_arg, injection_type)
1668
- ):
1669
- return True
1670
- origin_ = get_origin(type_arg)
1671
- if origin_ is Union or origin_ is Annotated:
1672
- return any(_is_injection(ta, injection_type) for ta in get_args(type_arg))
1673
- return False
1674
-
1675
-
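To make the recursion above concrete, a small illustration of how `_is_injection` treats direct, `Annotated`, and `Union` annotations. This is a private helper of this module, shown here for illustration only.

```python
from typing import Annotated, Union

from langchain.tools.tool_node import InjectedState, InjectedStore, _is_injection

# A marker instance or the marker class itself both count.
assert _is_injection(InjectedState("foo"), InjectedState)
assert _is_injection(InjectedState, InjectedState)

# Markers nested inside Annotated[...] or Union[...] are found recursively.
assert _is_injection(Annotated[dict, InjectedState], InjectedState)
assert _is_injection(Union[dict, Annotated[dict, InjectedStore()]], InjectedStore)

# Unrelated annotations are not treated as injections.
assert not _is_injection(int, InjectedState)
```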
1676
- def _get_state_args(tool: BaseTool) -> dict[str, str | None]:
1677
- """Extract state injection mappings from tool annotations.
1678
-
1679
- This function analyzes a tool's input schema to identify arguments that should
1680
- be injected with graph state. It processes InjectedState annotations to build
1681
- a mapping of tool argument names to state field names.
1682
-
1683
- Args:
1684
- tool: The tool to analyze for state injection requirements.
1685
-
1686
- Returns:
1687
- A dictionary mapping tool argument names to state field names. If a field
1688
- name is None, the entire state should be injected for that argument.
1689
- """
1690
- full_schema = tool.get_input_schema()
1691
- tool_args_to_state_fields: dict = {}
1692
-
1693
- for name, type_ in get_all_basemodel_annotations(full_schema).items():
1694
- injections = [
1695
- type_arg for type_arg in get_args(type_) if _is_injection(type_arg, InjectedState)
1696
- ]
1697
- if len(injections) > 1:
1698
- msg = (
1699
- "A tool argument should not be annotated with InjectedState more than "
1700
- f"once. Received arg {name} with annotations {injections}."
1701
- )
1702
- raise ValueError(msg)
1703
- if len(injections) == 1:
1704
- injection = injections[0]
1705
- if isinstance(injection, InjectedState) and injection.field:
1706
- tool_args_to_state_fields[name] = injection.field
1707
- else:
1708
- tool_args_to_state_fields[name] = None
1709
- else:
1710
- pass
1711
- return tool_args_to_state_fields
1712
-
1713
-
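As an illustration of the mapping this returns, a sketch that calls the private helper directly; the tool `state_aware` is a hypothetical example.

```python
from typing import Annotated

from langchain_core.tools import tool

from langchain.tools.tool_node import InjectedState, _get_state_args


@tool
def state_aware(
    x: int,
    state: Annotated[dict, InjectedState],
    foo: Annotated[str, InjectedState("foo")],
) -> str:
    """Hypothetical tool using both the full state and a single state field."""
    return f"{foo}:{x}:{len(state['messages'])}"


# `state` maps to None (inject the whole state), `foo` maps to the "foo" field;
# model-controlled arguments like `x` do not appear in the mapping.
assert _get_state_args(state_aware) == {"state": None, "foo": "foo"}
```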
1714
- def _get_store_arg(tool: BaseTool) -> str | None:
1715
- """Extract store injection argument from tool annotations.
1716
-
1717
- This function analyzes a tool's input schema to identify the argument that
1718
- should be injected with the graph store. Only one store argument is supported
1719
- per tool.
1720
-
1721
- Args:
1722
- tool: The tool to analyze for store injection requirements.
1723
-
1724
- Returns:
1725
- The name of the argument that should receive the store injection, or None
1726
- if no store injection is required.
1727
-
1728
- Raises:
1729
- ValueError: If a tool argument has multiple InjectedStore annotations.
1730
- """
1731
- full_schema = tool.get_input_schema()
1732
- for name, type_ in get_all_basemodel_annotations(full_schema).items():
1733
- injections = [
1734
- type_arg for type_arg in get_args(type_) if _is_injection(type_arg, InjectedStore)
1735
- ]
1736
- if len(injections) > 1:
1737
- msg = (
1738
- "A tool argument should not be annotated with InjectedStore more than "
1739
- f"once. Received arg {name} with annotations {injections}."
1740
- )
1741
- raise ValueError(msg)
1742
- if len(injections) == 1:
1743
- return name
1744
-
1745
- return None
1746
-
1747
-
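And the analogous check for the store argument, again calling the private helper directly; `remember` is a hypothetical tool.

```python
from typing import Annotated, Any

from langchain_core.tools import tool

from langchain.tools.tool_node import InjectedStore, _get_store_arg


@tool
def remember(key: str, store: Annotated[Any, InjectedStore()]) -> str:
    """Hypothetical tool that reads from the injected store."""
    item = store.get(("memories",), key)
    return str(item.value) if item else "not found"


# Only the InjectedStore-annotated parameter name is reported.
assert _get_store_arg(remember) == "store"
```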
1748
- def _get_runtime_arg(tool: BaseTool) -> str | None:
1749
- """Extract runtime injection argument from tool annotations.
1750
-
1751
- This function analyzes a tool's input schema to identify the argument that
1752
- should be injected with the ToolRuntime instance. Only one runtime argument
1753
- is supported per tool.
1754
-
1755
- Args:
1756
- tool: The tool to analyze for runtime injection requirements.
1757
-
1758
- Returns:
1759
- The name of the argument that should receive the runtime injection, or None
1760
- if no runtime injection is required.
1761
-
1762
- Raises:
1763
- ValueError: If a tool argument has multiple ToolRuntime annotations.
1764
- """
1765
- full_schema = tool.get_input_schema()
1766
- for name, type_ in get_all_basemodel_annotations(full_schema).items():
1767
- # Check if the parameter name is "runtime" (regardless of type)
1768
- if name == "runtime":
1769
- return name
1770
- # Check if the type itself is ToolRuntime (direct usage)
1771
- if _is_injection(type_, ToolRuntime):
1772
- return name
1773
- # Check if ToolRuntime is in Annotated args
1774
- injections = [
1775
- type_arg for type_arg in get_args(type_) if _is_injection(type_arg, ToolRuntime)
1776
- ]
1777
- if len(injections) > 1:
1778
- msg = (
1779
- "A tool argument should not be annotated with ToolRuntime more than "
1780
- f"once. Received arg {name} with annotations {injections}."
1781
- )
1782
- raise ValueError(msg)
1783
- if len(injections) == 1:
1784
- return name
1785
-
1786
- return None
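Finally, a sketch of how the recommended `runtime: ToolRuntime` parameter is picked up by this helper. The helper is private and the tool name is hypothetical; note from the code above that the parameter name `runtime` alone is sufficient, regardless of its type hint.

```python
from langchain_core.tools import tool

from langchain.tools import ToolRuntime
from langchain.tools.tool_node import _get_runtime_arg


@tool
def runtime_tool(x: int, runtime: ToolRuntime) -> str:
    """Hypothetical tool using the recommended `runtime: ToolRuntime` parameter."""
    return f"{x}:{runtime.tool_call_id}"


# Matched both by the parameter name "runtime" and by the ToolRuntime type hint.
assert _get_runtime_arg(runtime_tool) == "runtime"
```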