langchain 1.0.0a12__py3-none-any.whl → 1.0.0a14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of langchain might be problematic.

Files changed (40)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/factory.py +597 -171
  3. langchain/agents/middleware/__init__.py +9 -3
  4. langchain/agents/middleware/context_editing.py +15 -14
  5. langchain/agents/middleware/human_in_the_loop.py +213 -170
  6. langchain/agents/middleware/model_call_limit.py +2 -2
  7. langchain/agents/middleware/model_fallback.py +46 -36
  8. langchain/agents/middleware/pii.py +25 -27
  9. langchain/agents/middleware/planning.py +16 -11
  10. langchain/agents/middleware/prompt_caching.py +14 -11
  11. langchain/agents/middleware/summarization.py +1 -1
  12. langchain/agents/middleware/tool_call_limit.py +5 -5
  13. langchain/agents/middleware/tool_emulator.py +200 -0
  14. langchain/agents/middleware/tool_selection.py +25 -21
  15. langchain/agents/middleware/types.py +623 -225
  16. langchain/chat_models/base.py +85 -90
  17. langchain/embeddings/__init__.py +0 -2
  18. langchain/embeddings/base.py +20 -20
  19. langchain/messages/__init__.py +34 -0
  20. langchain/tools/__init__.py +2 -6
  21. langchain/tools/tool_node.py +410 -83
  22. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/METADATA +8 -5
  23. langchain-1.0.0a14.dist-info/RECORD +30 -0
  24. langchain/_internal/__init__.py +0 -0
  25. langchain/_internal/_documents.py +0 -35
  26. langchain/_internal/_lazy_import.py +0 -35
  27. langchain/_internal/_prompts.py +0 -158
  28. langchain/_internal/_typing.py +0 -70
  29. langchain/_internal/_utils.py +0 -7
  30. langchain/agents/_internal/__init__.py +0 -1
  31. langchain/agents/_internal/_typing.py +0 -13
  32. langchain/documents/__init__.py +0 -7
  33. langchain/embeddings/cache.py +0 -361
  34. langchain/storage/__init__.py +0 -22
  35. langchain/storage/encoder_backed.py +0 -123
  36. langchain/storage/exceptions.py +0 -5
  37. langchain/storage/in_memory.py +0 -13
  38. langchain-1.0.0a12.dist-info/RECORD +0 -43
  39. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/WHEEL +0 -0
  40. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/licenses/LICENSE +0 -0
@@ -5,7 +5,7 @@ This module provides prebuilt functionality for executing tools in LangGraph.
  Tools are functions that models can call to interact with external systems,
  APIs, databases, or perform computations.

- The module implements several key design patterns:
+ The module implements design patterns for:
  - Parallel execution of multiple tool calls for efficiency
  - Robust error handling with customizable error messages
  - State injection for tools that need access to graph state
@@ -38,8 +38,9 @@ from __future__ import annotations
  import asyncio
  import inspect
  import json
+ from collections.abc import Awaitable, Callable
  from copy import copy, deepcopy
- from dataclasses import replace
+ from dataclasses import dataclass, replace
  from types import UnionType
  from typing import (
  TYPE_CHECKING,
@@ -47,6 +48,7 @@ from typing import (
  Any,
  Literal,
  Optional,
+ TypedDict,
  Union,
  cast,
  get_args,
@@ -70,16 +72,18 @@ from langchain_core.tools import BaseTool, InjectedToolArg
  from langchain_core.tools import tool as create_tool
  from langchain_core.tools.base import (
  TOOL_MESSAGE_BLOCK_TYPES,
+ ToolException,
  get_all_basemodel_annotations,
  )
  from langgraph._internal._runnable import RunnableCallable
  from langgraph.errors import GraphBubbleUp
  from langgraph.graph.message import REMOVE_ALL_MESSAGES
+ from langgraph.runtime import get_runtime
  from langgraph.types import Command, Send
  from pydantic import BaseModel, ValidationError

  if TYPE_CHECKING:
- from collections.abc import Callable, Sequence
+ from collections.abc import Sequence

  from langchain_core.runnables import RunnableConfig
  from langgraph.store.base import BaseStore
@@ -100,24 +104,132 @@ TOOL_INVOCATION_ERROR_TEMPLATE = (
  )


+ @dataclass()
+ class ToolCallRequest:
+ """Tool execution request passed to tool call interceptors.
+
+ Attributes:
+ tool_call: Tool call dict with name, args, and id from model output.
+ tool: BaseTool instance to be invoked.
+ state: Agent state (dict, list, or BaseModel).
+ runtime: LangGraph runtime context (optional, None if outside graph).
+ """
+
+ tool_call: ToolCall
+ tool: BaseTool
+ state: Any
+ runtime: Any
+
+
+ ToolCallWrapper = Callable[
+ [ToolCallRequest, Callable[[ToolCallRequest], ToolMessage | Command]],
+ ToolMessage | Command,
+ ]
+ """Wrapper for tool call execution with multi-call support.
+
+ Wrapper receives:
+ request: ToolCallRequest with tool_call, tool, state, and runtime.
+ execute: Callable to execute the tool (CAN BE CALLED MULTIPLE TIMES).
+
+ Returns:
+ ToolMessage or Command (the final result).
+
+ The execute callable can be invoked multiple times for retry logic,
+ with potentially modified requests each time. Each call to execute
+ is independent and stateless.
+
+ Note:
+ When implementing middleware for `create_agent`, use
+ `AgentMiddleware.wrap_tool_call` which provides properly typed
+ state parameter for better type safety.
+
+ Examples:
+ Passthrough (execute once):
+
+ def handler(request, execute):
+ return execute(request)
+
+ Modify request before execution:
+
+ def handler(request, execute):
+ request.tool_call["args"]["value"] *= 2
+ return execute(request)
+
+ Retry on error (execute multiple times):
+
+ def handler(request, execute):
+ for attempt in range(3):
+ try:
+ result = execute(request)
+ if is_valid(result):
+ return result
+ except Exception:
+ if attempt == 2:
+ raise
+ return result
+
+ Conditional retry based on response:
+
+ def handler(request, execute):
+ for attempt in range(3):
+ result = execute(request)
+ if isinstance(result, ToolMessage) and result.status != "error":
+ return result
+ if attempt < 2:
+ continue
+ return result
+
+ Cache/short-circuit without calling execute:
+
+ def handler(request, execute):
+ if cached := get_cache(request):
+ return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
+ result = execute(request)
+ save_cache(request, result)
+ return result
+ """
+
+ AsyncToolCallWrapper = Callable[
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
+ Awaitable[ToolMessage | Command],
+ ]
+ """Async wrapper for tool call execution with multi-call support."""
+
+
+ class ToolCallWithContext(TypedDict):
+ """ToolCall with additional context for graph state.
+
+ This is an internal data structure meant to help the ToolNode accept
+ tool calls with additional context (e.g. state) when dispatched using the
+ Send API.
+
+ The Send API is used in create_agent to distribute tool calls in parallel
+ and support human-in-the-loop workflows where graph execution may be paused
+ for an indefinite time.
+ """
+
+ tool_call: ToolCall
+ __type: Literal["tool_call_with_context"]
+ """Type to parameterize the payload.
+
+ Using "__" as a prefix to be defensive against potential name collisions with
+ regular user state.
+ """
+ state: Any
+ """The state is provided as additional context."""
+
+
  def msg_content_output(output: Any) -> str | list[dict]:
- """Convert tool output to valid message content format.
+ """Convert tool output to ToolMessage content format.

- LangChain ToolMessages accept either string content or a list of content blocks.
- This function ensures tool outputs are properly formatted for message consumption
- by attempting to preserve structured data when possible, falling back to JSON
- serialization or string conversion.
+ Handles str, list[dict] (content blocks), and arbitrary objects by attempting
+ JSON serialization with fallback to str().

  Args:
- output: The raw output from a tool execution. Can be any type.
+ output: Tool execution output of any type.

  Returns:
- Either a string representation of the output or a list of content blocks
- if the output is already in the correct format for structured content.
-
- Note:
- This function prioritizes backward compatibility by defaulting to JSON
- serialization rather than supporting all possible message content formats.
+ String or list of content blocks suitable for ToolMessage.content.
  """
  if isinstance(output, str) or (
  isinstance(output, list)
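The `AsyncToolCallWrapper` alias added above documents only its shape, not a usage pattern. As a rough sketch (not taken from this release; `retry_handler` is a hypothetical name), an async wrapper mirrors the sync retry examples in the `ToolCallWrapper` docstring:

```python
# Sketch of an AsyncToolCallWrapper-shaped handler: retry an error result up to
# three times with a small backoff. retry_handler is a hypothetical name.
import asyncio

from langchain_core.messages import ToolMessage


async def retry_handler(request, execute):
    result = None
    for attempt in range(3):
        result = await execute(request)  # execute may be awaited more than once
        if not (isinstance(result, ToolMessage) and result.status == "error"):
            return result
        await asyncio.sleep(0.5 * (attempt + 1))  # back off before retrying
    return result
```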
@@ -134,8 +246,11 @@ def msg_content_output(output: Any) -> str | list[dict]:
  return str(output)


- class ToolInvocationError(Exception):
- """Exception raised when a tool invocation fails due to invalid arguments."""
+ class ToolInvocationError(ToolException):
+ """An error occurred while invoking a tool due to invalid arguments.
+
+ This exception is only raised when invoking a tool using the ToolNode!
+ """

  def __init__(
  self, tool_name: str, source: ValidationError, tool_kwargs: dict[str, Any]
@@ -181,7 +296,7 @@ def _handle_tool_error(
  Args:
  e: The exception that occurred during tool execution.
  flag: Configuration for how to handle the error. Can be:
- - bool: If True, use default error template
+ - bool: If `True`, use default error template
  - str: Use this string as the error message
  - Callable: Call this function with the exception to get error message
  - tuple: Not used in this context (handled by caller)
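For the `Callable` strategy listed above (the same shape accepted by the node's `handle_tool_errors` argument), the handler's annotated signature determines which exceptions are handled, and its return value becomes the error message. A minimal sketch with a hypothetical handler:

```python
# Sketch only: a handler whose annotation restricts handling to TimeoutError.
# Other exception types keep propagating; the returned string becomes the
# content of the error ToolMessage.
def describe_timeout(e: TimeoutError) -> str:
    return f"The tool call timed out: {e}"


# Later passed as handle_tool_errors=describe_timeout when constructing the tool node.
```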
@@ -277,7 +392,7 @@ def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception],
  return (Exception,)


- class ToolNode(RunnableCallable):
+ class _ToolNode(RunnableCallable):
  """A node for executing tools in LangGraph workflows.

  Handles tool execution patterns including function calls, state injection,
@@ -286,25 +401,25 @@ class ToolNode(RunnableCallable):

  Input Formats:
  1. Graph state with `messages` key that has a list of messages:
- - Common representation for agentic workflows
- - Supports custom messages key via ``messages_key`` parameter
+ - Common representation for agentic workflows
+ - Supports custom messages key via `messages_key` parameter

- 2. **Message List**: ``[AIMessage(..., tool_calls=[...])]``
- - List of messages with tool calls in the last AIMessage
+ 2. **Message List**: `[AIMessage(..., tool_calls=[...])]`
+ - List of messages with tool calls in the last AIMessage

- 3. **Direct Tool Calls**: ``[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]``
- - Bypasses message parsing for direct tool execution
- - For programmatic tool invocation and testing
+ 3. **Direct Tool Calls**: `[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]`
+ - Bypasses message parsing for direct tool execution
+ - For programmatic tool invocation and testing

  Output Formats:
  Output format depends on input type and tool behavior:

  **For Regular tools**:
- - Dict input → ``{"messages": [ToolMessage(...)]}``
- - List input → ``[ToolMessage(...)]``
+ - Dict input → `{"messages": [ToolMessage(...)]}`
+ - List input → `[ToolMessage(...)]`

  **For Command tools**:
- - Returns ``[Command(...)]`` or mixed list with regular tool outputs
+ - Returns `[Command(...)]` or mixed list with regular tool outputs
  - Commands can update state, trigger navigation, or send messages

  Args:
@@ -314,29 +429,29 @@ class ToolNode(RunnableCallable):
  name: The name identifier for this node in the graph. Used for debugging
  and visualization. Defaults to "tools".
  tags: Optional metadata tags to associate with the node for filtering
- and organization. Defaults to None.
+ and organization. Defaults to `None`.
  handle_tool_errors: Configuration for error handling during tool execution.
  Supports multiple strategies:

  - **True**: Catch all errors and return a ToolMessage with the default
- error template containing the exception details.
+ error template containing the exception details.
  - **str**: Catch all errors and return a ToolMessage with this custom
- error message string.
+ error message string.
  - **type[Exception]**: Only catch exceptions with the specified type and
- return the default error message for it.
+ return the default error message for it.
  - **tuple[type[Exception], ...]**: Only catch exceptions with the specified
- types and return default error messages for them.
+ types and return default error messages for them.
  - **Callable[..., str]**: Catch exceptions matching the callable's signature
- and return the string result of calling it with the exception.
+ and return the string result of calling it with the exception.
  - **False**: Disable error handling entirely, allowing exceptions to
- propagate.
+ propagate.

  Defaults to a callable that:
  - catches tool invocation errors (due to invalid arguments provided by the model) and returns a descriptive error message
  - ignores tool execution errors (they will be re-raised)

  messages_key: The key in the state dictionary that contains the message list.
- This same key will be used for the output ToolMessages.
+ This same key will be used for the output `ToolMessage` objects.
  Defaults to "messages".
  Allows custom state schemas with different message field names.

@@ -394,8 +509,10 @@ class ToolNode(RunnableCallable):
  | type[Exception]
  | tuple[type[Exception], ...] = _default_handle_tool_errors,
  messages_key: str = "messages",
+ wrap_tool_call: ToolCallWrapper | None = None,
+ awrap_tool_call: AsyncToolCallWrapper | None = None,
  ) -> None:
- """Initialize the ToolNode with the provided tools and configuration.
+ """Initialize ToolNode with tools and configuration.

  Args:
  tools: Sequence of tools to make available for execution.
@@ -403,6 +520,11 @@ class ToolNode(RunnableCallable):
  tags: Optional metadata tags.
  handle_tool_errors: Error handling configuration.
  messages_key: State key containing messages.
+ wrap_tool_call: Sync wrapper function to intercept tool execution. Receives
+ ToolCallRequest and execute callable, returns ToolMessage or Command.
+ Enables retries, caching, request modification, and control flow.
+ awrap_tool_call: Async wrapper function to intercept tool execution.
+ If not provided, falls back to wrap_tool_call for async execution.
  """
  super().__init__(self._func, self._afunc, name=name, tags=tags, trace=False)
  self._tools_by_name: dict[str, BaseTool] = {}
@@ -410,6 +532,8 @@ class ToolNode(RunnableCallable):
  self._tool_to_store_arg: dict[str, str | None] = {}
  self._handle_tool_errors = handle_tool_errors
  self._messages_key = messages_key
+ self._wrap_tool_call = wrap_tool_call
+ self._awrap_tool_call = awrap_tool_call
  for tool in tools:
  if not isinstance(tool, BaseTool):
  tool_ = create_tool(cast("type[BaseTool]", tool))
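A minimal sketch of the new constructor parameter in use, assuming the class is importable as `_ToolNode` from `langchain.tools.tool_node` (this diff renames the previously public `ToolNode`); the tool and wrapper names are hypothetical:

```python
# Sketch only: pass a ToolCallWrapper at construction time to retry once on error.
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool
from langchain.tools.tool_node import _ToolNode  # assumed import path


@tool
def flaky_search(query: str) -> str:
    """Search an upstream service that occasionally fails."""
    return f"results for {query}"


def retry_once(request, execute):
    # ToolCallWrapper shape: re-run execute() if the first attempt errors.
    result = execute(request)
    if isinstance(result, ToolMessage) and result.status == "error":
        result = execute(request)
    return result


node = _ToolNode([flaky_search], wrap_tool_call=retry_once)
```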
@@ -431,11 +555,23 @@ class ToolNode(RunnableCallable):
  *,
  store: Optional[BaseStore], # noqa: UP045
  ) -> Any:
- tool_calls, input_type = self._parse_input(input, store)
+ try:
+ runtime = get_runtime()
+ except RuntimeError:
+ # Running outside of LangGraph runtime context (e.g., unit tests)
+ runtime = None
+
+ tool_calls, input_type = self._parse_input(input)
+ tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
+
  config_list = get_config_list(config, len(tool_calls))
  input_types = [input_type] * len(tool_calls)
+ inputs = [input] * len(tool_calls)
+ runtimes = [runtime] * len(tool_calls)
  with get_executor_for_config(config) as executor:
- outputs = [*executor.map(self._run_one, tool_calls, input_types, config_list)]
+ outputs = [
+ *executor.map(self._run_one, tool_calls, input_types, config_list, inputs, runtimes)
+ ]

  return self._combine_tool_outputs(outputs, input_type)

@@ -446,9 +582,16 @@ class ToolNode(RunnableCallable):
  *,
  store: Optional[BaseStore], # noqa: UP045
  ) -> Any:
- tool_calls, input_type = self._parse_input(input, store)
+ try:
+ runtime = get_runtime()
+ except RuntimeError:
+ # Running outside of LangGraph runtime context (e.g., unit tests)
+ runtime = None
+
+ tool_calls, input_type = self._parse_input(input)
+ tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
  outputs = await asyncio.gather(
- *(self._arun_one(call, input_type, config) for call in tool_calls)
+ *(self._arun_one(call, input_type, config, input, runtime) for call in tool_calls)
  )

  return self._combine_tool_outputs(outputs, input_type)
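The `get_runtime()` fallback above means the node still runs when invoked directly, outside a compiled graph (the runtime simply becomes `None`). A hedged sketch of that unit-test-style usage, with a hypothetical tool and ids:

```python
# Sketch only: direct invocation outside a LangGraph runtime; get_runtime()
# raises RuntimeError here, so the node falls back to runtime=None.
from langchain_core.messages import AIMessage
from langchain_core.tools import tool
from langchain.tools.tool_node import _ToolNode  # assumed import path


@tool
def add(a: int, b: int) -> int:
    """Add two numbers."""
    return a + b


node = _ToolNode([add])
messages = [AIMessage(content="", tool_calls=[{"name": "add", "args": {"a": 1, "b": 2}, "id": "1"}])]
result = node.invoke(messages)  # list input -> list of ToolMessage outputs
```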
@@ -495,20 +638,30 @@ class ToolNode(RunnableCallable):
  combined_outputs.append(parent_command)
  return combined_outputs

- def _run_one(
+ def _execute_tool_sync(
  self,
- call: ToolCall,
+ request: ToolCallRequest,
  input_type: Literal["list", "dict", "tool_calls"],
  config: RunnableConfig,
  ) -> ToolMessage | Command:
- """Run a single tool call synchronously."""
- if invalid_tool_message := self._validate_tool_call(call):
- return invalid_tool_message
+ """Execute tool call with configured error handling.

- try:
- call_args = {**call, "type": "tool_call"}
- tool = self.tools_by_name[call["name"]]
+ Args:
+ request: Tool execution request.
+ input_type: Input format.
+ config: Runnable configuration.
+
+ Returns:
+ ToolMessage or Command.
+
+ Raises:
+ Exception: If tool fails and handle_tool_errors is False.
+ """
+ call = request.tool_call
+ tool = request.tool
+ call_args = {**call, "type": "tool_call"}

+ try:
  try:
  response = tool.invoke(call_args, config)
  except ValidationError as exc:
@@ -526,6 +679,7 @@ class ToolNode(RunnableCallable):
  except GraphBubbleUp:
  raise
  except Exception as e:
+ # Determine which exception types are handled
  handled_types: tuple[type[Exception], ...]
  if isinstance(self._handle_tool_errors, type) and issubclass(
  self._handle_tool_errors, Exception
@@ -541,10 +695,11 @@ class ToolNode(RunnableCallable):
  # default behavior is catching all exceptions
  handled_types = (Exception,)

- # Unhandled
+ # Check if this error should be handled
  if not self._handle_tool_errors or not isinstance(e, handled_types):
  raise
- # Handled
+
+ # Error is handled - create error ToolMessage
  content = _handle_tool_error(e, flag=self._handle_tool_errors)
  return ToolMessage(
  content=content,
@@ -553,28 +708,102 @@ class ToolNode(RunnableCallable):
  status="error",
  )

+ # Process successful response
  if isinstance(response, Command):
- return self._validate_tool_command(response, call, input_type)
+ # Validate Command before returning to handler
+ return self._validate_tool_command(response, request.tool_call, input_type)
  if isinstance(response, ToolMessage):
  response.content = cast("str | list", msg_content_output(response.content))
  return response
+
  msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
  raise TypeError(msg)

- async def _arun_one(
+ def _run_one(
  self,
  call: ToolCall,
  input_type: Literal["list", "dict", "tool_calls"],
  config: RunnableConfig,
+ input: list[AnyMessage] | dict[str, Any] | BaseModel,
+ runtime: Any,
  ) -> ToolMessage | Command:
- """Run a single tool call asynchronously."""
+ """Execute single tool call with wrap_tool_call wrapper if configured.
+
+ Args:
+ call: Tool call dict.
+ input_type: Input format.
+ config: Runnable configuration.
+ input: Agent state.
+ runtime: LangGraph runtime or None.
+
+ Returns:
+ ToolMessage or Command.
+ """
  if invalid_tool_message := self._validate_tool_call(call):
  return invalid_tool_message

+ tool = self.tools_by_name[call["name"]]
+
+ # Extract state from ToolCallWithContext if present
+ state = self._extract_state(input)
+
+ # Create the tool request with state and runtime
+ tool_request = ToolCallRequest(
+ tool_call=call,
+ tool=tool,
+ state=state,
+ runtime=runtime,
+ )
+
+ if self._wrap_tool_call is None:
+ # No wrapper - execute directly
+ return self._execute_tool_sync(tool_request, input_type, config)
+
+ # Define execute callable that can be called multiple times
+ def execute(req: ToolCallRequest) -> ToolMessage | Command:
+ """Execute tool with given request. Can be called multiple times."""
+ return self._execute_tool_sync(req, input_type, config)
+
+ # Call wrapper with request and execute callable
  try:
- call_args = {**call, "type": "tool_call"}
- tool = self.tools_by_name[call["name"]]
+ return self._wrap_tool_call(tool_request, execute)
+ except Exception as e:
+ # Wrapper threw an exception
+ if not self._handle_tool_errors:
+ raise
+ # Convert to error message
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
+ return ToolMessage(
+ content=content,
+ name=tool_request.tool_call["name"],
+ tool_call_id=tool_request.tool_call["id"],
+ status="error",
+ )
+
+ async def _execute_tool_async(
+ self,
+ request: ToolCallRequest,
+ input_type: Literal["list", "dict", "tool_calls"],
+ config: RunnableConfig,
+ ) -> ToolMessage | Command:
+ """Execute tool call asynchronously with configured error handling.
+
+ Args:
+ request: Tool execution request.
+ input_type: Input format.
+ config: Runnable configuration.
+
+ Returns:
+ ToolMessage or Command.

+ Raises:
+ Exception: If tool fails and handle_tool_errors is False.
+ """
+ call = request.tool_call
+ tool = request.tool
+ call_args = {**call, "type": "tool_call"}
+
+ try:
  try:
  response = await tool.ainvoke(call_args, config)
  except ValidationError as exc:
@@ -592,6 +821,7 @@ class ToolNode(RunnableCallable):
  except GraphBubbleUp:
  raise
  except Exception as e:
+ # Determine which exception types are handled
  handled_types: tuple[type[Exception], ...]
  if isinstance(self._handle_tool_errors, type) and issubclass(
  self._handle_tool_errors, Exception
@@ -607,12 +837,12 @@ class ToolNode(RunnableCallable):
  # default behavior is catching all exceptions
  handled_types = (Exception,)

- # Unhandled
+ # Check if this error should be handled
  if not self._handle_tool_errors or not isinstance(e, handled_types):
  raise
- # Handled
- content = _handle_tool_error(e, flag=self._handle_tool_errors)

+ # Error is handled - create error ToolMessage
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
  return ToolMessage(
  content=content,
  name=call["name"],
@@ -620,18 +850,89 @@ class ToolNode(RunnableCallable):
  status="error",
  )

+ # Process successful response
  if isinstance(response, Command):
- return self._validate_tool_command(response, call, input_type)
+ # Validate Command before returning to handler
+ return self._validate_tool_command(response, request.tool_call, input_type)
  if isinstance(response, ToolMessage):
  response.content = cast("str | list", msg_content_output(response.content))
  return response
+
  msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
  raise TypeError(msg)

+ async def _arun_one(
+ self,
+ call: ToolCall,
+ input_type: Literal["list", "dict", "tool_calls"],
+ config: RunnableConfig,
+ input: list[AnyMessage] | dict[str, Any] | BaseModel,
+ runtime: Any,
+ ) -> ToolMessage | Command:
+ """Execute single tool call asynchronously with awrap_tool_call wrapper if configured.
+
+ Args:
+ call: Tool call dict.
+ input_type: Input format.
+ config: Runnable configuration.
+ input: Agent state.
+ runtime: LangGraph runtime or None.
+
+ Returns:
+ ToolMessage or Command.
+ """
+ if invalid_tool_message := self._validate_tool_call(call):
+ return invalid_tool_message
+
+ tool = self.tools_by_name[call["name"]]
+
+ # Extract state from ToolCallWithContext if present
+ state = self._extract_state(input)
+
+ # Create the tool request with state and runtime
+ tool_request = ToolCallRequest(
+ tool_call=call,
+ tool=tool,
+ state=state,
+ runtime=runtime,
+ )
+
+ if self._awrap_tool_call is None and self._wrap_tool_call is None:
+ # No wrapper - execute directly
+ return await self._execute_tool_async(tool_request, input_type, config)
+
+ # Define async execute callable that can be called multiple times
+ async def execute(req: ToolCallRequest) -> ToolMessage | Command:
+ """Execute tool with given request. Can be called multiple times."""
+ return await self._execute_tool_async(req, input_type, config)
+
+ def _sync_execute(req: ToolCallRequest) -> ToolMessage | Command:
+ """Sync execute fallback for sync wrapper."""
+ return self._execute_tool_sync(req, input_type, config)
+
+ # Call wrapper with request and execute callable
+ try:
+ if self._awrap_tool_call is not None:
+ return await self._awrap_tool_call(tool_request, execute)
+ # None check was performed above already
+ self._wrap_tool_call = cast("ToolCallWrapper", self._wrap_tool_call)
+ return self._wrap_tool_call(tool_request, _sync_execute)
+ except Exception as e:
+ # Wrapper threw an exception
+ if not self._handle_tool_errors:
+ raise
+ # Convert to error message
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
+ return ToolMessage(
+ content=content,
+ name=tool_request.tool_call["name"],
+ tool_call_id=tool_request.tool_call["id"],
+ status="error",
+ )
+
  def _parse_input(
  self,
  input: list[AnyMessage] | dict[str, Any] | BaseModel,
- store: BaseStore | None,
  ) -> tuple[list[ToolCall], Literal["list", "dict", "tool_calls"]]:
  input_type: Literal["list", "dict", "tool_calls"]
  if isinstance(input, list):
@@ -641,6 +942,14 @@ class ToolNode(RunnableCallable):
  return tool_calls, input_type
  input_type = "list"
  messages = input
+ elif isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ # Handle ToolCallWithContext from Send API
+ # mypy will not be able to type narrow correctly since the signature
+ # for input contains dict[str, Any]. We'd need to narrow dict[str, Any]
+ # before we can apply correct typing.
+ input_with_ctx = cast("ToolCallWithContext", input)
+ input_type = "tool_calls"
+ return [input_with_ctx["tool_call"]], input_type
  elif isinstance(input, dict) and (messages := input.get(self._messages_key, [])):
  input_type = "dict"
  elif messages := getattr(input, self._messages_key, []):
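The new branch above accepts payloads shaped like the `ToolCallWithContext` TypedDict added earlier in this diff. A sketch of the kind of `Send` dispatch that would hit it; the target node name and state contents are hypothetical:

```python
# Sketch only: dispatching one tool call plus its state via the Send API.
from langgraph.types import Send

payload = {
    "__type": "tool_call_with_context",
    "tool_call": {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1", "type": "tool_call"},
    "state": {"messages": [], "user_id": "u-123"},
}
send = Send("tools", payload)  # one Send per tool call, as the docstring describes for create_agent
```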
@@ -656,9 +965,7 @@ class ToolNode(RunnableCallable):
  msg = "No AIMessage found in input"
  raise ValueError(msg)

- tool_calls = [
- self.inject_tool_args(call, input, store) for call in latest_ai_message.tool_calls
- ]
+ tool_calls = list(latest_ai_message.tool_calls)
  return tool_calls, input_type

  def _validate_tool_call(self, call: ToolCall) -> ToolMessage | None:
@@ -674,6 +981,21 @@ class ToolNode(RunnableCallable):
  )
  return None

+ def _extract_state(
+ self, input: list[AnyMessage] | dict[str, Any] | BaseModel
+ ) -> list[AnyMessage] | dict[str, Any] | BaseModel:
+ """Extract state from input, handling ToolCallWithContext if present.
+
+ Args:
+ input: The input which may be raw state or ToolCallWithContext.
+
+ Returns:
+ The actual state to pass to wrap_tool_call wrappers.
+ """
+ if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ return input["state"]
+ return input
+
  def _inject_state(
  self,
  tool_call: ToolCall,
@@ -696,14 +1018,20 @@ class ToolNode(RunnableCallable):
  err_msg += f" State should contain fields {required_fields_str}."
  raise ValueError(err_msg)

- if isinstance(input, dict):
+ # Extract state from ToolCallWithContext if present
+ if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ state = input["state"]
+ else:
+ state = input
+
+ if isinstance(state, dict):
  tool_state_args = {
- tool_arg: input[state_field] if state_field else input
+ tool_arg: state[state_field] if state_field else state
  for tool_arg, state_field in state_args.items()
  }
  else:
  tool_state_args = {
- tool_arg: getattr(input, state_field) if state_field else input
+ tool_arg: getattr(state, state_field) if state_field else state
  for tool_arg, state_field in state_args.items()
  }

@@ -731,7 +1059,7 @@ class ToolNode(RunnableCallable):
  }
  return tool_call

- def inject_tool_args(
+ def _inject_tool_args(
  self,
  tool_call: ToolCall,
  input: list[AnyMessage] | dict[str, Any] | BaseModel,
@@ -739,10 +1067,11 @@ class ToolNode(RunnableCallable):
  ) -> ToolCall:
  """Inject graph state and store into tool call arguments.

- This method enables tools to access graph context that should not be controlled
- by the model. Tools can declare dependencies on graph state or persistent storage
- using InjectedState and InjectedStore annotations. This method automatically
- identifies these dependencies and injects the appropriate values.
+ This is an internal method that enables tools to access graph context that
+ should not be controlled by the model. Tools can declare dependencies on graph
+ state or persistent storage using InjectedState and InjectedStore annotations.
+ This method automatically identifies these dependencies and injects the
+ appropriate values.

  The injection process preserves the original tool call structure while adding
  the necessary context arguments. This allows tools to be both model-callable
@@ -765,10 +1094,8 @@ class ToolNode(RunnableCallable):
  or if state injection requirements cannot be satisfied.

  Note:
- This method is automatically called during tool execution but can also
- be used manually when working with the Send API or custom routing logic.
- The injection is performed on a copy of the tool call to avoid mutating
- the original.
+ This method is called automatically during tool execution. It should not
+ be called from outside the ToolNode.
  """
  if tool_call["name"] not in self.tools_by_name:
  return tool_call
@@ -940,7 +1267,7 @@ class InjectedState(InjectedToolArg):
  to the model's tool-calling interface.

  Args:
- field: Optional key to extract from the state dictionary. If None, the entire
+ field: Optional key to extract from the state dictionary. If `None`, the entire
  state is injected. If specified, only that field's value is injected.
  This allows tools to request specific state components rather than
  processing the full state structure.
@@ -985,7 +1312,7 @@ class InjectedState(InjectedToolArg):
  node.invoke(state)
  ```

- ```pycon
+ ```python
  [
  ToolMessage(content="not enough messages", name="state_tool", tool_call_id="1"),
  ToolMessage(content="bar2", name="foo_tool", tool_call_id="2"),
@@ -994,12 +1321,12 @@ class InjectedState(InjectedToolArg):

  Note:
  - InjectedState arguments are automatically excluded from tool schemas
- presented to language models
+ presented to language models
  - ToolNode handles the injection process during execution
  - Tools can mix regular arguments (controlled by the model) with injected
- arguments (controlled by the system)
+ arguments (controlled by the system)
  - State injection occurs after the model generates tool calls but before
- tool execution
+ tool execution
  """

  def __init__(self, field: str | None = None) -> None:
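For the `field` behavior documented above, a hedged sketch of a tool that receives only the state's message list while exposing just `query` to the model; the tool name and import path are assumptions:

```python
# Sketch only: InjectedState("messages") injects one state field; the argument
# is hidden from the model-facing tool schema.
from typing import Annotated

from langchain_core.tools import tool
from langchain.tools.tool_node import InjectedState  # assumed import path


@tool
def summarize_context(
    query: str,
    messages: Annotated[list, InjectedState("messages")],
) -> str:
    """Answer a query using the conversation so far."""
    return f"{len(messages)} prior messages considered for: {query}"
```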
@@ -1074,7 +1401,7 @@ class InjectedStore(InjectedToolArg):

  Note:
  - InjectedStore arguments are automatically excluded from tool schemas
- presented to language models
+ presented to language models
  - The store instance is automatically injected by ToolNode during execution
  - Tools can access namespaced storage using the store's get/put methods
  - Store injection requires the graph to be compiled with a store instance
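In the same spirit, a hedged sketch of a tool that receives the graph's store via `InjectedStore()`; the namespace, key, and import path are assumptions:

```python
# Sketch only: the injected BaseStore argument is excluded from the model-facing
# schema and filled in by the tool node at execution time.
from typing import Annotated

from langchain_core.tools import tool
from langgraph.store.base import BaseStore
from langchain.tools.tool_node import InjectedStore  # assumed import path


@tool
def remember_preference(
    user_id: str,
    preference: str,
    store: Annotated[BaseStore, InjectedStore()],
) -> str:
    """Persist a user preference (hypothetical example tool)."""
    store.put(("preferences",), user_id, {"value": preference})
    return f"Saved preference for {user_id}"
```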