langchain 1.0.0a11__py3-none-any.whl → 1.0.0a13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

Files changed (34)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/factory.py +511 -180
  3. langchain/agents/middleware/__init__.py +9 -3
  4. langchain/agents/middleware/context_editing.py +15 -14
  5. langchain/agents/middleware/human_in_the_loop.py +213 -170
  6. langchain/agents/middleware/model_call_limit.py +2 -2
  7. langchain/agents/middleware/model_fallback.py +46 -36
  8. langchain/agents/middleware/pii.py +19 -19
  9. langchain/agents/middleware/planning.py +16 -11
  10. langchain/agents/middleware/prompt_caching.py +14 -11
  11. langchain/agents/middleware/summarization.py +1 -1
  12. langchain/agents/middleware/tool_call_limit.py +5 -5
  13. langchain/agents/middleware/tool_emulator.py +200 -0
  14. langchain/agents/middleware/tool_selection.py +25 -21
  15. langchain/agents/middleware/types.py +484 -225
  16. langchain/chat_models/base.py +85 -90
  17. langchain/embeddings/base.py +20 -20
  18. langchain/embeddings/cache.py +21 -21
  19. langchain/messages/__init__.py +2 -0
  20. langchain/storage/encoder_backed.py +22 -23
  21. langchain/tools/tool_node.py +388 -80
  22. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/METADATA +8 -5
  23. langchain-1.0.0a13.dist-info/RECORD +36 -0
  24. langchain/_internal/__init__.py +0 -0
  25. langchain/_internal/_documents.py +0 -35
  26. langchain/_internal/_lazy_import.py +0 -35
  27. langchain/_internal/_prompts.py +0 -158
  28. langchain/_internal/_typing.py +0 -70
  29. langchain/_internal/_utils.py +0 -7
  30. langchain/agents/_internal/__init__.py +0 -1
  31. langchain/agents/_internal/_typing.py +0 -13
  32. langchain-1.0.0a11.dist-info/RECORD +0 -43
  33. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/WHEEL +0 -0
  34. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/licenses/LICENSE +0 -0
@@ -5,7 +5,7 @@ This module provides prebuilt functionality for executing tools in LangGraph.
  Tools are functions that models can call to interact with external systems,
  APIs, databases, or perform computations.

- The module implements several key design patterns:
+ The module implements design patterns for:
  - Parallel execution of multiple tool calls for efficiency
  - Robust error handling with customizable error messages
  - State injection for tools that need access to graph state
@@ -38,8 +38,9 @@ from __future__ import annotations
  import asyncio
  import inspect
  import json
+ from collections.abc import Callable
  from copy import copy, deepcopy
- from dataclasses import replace
+ from dataclasses import dataclass, replace
  from types import UnionType
  from typing import (
  TYPE_CHECKING,
@@ -47,6 +48,7 @@ from typing import (
  Any,
  Literal,
  Optional,
+ TypedDict,
  Union,
  cast,
  get_args,
@@ -75,11 +77,12 @@ from langchain_core.tools.base import (
  from langgraph._internal._runnable import RunnableCallable
  from langgraph.errors import GraphBubbleUp
  from langgraph.graph.message import REMOVE_ALL_MESSAGES
+ from langgraph.runtime import get_runtime
  from langgraph.types import Command, Send
  from pydantic import BaseModel, ValidationError

  if TYPE_CHECKING:
- from collections.abc import Callable, Sequence
+ from collections.abc import Sequence

  from langchain_core.runnables import RunnableConfig
  from langgraph.store.base import BaseStore
@@ -100,24 +103,126 @@ TOOL_INVOCATION_ERROR_TEMPLATE = (
  )


+ @dataclass()
+ class ToolCallRequest:
+ """Tool execution request passed to tool call interceptors.
+
+ Attributes:
+ tool_call: Tool call dict with name, args, and id from model output.
+ tool: BaseTool instance to be invoked.
+ state: Agent state (dict, list, or BaseModel).
+ runtime: LangGraph runtime context (optional, None if outside graph).
+ """
+
+ tool_call: ToolCall
+ tool: BaseTool
+ state: Any
+ runtime: Any
+
+
+ ToolCallWrapper = Callable[
+ [ToolCallRequest, Callable[[ToolCallRequest], ToolMessage | Command]],
+ ToolMessage | Command,
+ ]
+ """Wrapper for tool call execution with multi-call support.
+
+ Wrapper receives:
+ request: ToolCallRequest with tool_call, tool, state, and runtime.
+ execute: Callable to execute the tool (CAN BE CALLED MULTIPLE TIMES).
+
+ Returns:
+ ToolMessage or Command (the final result).
+
+ The execute callable can be invoked multiple times for retry logic,
+ with potentially modified requests each time. Each call to execute
+ is independent and stateless.
+
+ Note:
+ When implementing middleware for `create_agent`, use
+ `AgentMiddleware.wrap_tool_call` which provides properly typed
+ state parameter for better type safety.
+
+ Examples:
+ Passthrough (execute once):
+
+ def handler(request, execute):
+ return execute(request)
+
+ Modify request before execution:
+
+ def handler(request, execute):
+ request.tool_call["args"]["value"] *= 2
+ return execute(request)
+
+ Retry on error (execute multiple times):
+
+ def handler(request, execute):
+ for attempt in range(3):
+ try:
+ result = execute(request)
+ if is_valid(result):
+ return result
+ except Exception:
+ if attempt == 2:
+ raise
+ return result
+
+ Conditional retry based on response:
+
+ def handler(request, execute):
+ for attempt in range(3):
+ result = execute(request)
+ if isinstance(result, ToolMessage) and result.status != "error":
+ return result
+ if attempt < 2:
+ continue
+ return result
+
+ Cache/short-circuit without calling execute:
+
+ def handler(request, execute):
+ if cached := get_cache(request):
+ return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
+ result = execute(request)
+ save_cache(request, result)
+ return result
+ """
+
+
+ class ToolCallWithContext(TypedDict):
+ """ToolCall with additional context for graph state.
+
+ This is an internal data structure meant to help the ToolNode accept
+ tool calls with additional context (e.g. state) when dispatched using the
+ Send API.
+
+ The Send API is used in create_agent to distribute tool calls in parallel
+ and support human-in-the-loop workflows where graph execution may be paused
+ for an indefinite time.
+ """
+
+ tool_call: ToolCall
+ __type: Literal["tool_call_with_context"]
+ """Type to parameterize the payload.
+
+ Using "__" as a prefix to be defensive against potential name collisions with
+ regular user state.
+ """
+ state: Any
+ """The state is provided as additional context."""
+
+
  def msg_content_output(output: Any) -> str | list[dict]:
- """Convert tool output to valid message content format.
+ """Convert tool output to ToolMessage content format.

- LangChain ToolMessages accept either string content or a list of content blocks.
- This function ensures tool outputs are properly formatted for message consumption
- by attempting to preserve structured data when possible, falling back to JSON
- serialization or string conversion.
+ Handles str, list[dict] (content blocks), and arbitrary objects by attempting
+ JSON serialization with fallback to str().

  Args:
- output: The raw output from a tool execution. Can be any type.
+ output: Tool execution output of any type.

  Returns:
- Either a string representation of the output or a list of content blocks
- if the output is already in the correct format for structured content.
-
- Note:
- This function prioritizes backward compatibility by defaulting to JSON
- serialization rather than supporting all possible message content formats.
+ String or list of content blocks suitable for ToolMessage.content.
  """
  if isinstance(output, str) or (
  isinstance(output, list)
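
The new wrapper contract is easiest to see end to end. Below is a minimal, hypothetical sketch (not part of the diff) that wires a retry handler into a `ToolNode` via the new `wrap_tool_call` parameter; the `flaky_search` tool and the import path are illustrative assumptions based on the signatures added above.

```python
from langchain_core.messages import ToolMessage
from langchain_core.tools import tool

# Module shown in this diff; adjust the import path to your install.
from langchain.tools.tool_node import ToolNode


@tool
def flaky_search(query: str) -> str:
    """Search an external system that occasionally fails."""
    return f"results for {query}"


def retry_handler(request, execute):
    # `execute` is stateless and may be called multiple times (see ToolCallWrapper above).
    result = execute(request)
    for _ in range(2):
        if isinstance(result, ToolMessage) and result.status == "error":
            result = execute(request)  # retry when the tool returned an error ToolMessage
        else:
            break
    return result


node = ToolNode([flaky_search], wrap_tool_call=retry_handler)
```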
@@ -181,7 +286,7 @@ def _handle_tool_error(
  Args:
  e: The exception that occurred during tool execution.
  flag: Configuration for how to handle the error. Can be:
- - bool: If True, use default error template
+ - bool: If `True`, use default error template
  - str: Use this string as the error message
  - Callable: Call this function with the exception to get error message
  - tuple: Not used in this context (handled by caller)
@@ -286,25 +391,25 @@ class ToolNode(RunnableCallable):

  Input Formats:
  1. Graph state with `messages` key that has a list of messages:
- - Common representation for agentic workflows
- - Supports custom messages key via ``messages_key`` parameter
+ - Common representation for agentic workflows
+ - Supports custom messages key via `messages_key` parameter

- 2. **Message List**: ``[AIMessage(..., tool_calls=[...])]``
- - List of messages with tool calls in the last AIMessage
+ 2. **Message List**: `[AIMessage(..., tool_calls=[...])]`
+ - List of messages with tool calls in the last AIMessage

- 3. **Direct Tool Calls**: ``[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]``
- - Bypasses message parsing for direct tool execution
- - For programmatic tool invocation and testing
+ 3. **Direct Tool Calls**: `[{"name": "tool", "args": {...}, "id": "1", "type": "tool_call"}]`
+ - Bypasses message parsing for direct tool execution
+ - For programmatic tool invocation and testing

  Output Formats:
  Output format depends on input type and tool behavior:

  **For Regular tools**:
- - Dict input → ``{"messages": [ToolMessage(...)]}``
- - List input → ``[ToolMessage(...)]``
+ - Dict input → `{"messages": [ToolMessage(...)]}`
+ - List input → `[ToolMessage(...)]`

  **For Command tools**:
- - Returns ``[Command(...)]`` or mixed list with regular tool outputs
+ - Returns `[Command(...)]` or mixed list with regular tool outputs
  - Commands can update state, trigger navigation, or send messages

  Args:
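
As a quick illustration of the three input formats and their corresponding outputs, here is a hedged sketch; the `add` tool is an assumption, not part of the package.

```python
from langchain_core.messages import AIMessage
from langchain_core.tools import tool

from langchain.tools.tool_node import ToolNode  # module shown in this diff


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


node = ToolNode([add])
call = {"name": "add", "args": {"a": 1, "b": 2}, "id": "1", "type": "tool_call"}

# 1. Graph state dict -> {"messages": [ToolMessage(...)]}
out_dict = node.invoke({"messages": [AIMessage(content="", tool_calls=[call])]})

# 2. Message list -> [ToolMessage(...)]
out_list = node.invoke([AIMessage(content="", tool_calls=[call])])

# 3. Direct tool calls -> [ToolMessage(...)]
out_direct = node.invoke([call])
```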
@@ -314,29 +419,29 @@ class ToolNode(RunnableCallable):
  name: The name identifier for this node in the graph. Used for debugging
  and visualization. Defaults to "tools".
  tags: Optional metadata tags to associate with the node for filtering
- and organization. Defaults to None.
+ and organization. Defaults to `None`.
  handle_tool_errors: Configuration for error handling during tool execution.
  Supports multiple strategies:

  - **True**: Catch all errors and return a ToolMessage with the default
- error template containing the exception details.
+ error template containing the exception details.
  - **str**: Catch all errors and return a ToolMessage with this custom
- error message string.
+ error message string.
  - **type[Exception]**: Only catch exceptions with the specified type and
- return the default error message for it.
+ return the default error message for it.
  - **tuple[type[Exception], ...]**: Only catch exceptions with the specified
- types and return default error messages for them.
+ types and return default error messages for them.
  - **Callable[..., str]**: Catch exceptions matching the callable's signature
- and return the string result of calling it with the exception.
+ and return the string result of calling it with the exception.
  - **False**: Disable error handling entirely, allowing exceptions to
- propagate.
+ propagate.

  Defaults to a callable that:
  - catches tool invocation errors (due to invalid arguments provided by the model) and returns a descriptive error message
  - ignores tool execution errors (they will be re-raised)

  messages_key: The key in the state dictionary that contains the message list.
- This same key will be used for the output ToolMessages.
+ This same key will be used for the output `ToolMessage` objects.
  Defaults to "messages".
  Allows custom state schemas with different message field names.

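
The error-handling strategies above map one-to-one onto constructor arguments. A hypothetical sketch of the accepted `handle_tool_errors` values (the `ping` tool is an assumption):

```python
from langchain_core.tools import tool

from langchain.tools.tool_node import ToolNode  # module shown in this diff


@tool
def ping(host: str) -> str:
    """Ping a host."""
    return f"pong from {host}"


ToolNode([ping], handle_tool_errors=True)                       # catch all, default template
ToolNode([ping], handle_tool_errors="Tool failed, try again.")  # catch all, fixed message
ToolNode([ping], handle_tool_errors=ValueError)                 # only catch ValueError
ToolNode([ping], handle_tool_errors=(ValueError, KeyError))     # only catch these types
ToolNode([ping], handle_tool_errors=lambda e: f"Error: {e}")    # build message from exception
ToolNode([ping], handle_tool_errors=False)                      # let exceptions propagate
```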
@@ -394,8 +499,9 @@ class ToolNode(RunnableCallable):
  | type[Exception]
  | tuple[type[Exception], ...] = _default_handle_tool_errors,
  messages_key: str = "messages",
+ wrap_tool_call: ToolCallWrapper | None = None,
  ) -> None:
- """Initialize the ToolNode with the provided tools and configuration.
+ """Initialize ToolNode with tools and configuration.

  Args:
  tools: Sequence of tools to make available for execution.
@@ -403,6 +509,9 @@ class ToolNode(RunnableCallable):
  tags: Optional metadata tags.
  handle_tool_errors: Error handling configuration.
  messages_key: State key containing messages.
+ wrap_tool_call: Wrapper function to intercept tool execution. Receives
+ ToolCallRequest and execute callable, returns ToolMessage or Command.
+ Enables retries, caching, request modification, and control flow.
  """
  super().__init__(self._func, self._afunc, name=name, tags=tags, trace=False)
  self._tools_by_name: dict[str, BaseTool] = {}
@@ -410,6 +519,7 @@ class ToolNode(RunnableCallable):
  self._tool_to_store_arg: dict[str, str | None] = {}
  self._handle_tool_errors = handle_tool_errors
  self._messages_key = messages_key
+ self._wrap_tool_call = wrap_tool_call
  for tool in tools:
  if not isinstance(tool, BaseTool):
  tool_ = create_tool(cast("type[BaseTool]", tool))
@@ -431,11 +541,23 @@ class ToolNode(RunnableCallable):
  *,
  store: Optional[BaseStore], # noqa: UP045
  ) -> Any:
- tool_calls, input_type = self._parse_input(input, store)
+ try:
+ runtime = get_runtime()
+ except RuntimeError:
+ # Running outside of LangGraph runtime context (e.g., unit tests)
+ runtime = None
+
+ tool_calls, input_type = self._parse_input(input)
+ tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
+
  config_list = get_config_list(config, len(tool_calls))
  input_types = [input_type] * len(tool_calls)
+ inputs = [input] * len(tool_calls)
+ runtimes = [runtime] * len(tool_calls)
  with get_executor_for_config(config) as executor:
- outputs = [*executor.map(self._run_one, tool_calls, input_types, config_list)]
+ outputs = [
+ *executor.map(self._run_one, tool_calls, input_types, config_list, inputs, runtimes)
+ ]

  return self._combine_tool_outputs(outputs, input_type)

@@ -446,9 +568,16 @@ class ToolNode(RunnableCallable):
  *,
  store: Optional[BaseStore], # noqa: UP045
  ) -> Any:
- tool_calls, input_type = self._parse_input(input, store)
+ try:
+ runtime = get_runtime()
+ except RuntimeError:
+ # Running outside of LangGraph runtime context (e.g., unit tests)
+ runtime = None
+
+ tool_calls, input_type = self._parse_input(input)
+ tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
  outputs = await asyncio.gather(
- *(self._arun_one(call, input_type, config) for call in tool_calls)
+ *(self._arun_one(call, input_type, config, input, runtime) for call in tool_calls)
  )

  return self._combine_tool_outputs(outputs, input_type)
@@ -495,20 +624,30 @@ class ToolNode(RunnableCallable):
  combined_outputs.append(parent_command)
  return combined_outputs

- def _run_one(
+ def _execute_tool_sync(
  self,
- call: ToolCall,
+ request: ToolCallRequest,
  input_type: Literal["list", "dict", "tool_calls"],
  config: RunnableConfig,
  ) -> ToolMessage | Command:
- """Run a single tool call synchronously."""
- if invalid_tool_message := self._validate_tool_call(call):
- return invalid_tool_message
+ """Execute tool call with configured error handling.

- try:
- call_args = {**call, "type": "tool_call"}
- tool = self.tools_by_name[call["name"]]
+ Args:
+ request: Tool execution request.
+ input_type: Input format.
+ config: Runnable configuration.
+
+ Returns:
+ ToolMessage or Command.

+ Raises:
+ Exception: If tool fails and handle_tool_errors is False.
+ """
+ call = request.tool_call
+ tool = request.tool
+ call_args = {**call, "type": "tool_call"}
+
+ try:
  try:
  response = tool.invoke(call_args, config)
  except ValidationError as exc:
@@ -526,6 +665,7 @@ class ToolNode(RunnableCallable):
  except GraphBubbleUp:
  raise
  except Exception as e:
+ # Determine which exception types are handled
  handled_types: tuple[type[Exception], ...]
  if isinstance(self._handle_tool_errors, type) and issubclass(
  self._handle_tool_errors, Exception
@@ -541,10 +681,11 @@ class ToolNode(RunnableCallable):
  # default behavior is catching all exceptions
  handled_types = (Exception,)

- # Unhandled
+ # Check if this error should be handled
  if not self._handle_tool_errors or not isinstance(e, handled_types):
  raise
- # Handled
+
+ # Error is handled - create error ToolMessage
  content = _handle_tool_error(e, flag=self._handle_tool_errors)
  return ToolMessage(
  content=content,
@@ -553,28 +694,102 @@ class ToolNode(RunnableCallable):
  status="error",
  )

+ # Process successful response
  if isinstance(response, Command):
- return self._validate_tool_command(response, call, input_type)
+ # Validate Command before returning to handler
+ return self._validate_tool_command(response, request.tool_call, input_type)
  if isinstance(response, ToolMessage):
  response.content = cast("str | list", msg_content_output(response.content))
  return response
+
  msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
  raise TypeError(msg)

- async def _arun_one(
+ def _run_one(
  self,
  call: ToolCall,
  input_type: Literal["list", "dict", "tool_calls"],
  config: RunnableConfig,
+ input: list[AnyMessage] | dict[str, Any] | BaseModel,
+ runtime: Any,
  ) -> ToolMessage | Command:
- """Run a single tool call asynchronously."""
+ """Execute single tool call with wrap_tool_call wrapper if configured.
+
+ Args:
+ call: Tool call dict.
+ input_type: Input format.
+ config: Runnable configuration.
+ input: Agent state.
+ runtime: LangGraph runtime or None.
+
+ Returns:
+ ToolMessage or Command.
+ """
  if invalid_tool_message := self._validate_tool_call(call):
  return invalid_tool_message

+ tool = self.tools_by_name[call["name"]]
+
+ # Extract state from ToolCallWithContext if present
+ state = self._extract_state(input)
+
+ # Create the tool request with state and runtime
+ tool_request = ToolCallRequest(
+ tool_call=call,
+ tool=tool,
+ state=state,
+ runtime=runtime,
+ )
+
+ if self._wrap_tool_call is None:
+ # No wrapper - execute directly
+ return self._execute_tool_sync(tool_request, input_type, config)
+
+ # Define execute callable that can be called multiple times
+ def execute(req: ToolCallRequest) -> ToolMessage | Command:
+ """Execute tool with given request. Can be called multiple times."""
+ return self._execute_tool_sync(req, input_type, config)
+
+ # Call wrapper with request and execute callable
  try:
- call_args = {**call, "type": "tool_call"}
- tool = self.tools_by_name[call["name"]]
+ return self._wrap_tool_call(tool_request, execute)
+ except Exception as e:
+ # Wrapper threw an exception
+ if not self._handle_tool_errors:
+ raise
+ # Convert to error message
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
+ return ToolMessage(
+ content=content,
+ name=tool_request.tool_call["name"],
+ tool_call_id=tool_request.tool_call["id"],
+ status="error",
+ )
+
+ async def _execute_tool_async(
+ self,
+ request: ToolCallRequest,
+ input_type: Literal["list", "dict", "tool_calls"],
+ config: RunnableConfig,
+ ) -> ToolMessage | Command:
+ """Execute tool call asynchronously with configured error handling.
+
+ Args:
+ request: Tool execution request.
+ input_type: Input format.
+ config: Runnable configuration.
+
+ Returns:
+ ToolMessage or Command.

+ Raises:
+ Exception: If tool fails and handle_tool_errors is False.
+ """
+ call = request.tool_call
+ tool = request.tool
+ call_args = {**call, "type": "tool_call"}
+
+ try:
  try:
  response = await tool.ainvoke(call_args, config)
  except ValidationError as exc:
@@ -592,6 +807,7 @@ class ToolNode(RunnableCallable):
  except GraphBubbleUp:
  raise
  except Exception as e:
+ # Determine which exception types are handled
  handled_types: tuple[type[Exception], ...]
  if isinstance(self._handle_tool_errors, type) and issubclass(
  self._handle_tool_errors, Exception
@@ -607,12 +823,12 @@ class ToolNode(RunnableCallable):
  # default behavior is catching all exceptions
  handled_types = (Exception,)

- # Unhandled
+ # Check if this error should be handled
  if not self._handle_tool_errors or not isinstance(e, handled_types):
  raise
- # Handled
- content = _handle_tool_error(e, flag=self._handle_tool_errors)

+ # Error is handled - create error ToolMessage
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
  return ToolMessage(
  content=content,
  name=call["name"],
@@ -620,18 +836,84 @@ class ToolNode(RunnableCallable):
  status="error",
  )

+ # Process successful response
  if isinstance(response, Command):
- return self._validate_tool_command(response, call, input_type)
+ # Validate Command before returning to handler
+ return self._validate_tool_command(response, request.tool_call, input_type)
  if isinstance(response, ToolMessage):
  response.content = cast("str | list", msg_content_output(response.content))
  return response
+
  msg = f"Tool {call['name']} returned unexpected type: {type(response)}"
  raise TypeError(msg)

+ async def _arun_one(
+ self,
+ call: ToolCall,
+ input_type: Literal["list", "dict", "tool_calls"],
+ config: RunnableConfig,
+ input: list[AnyMessage] | dict[str, Any] | BaseModel,
+ runtime: Any,
+ ) -> ToolMessage | Command:
+ """Execute single tool call asynchronously with wrap_tool_call wrapper if configured.
+
+ Args:
+ call: Tool call dict.
+ input_type: Input format.
+ config: Runnable configuration.
+ input: Agent state.
+ runtime: LangGraph runtime or None.
+
+ Returns:
+ ToolMessage or Command.
+ """
+ if invalid_tool_message := self._validate_tool_call(call):
+ return invalid_tool_message
+
+ tool = self.tools_by_name[call["name"]]
+
+ # Extract state from ToolCallWithContext if present
+ state = self._extract_state(input)
+
+ # Create the tool request with state and runtime
+ tool_request = ToolCallRequest(
+ tool_call=call,
+ tool=tool,
+ state=state,
+ runtime=runtime,
+ )
+
+ if self._wrap_tool_call is None:
+ # No wrapper - execute directly
+ return await self._execute_tool_async(tool_request, input_type, config)
+
+ # Define async execute callable that can be called multiple times
+ async def execute(req: ToolCallRequest) -> ToolMessage | Command:
+ """Execute tool with given request. Can be called multiple times."""
+ return await self._execute_tool_async(req, input_type, config)
+
+ # Call wrapper with request and execute callable
+ # Note: wrapper is sync, but execute callable is async
+ try:
+ result = self._wrap_tool_call(tool_request, execute) # type: ignore[arg-type]
+ # If result is a coroutine, await it (though wrapper should be sync)
+ return await result if hasattr(result, "__await__") else result
+ except Exception as e:
+ # Wrapper threw an exception
+ if not self._handle_tool_errors:
+ raise
+ # Convert to error message
+ content = _handle_tool_error(e, flag=self._handle_tool_errors)
+ return ToolMessage(
+ content=content,
+ name=tool_request.tool_call["name"],
+ tool_call_id=tool_request.tool_call["id"],
+ status="error",
+ )
+
  def _parse_input(
  self,
  input: list[AnyMessage] | dict[str, Any] | BaseModel,
- store: BaseStore | None,
  ) -> tuple[list[ToolCall], Literal["list", "dict", "tool_calls"]]:
  input_type: Literal["list", "dict", "tool_calls"]
  if isinstance(input, list):
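
Because `_arun_one` passes a sync wrapper an async `execute` callable, a wrapper that simply returns `execute(request)` works on both paths: in the async path the returned coroutine is awaited by the node (per the `hasattr(result, "__await__")` check above), while wrappers that need to inspect the result before returning only see a concrete `ToolMessage`/`Command` on the sync path. A minimal sketch:

```python
def passthrough_handler(request, execute):
    # Sync path: returns a ToolMessage | Command. Async path: returns a coroutine
    # that ToolNode awaits on the caller's behalf.
    return execute(request)
```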
@@ -641,6 +923,14 @@ class ToolNode(RunnableCallable):
  return tool_calls, input_type
  input_type = "list"
  messages = input
+ elif isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ # Handle ToolCallWithContext from Send API
+ # mypy will not be able to type narrow correctly since the signature
+ # for input contains dict[str, Any]. We'd need to narrow dict[str, Any]
+ # before we can apply correct typing.
+ input_with_ctx = cast("ToolCallWithContext", input)
+ input_type = "tool_calls"
+ return [input_with_ctx["tool_call"]], input_type
  elif isinstance(input, dict) and (messages := input.get(self._messages_key, [])):
  input_type = "dict"
  elif messages := getattr(input, self._messages_key, []):
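
For context, a hypothetical `Send` payload matching the `ToolCallWithContext` branch above might look like the following; the node name `"tools"` and the state dict are assumptions, and `create_agent` constructs the real payload.

```python
from langgraph.types import Send

payload = {
    "__type": "tool_call_with_context",
    "tool_call": {"name": "add", "args": {"a": 1, "b": 2}, "id": "1", "type": "tool_call"},
    "state": {"messages": []},  # agent state captured at dispatch time
}
send = Send("tools", payload)  # ToolNode._parse_input unpacks payload["tool_call"]
```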
@@ -656,9 +946,7 @@ class ToolNode(RunnableCallable):
  msg = "No AIMessage found in input"
  raise ValueError(msg)

- tool_calls = [
- self.inject_tool_args(call, input, store) for call in latest_ai_message.tool_calls
- ]
+ tool_calls = list(latest_ai_message.tool_calls)
  return tool_calls, input_type

  def _validate_tool_call(self, call: ToolCall) -> ToolMessage | None:
@@ -674,6 +962,21 @@ class ToolNode(RunnableCallable):
  )
  return None

+ def _extract_state(
+ self, input: list[AnyMessage] | dict[str, Any] | BaseModel
+ ) -> list[AnyMessage] | dict[str, Any] | BaseModel:
+ """Extract state from input, handling ToolCallWithContext if present.
+
+ Args:
+ input: The input which may be raw state or ToolCallWithContext.
+
+ Returns:
+ The actual state to pass to wrap_tool_call wrappers.
+ """
+ if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ return input["state"]
+ return input
+
  def _inject_state(
  self,
  tool_call: ToolCall,
@@ -696,14 +999,20 @@ class ToolNode(RunnableCallable):
  err_msg += f" State should contain fields {required_fields_str}."
  raise ValueError(err_msg)

- if isinstance(input, dict):
+ # Extract state from ToolCallWithContext if present
+ if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
+ state = input["state"]
+ else:
+ state = input
+
+ if isinstance(state, dict):
  tool_state_args = {
- tool_arg: input[state_field] if state_field else input
+ tool_arg: state[state_field] if state_field else state
  for tool_arg, state_field in state_args.items()
  }
  else:
  tool_state_args = {
- tool_arg: getattr(input, state_field) if state_field else input
+ tool_arg: getattr(state, state_field) if state_field else state
  for tool_arg, state_field in state_args.items()
  }

@@ -731,7 +1040,7 @@ class ToolNode(RunnableCallable):
  }
  return tool_call

- def inject_tool_args(
+ def _inject_tool_args(
  self,
  tool_call: ToolCall,
  input: list[AnyMessage] | dict[str, Any] | BaseModel,
@@ -739,10 +1048,11 @@ class ToolNode(RunnableCallable):
  ) -> ToolCall:
  """Inject graph state and store into tool call arguments.

- This method enables tools to access graph context that should not be controlled
- by the model. Tools can declare dependencies on graph state or persistent storage
- using InjectedState and InjectedStore annotations. This method automatically
- identifies these dependencies and injects the appropriate values.
+ This is an internal method that enables tools to access graph context that
+ should not be controlled by the model. Tools can declare dependencies on graph
+ state or persistent storage using InjectedState and InjectedStore annotations.
+ This method automatically identifies these dependencies and injects the
+ appropriate values.

  The injection process preserves the original tool call structure while adding
  the necessary context arguments. This allows tools to be both model-callable
@@ -765,10 +1075,8 @@ class ToolNode(RunnableCallable):
  or if state injection requirements cannot be satisfied.

  Note:
- This method is automatically called during tool execution but can also
- be used manually when working with the Send API or custom routing logic.
- The injection is performed on a copy of the tool call to avoid mutating
- the original.
+ This method is called automatically during tool execution. It should not
+ be called from outside the ToolNode.
  """
  if tool_call["name"] not in self.tools_by_name:
  return tool_call
@@ -940,7 +1248,7 @@ class InjectedState(InjectedToolArg):
  to the model's tool-calling interface.

  Args:
- field: Optional key to extract from the state dictionary. If None, the entire
+ field: Optional key to extract from the state dictionary. If `None`, the entire
  state is injected. If specified, only that field's value is injected.
  This allows tools to request specific state components rather than
  processing the full state structure.
@@ -985,7 +1293,7 @@ class InjectedState(InjectedToolArg):
  node.invoke(state)
  ```

- ```pycon
+ ```python
  [
  ToolMessage(content="not enough messages", name="state_tool", tool_call_id="1"),
  ToolMessage(content="bar2", name="foo_tool", tool_call_id="2"),
@@ -994,12 +1302,12 @@ class InjectedState(InjectedToolArg):

  Note:
  - InjectedState arguments are automatically excluded from tool schemas
- presented to language models
+ presented to language models
  - ToolNode handles the injection process during execution
  - Tools can mix regular arguments (controlled by the model) with injected
- arguments (controlled by the system)
+ arguments (controlled by the system)
  - State injection occurs after the model generates tool calls but before
- tool execution
+ tool execution
  """

  def __init__(self, field: str | None = None) -> None:
@@ -1074,7 +1382,7 @@ class InjectedStore(InjectedToolArg):

  Note:
  - InjectedStore arguments are automatically excluded from tool schemas
- presented to language models
+ presented to language models
  - The store instance is automatically injected by ToolNode during execution
  - Tools can access namespaced storage using the store's get/put methods
  - Store injection requires the graph to be compiled with a store instance
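
The `InjectedState` and `InjectedStore` notes above describe arguments that are hidden from the model and filled in by `ToolNode` at execution time. A minimal, hypothetical sketch of the pattern (tool names are illustrative; `InjectedState`/`InjectedStore` come from the module shown in this diff):

```python
from typing import Annotated

from langchain_core.messages import AnyMessage
from langchain_core.tools import tool
from langgraph.store.base import BaseStore

from langchain.tools.tool_node import InjectedState, InjectedStore


@tool
def summarize_so_far(
    question: str,  # model-controlled argument
    messages: Annotated[list[AnyMessage], InjectedState("messages")],  # injected from state
) -> str:
    """Answer a question using the conversation so far."""
    return f"{question}: {len(messages)} messages seen"


@tool
def remember(
    note: str,  # model-controlled argument
    store: Annotated[BaseStore, InjectedStore()],  # injected store instance
) -> str:
    """Persist a note to long-term storage."""
    store.put(("notes",), "latest", {"note": note})
    return "saved"
```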