langchain 1.0.0a15__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

langchain/tools/tool_node.py

@@ -46,9 +46,10 @@ from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
+    Generic,
     Literal,
-    Optional,
     TypedDict,
+    TypeVar,
     Union,
     cast,
     get_args,
@@ -65,6 +66,7 @@ from langchain_core.messages import (
     convert_to_messages,
 )
 from langchain_core.runnables.config import (
+    RunnableConfig,
     get_config_list,
     get_executor_for_config,
 )
@@ -73,21 +75,24 @@ from langchain_core.tools import tool as create_tool
 from langchain_core.tools.base import (
     TOOL_MESSAGE_BLOCK_TYPES,
     ToolException,
+    _DirectlyInjectedToolArg,
     get_all_basemodel_annotations,
 )
 from langgraph._internal._runnable import RunnableCallable
 from langgraph.errors import GraphBubbleUp
 from langgraph.graph.message import REMOVE_ALL_MESSAGES
-from langgraph.runtime import get_runtime
-from langgraph.types import Command, Send
+from langgraph.store.base import BaseStore  # noqa: TC002
+from langgraph.types import Command, Send, StreamWriter
 from pydantic import BaseModel, ValidationError
 from typing_extensions import Unpack

 if TYPE_CHECKING:
     from collections.abc import Sequence

-    from langchain_core.runnables import RunnableConfig
-    from langgraph.store.base import BaseStore
+    from langgraph.runtime import Runtime
+
+StateT = TypeVar("StateT")
+ContextT = TypeVar("ContextT")

 INVALID_TOOL_NAME_ERROR_TEMPLATE = (
     "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
@@ -117,15 +122,18 @@ class ToolCallRequest:

     Attributes:
         tool_call: Tool call dict with name, args, and id from model output.
-        tool: BaseTool instance to be invoked.
+        tool: BaseTool instance to be invoked, or None if tool is not
+            registered with the ToolNode. When tool is None, interceptors can
+            handle the request without validation. If the interceptor calls execute(),
+            validation will occur and raise an error for unregistered tools.
         state: Agent state (dict, list, or BaseModel).
         runtime: LangGraph runtime context (optional, None if outside graph).
     """

     tool_call: ToolCall
-    tool: BaseTool
+    tool: BaseTool | None
     state: Any
-    runtime: Any
+    runtime: ToolRuntime

     def override(self, **overrides: Unpack[_ToolCallRequestOverrides]) -> ToolCallRequest:
         """Replace the request with a new request with the given overrides.
@@ -183,12 +191,15 @@ Examples:

     Modify request before execution:

+    ```python
     def handler(request, execute):
         request.tool_call["args"]["value"] *= 2
         return execute(request)
+    ```

     Retry on error (execute multiple times):

+    ```python
     def handler(request, execute):
         for attempt in range(3):
             try:
@@ -199,9 +210,11 @@ Examples:
                 if attempt == 2:
                     raise
         return result
+    ```

     Conditional retry based on response:

+    ```python
     def handler(request, execute):
         for attempt in range(3):
             result = execute(request)
@@ -210,15 +223,18 @@ Examples:
                 if attempt < 2:
                     continue
             return result
+    ```

     Cache/short-circuit without calling execute:

+    ```python
     def handler(request, execute):
         if cached := get_cache(request):
             return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
         result = execute(request)
         save_cache(request, result)
         return result
+    ```
 """

 AsyncToolCallWrapper = Callable[
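The `AsyncToolCallWrapper` alias declared next follows the same shape; a minimal sketch, assuming `execute` is awaitable when supplied through the async wrapper path:

```python
async def ahandler(request, execute):
    # Mirror of the sync retry example above, for the async execution path.
    for attempt in range(3):
        try:
            return await execute(request)
        except Exception:
            if attempt == 2:
                raise
```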
@@ -562,6 +578,7 @@ class _ToolNode(RunnableCallable):
         self._tools_by_name: dict[str, BaseTool] = {}
         self._tool_to_state_args: dict[str, dict[str, str | None]] = {}
         self._tool_to_store_arg: dict[str, str | None] = {}
+        self._tool_to_runtime_arg: dict[str, str | None] = {}
         self._handle_tool_errors = handle_tool_errors
         self._messages_key = messages_key
         self._wrap_tool_call = wrap_tool_call
@@ -574,6 +591,7 @@ class _ToolNode(RunnableCallable):
            self._tools_by_name[tool_.name] = tool_
            self._tool_to_state_args[tool_.name] = _get_state_args(tool_)
            self._tool_to_store_arg[tool_.name] = _get_store_arg(tool_)
+           self._tool_to_runtime_arg[tool_.name] = _get_runtime_arg(tool_)

     @property
     def tools_by_name(self) -> dict[str, BaseTool]:
@@ -584,26 +602,36 @@ class _ToolNode(RunnableCallable):
         self,
         input: list[AnyMessage] | dict[str, Any] | BaseModel,
         config: RunnableConfig,
-        *,
-        store: Optional[BaseStore],  # noqa: UP045
+        runtime: Runtime,
     ) -> Any:
-        try:
-            runtime = get_runtime()
-        except RuntimeError:
-            # Running outside of LangGraph runtime context (e.g., unit tests)
-            runtime = None
-
         tool_calls, input_type = self._parse_input(input)
-        tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
-
         config_list = get_config_list(config, len(tool_calls))
+
+        # Construct ToolRuntime instances at the top level for each tool call
+        tool_runtimes = []
+        for call, cfg in zip(tool_calls, config_list, strict=False):
+            state = self._extract_state(input)
+            tool_runtime = ToolRuntime(
+                state=state,
+                tool_call_id=call["id"],
+                config=cfg,
+                context=runtime.context,
+                store=runtime.store,
+                stream_writer=runtime.stream_writer,
+            )
+            tool_runtimes.append(tool_runtime)
+
+        # Inject tool arguments (including runtime)
+
+        injected_tool_calls = []
         input_types = [input_type] * len(tool_calls)
-        inputs = [input] * len(tool_calls)
-        runtimes = [runtime] * len(tool_calls)
+        for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
+            injected_call = self._inject_tool_args(call, tool_runtime)
+            injected_tool_calls.append(injected_call)
         with get_executor_for_config(config) as executor:
-            outputs = [
-                *executor.map(self._run_one, tool_calls, input_types, config_list, inputs, runtimes)
-            ]
+            outputs = list(
+                executor.map(self._run_one, injected_tool_calls, input_types, tool_runtimes)
+            )

         return self._combine_tool_outputs(outputs, input_type)

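The node now receives the langgraph `Runtime` as a parameter instead of looking it up with `get_runtime()`, which is the same injection mechanism user-defined nodes use. A hedged sketch of that pattern, with a hypothetical `MyContext` type and assuming langgraph's `Runtime[ContextT]` parameter injection:

```python
from dataclasses import dataclass

from langgraph.runtime import Runtime


@dataclass
class MyContext:
    user_id: str


def my_node(state: dict, runtime: Runtime[MyContext]) -> dict:
    # langgraph injects the Runtime when a node declares this parameter; the
    # same object supplies context, store, and stream_writer in _ToolNode._func.
    return {"messages": [f"hello {runtime.context.user_id}"]}
```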
@@ -611,20 +639,32 @@ class _ToolNode(RunnableCallable):
         self,
         input: list[AnyMessage] | dict[str, Any] | BaseModel,
         config: RunnableConfig,
-        *,
-        store: Optional[BaseStore],  # noqa: UP045
+        runtime: Runtime,
     ) -> Any:
-        try:
-            runtime = get_runtime()
-        except RuntimeError:
-            # Running outside of LangGraph runtime context (e.g., unit tests)
-            runtime = None
-
         tool_calls, input_type = self._parse_input(input)
-        tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
-        outputs = await asyncio.gather(
-            *(self._arun_one(call, input_type, config, input, runtime) for call in tool_calls)
-        )
+        config_list = get_config_list(config, len(tool_calls))
+
+        # Construct ToolRuntime instances at the top level for each tool call
+        tool_runtimes = []
+        for call, cfg in zip(tool_calls, config_list, strict=False):
+            state = self._extract_state(input)
+            tool_runtime = ToolRuntime(
+                state=state,
+                tool_call_id=call["id"],
+                config=cfg,
+                context=runtime.context,
+                store=runtime.store,
+                stream_writer=runtime.stream_writer,
+            )
+            tool_runtimes.append(tool_runtime)
+
+        injected_tool_calls = []
+        coros = []
+        for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
+            injected_call = self._inject_tool_args(call, tool_runtime)
+            injected_tool_calls.append(injected_call)
+            coros.append(self._arun_one(injected_call, input_type, tool_runtime))
+        outputs = await asyncio.gather(*coros)

         return self._combine_tool_outputs(outputs, input_type)

@@ -691,6 +731,15 @@ class _ToolNode(RunnableCallable):
         """
         call = request.tool_call
         tool = request.tool
+
+        # Validate tool exists when we actually need to execute it
+        if tool is None:
+            if invalid_tool_message := self._validate_tool_call(call):
+                return invalid_tool_message
+            # This should never happen if validation works correctly
+            msg = f"Tool {call['name']} is not registered with ToolNode"
+            raise TypeError(msg)
+
         call_args = {**call, "type": "tool_call"}

         try:
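For reference, the `tool is None` fallback relies on `_validate_tool_call`, whose body is not shown in this diff. A hypothetical reconstruction of what it plausibly returns, based on the module-level `INVALID_TOOL_NAME_ERROR_TEMPLATE` seen in the imports hunk (treat the helper name and signature as assumptions):

```python
from langchain_core.messages import ToolMessage

# Mirrors the module-level constant shown earlier in this diff.
INVALID_TOOL_NAME_ERROR_TEMPLATE = (
    "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
)


def validate_tool_call_sketch(call: dict, registered_names: list[str]) -> ToolMessage | None:
    """Hypothetical stand-in for _ToolNode._validate_tool_call."""
    if call["name"] in registered_names:
        return None
    content = INVALID_TOOL_NAME_ERROR_TEMPLATE.format(
        requested_tool=call["name"],
        available_tools=", ".join(registered_names),
    )
    return ToolMessage(
        content=content,
        name=call["name"],
        tool_call_id=call["id"],
        status="error",
    )
```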
@@ -755,38 +804,32 @@ class _ToolNode(RunnableCallable):
         self,
         call: ToolCall,
         input_type: Literal["list", "dict", "tool_calls"],
-        config: RunnableConfig,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        runtime: Any,
+        tool_runtime: ToolRuntime,
     ) -> ToolMessage | Command:
         """Execute single tool call with wrap_tool_call wrapper if configured.

         Args:
             call: Tool call dict.
             input_type: Input format.
-            config: Runnable configuration.
-            input: Agent state.
-            runtime: LangGraph runtime or None.
+            tool_runtime: Tool runtime.

         Returns:
             ToolMessage or Command.
         """
-        if invalid_tool_message := self._validate_tool_call(call):
-            return invalid_tool_message
-
-        tool = self.tools_by_name[call["name"]]
-
-        # Extract state from ToolCallWithContext if present
-        state = self._extract_state(input)
+        # Validation is deferred to _execute_tool_sync to allow interceptors
+        # to short-circuit requests for unregistered tools
+        tool = self.tools_by_name.get(call["name"])

         # Create the tool request with state and runtime
         tool_request = ToolCallRequest(
             tool_call=call,
             tool=tool,
-            state=state,
-            runtime=runtime,
+            state=tool_runtime.state,
+            runtime=tool_runtime,
         )

+        config = tool_runtime.config
+
         if self._wrap_tool_call is None:
             # No wrapper - execute directly
             return self._execute_tool_sync(tool_request, input_type, config)
@@ -833,6 +876,15 @@ class _ToolNode(RunnableCallable):
         """
         call = request.tool_call
         tool = request.tool
+
+        # Validate tool exists when we actually need to execute it
+        if tool is None:
+            if invalid_tool_message := self._validate_tool_call(call):
+                return invalid_tool_message
+            # This should never happen if validation works correctly
+            msg = f"Tool {call['name']} is not registered with ToolNode"
+            raise TypeError(msg)
+
         call_args = {**call, "type": "tool_call"}

         try:
@@ -897,38 +949,32 @@ class _ToolNode(RunnableCallable):
         self,
         call: ToolCall,
         input_type: Literal["list", "dict", "tool_calls"],
-        config: RunnableConfig,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        runtime: Any,
+        tool_runtime: ToolRuntime,
     ) -> ToolMessage | Command:
         """Execute single tool call asynchronously with awrap_tool_call wrapper if configured.

         Args:
             call: Tool call dict.
             input_type: Input format.
-            config: Runnable configuration.
-            input: Agent state.
-            runtime: LangGraph runtime or None.
+            tool_runtime: Tool runtime.

         Returns:
             ToolMessage or Command.
         """
-        if invalid_tool_message := self._validate_tool_call(call):
-            return invalid_tool_message
-
-        tool = self.tools_by_name[call["name"]]
-
-        # Extract state from ToolCallWithContext if present
-        state = self._extract_state(input)
+        # Validation is deferred to _execute_tool_async to allow interceptors
+        # to short-circuit requests for unregistered tools
+        tool = self.tools_by_name.get(call["name"])

         # Create the tool request with state and runtime
         tool_request = ToolCallRequest(
             tool_call=call,
             tool=tool,
-            state=state,
-            runtime=runtime,
+            state=tool_runtime.state,
+            runtime=tool_runtime,
         )

+        config = tool_runtime.config
+
         if self._awrap_tool_call is None and self._wrap_tool_call is None:
             # No wrapper - execute directly
             return await self._execute_tool_async(tool_request, input_type, config)
@@ -1031,15 +1077,16 @@ class _ToolNode(RunnableCallable):
     def _inject_state(
         self,
         tool_call: ToolCall,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
+        state: list[AnyMessage] | dict[str, Any] | BaseModel,
     ) -> ToolCall:
         state_args = self._tool_to_state_args[tool_call["name"]]
-        if state_args and isinstance(input, list):
+
+        if state_args and isinstance(state, list):
             required_fields = list(state_args.values())
             if (
                 len(required_fields) == 1 and required_fields[0] == self._messages_key
             ) or required_fields[0] is None:
-                input = {self._messages_key: input}
+                state = {self._messages_key: state}
             else:
                 err_msg = (
                     f"Invalid input to ToolNode. Tool {tool_call['name']} requires "
@@ -1050,12 +1097,6 @@ class _ToolNode(RunnableCallable):
                 err_msg += f" State should contain fields {required_fields_str}."
                 raise ValueError(err_msg)

-        # Extract state from ToolCallWithContext if present
-        if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
-            state = input["state"]
-        else:
-            state = input
-
         if isinstance(state, dict):
             tool_state_args = {
                 tool_arg: state[state_field] if state_field else state
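To ground the state-handling path, here is a small example of a tool that declares an `InjectedState` dependency; the annotated argument is hidden from the model and filled from graph state by `_inject_state`. The tool name, state shape, and the `langchain.tools` export path are assumptions:

```python
from typing import Annotated

from langchain_core.tools import tool
from langchain.tools import InjectedState  # assumed export path


@tool
def summarize_history(
    question: str,
    messages: Annotated[list, InjectedState("messages")],
) -> str:
    """Answer a question using the conversation history."""
    # `messages` is injected from state["messages"]; the model only supplies `question`.
    return f"{question!r} asked after {len(messages)} prior messages"
```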
@@ -1091,19 +1132,38 @@ class _ToolNode(RunnableCallable):
         }
         return tool_call

+    def _inject_runtime(self, tool_call: ToolCall, tool_runtime: ToolRuntime) -> ToolCall:
+        """Inject ToolRuntime into tool call arguments.
+
+        Args:
+            tool_call: The tool call to inject runtime into.
+            tool_runtime: The ToolRuntime instance to inject.
+
+        Returns:
+            The tool call with runtime injected if needed.
+        """
+        runtime_arg = self._tool_to_runtime_arg.get(tool_call["name"])
+        if not runtime_arg:
+            return tool_call
+
+        tool_call["args"] = {
+            **tool_call["args"],
+            runtime_arg: tool_runtime,
+        }
+        return tool_call
+
     def _inject_tool_args(
         self,
         tool_call: ToolCall,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        store: BaseStore | None,
+        tool_runtime: ToolRuntime,
     ) -> ToolCall:
-        """Inject graph state and store into tool call arguments.
+        """Inject graph state, store, and runtime into tool call arguments.

         This is an internal method that enables tools to access graph context that
         should not be controlled by the model. Tools can declare dependencies on graph
-        state or persistent storage using InjectedState and InjectedStore annotations.
-        This method automatically identifies these dependencies and injects the
-        appropriate values.
+        state, persistent storage, or runtime context using InjectedState, InjectedStore,
+        and ToolRuntime annotations. This method automatically identifies these
+        dependencies and injects the appropriate values.

         The injection process preserves the original tool call structure while adding
         the necessary context arguments. This allows tools to be both model-callable
@@ -1112,10 +1172,8 @@ class _ToolNode(RunnableCallable):
         Args:
             tool_call: The tool call dictionary to augment with injected arguments.
                 Must contain 'name', 'args', 'id', and 'type' fields.
-            input: The current graph state to inject into tools requiring state access.
-                Can be a message list, state dictionary, or BaseModel instance.
-            store: The persistent store instance to inject into tools requiring storage.
-                Will be None if no store is configured for the graph.
+            tool_runtime: The ToolRuntime instance containing all runtime context
+                (state, config, store, context, stream_writer) to inject into tools.

         Returns:
             A new ToolCall dictionary with the same structure as the input but with
@@ -1133,8 +1191,9 @@ class _ToolNode(RunnableCallable):
             return tool_call

         tool_call_copy: ToolCall = copy(tool_call)
-        tool_call_with_state = self._inject_state(tool_call_copy, input)
-        return self._inject_store(tool_call_with_state, store)
+        tool_call_with_state = self._inject_state(tool_call_copy, tool_runtime.state)
+        tool_call_with_store = self._inject_store(tool_call_with_state, tool_runtime.store)
+        return self._inject_runtime(tool_call_with_store, tool_runtime)

     def _validate_tool_command(
         self,
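Taken together, `_inject_tool_args` now threads a single `ToolRuntime` through three passes (state, store, runtime). A toy illustration of the resulting call-dict transformation; the names and values are hypothetical, not taken from the diff:

```python
# A model-produced tool call before injection (illustrative values).
tool_call = {
    "name": "summarize_history",
    "args": {"question": "What changed?"},
    "id": "call_1",
    "type": "tool_call",
}

# After _inject_state -> _inject_store -> _inject_runtime, system-controlled
# arguments are merged into "args" alongside the model-provided ones, e.g.:
# {
#     "name": "summarize_history",
#     "args": {
#         "question": "What changed?",
#         "messages": [...],         # from tool_runtime.state, via InjectedState
#         "store": <BaseStore>,      # from tool_runtime.store, via InjectedStore
#         "runtime": <ToolRuntime>,  # via a ToolRuntime-annotated parameter
#     },
#     "id": "call_1",
#     "type": "tool_call",
# }
```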
@@ -1290,11 +1349,71 @@ def tools_condition(
     return "__end__"


+@dataclass
+class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
+    """Runtime context automatically injected into tools.
+
+    When a tool function has a parameter named 'runtime' with type hint
+    'ToolRuntime', the tool execution system will automatically inject
+    an instance containing:
+
+    - state: The current graph state
+    - tool_call_id: The ID of the current tool call
+    - config: RunnableConfig for the current execution
+    - context: Runtime context (from langgraph Runtime)
+    - store: BaseStore instance for persistent storage (from langgraph Runtime)
+    - stream_writer: StreamWriter for streaming output (from langgraph Runtime)
+
+    No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
+    as a parameter.
+
+    Example:
+        ```python
+        from langchain_core.tools import tool
+        from langchain.tools import ToolRuntime
+
+        @tool
+        def my_tool(x: int, runtime: ToolRuntime) -> str:
+            \"\"\"Tool that accesses runtime context.\"\"\"
+            # Access state
+            messages = runtime.state["messages"]
+
+            # Access tool_call_id
+            print(f"Tool call ID: {runtime.tool_call_id}")
+
+            # Access config
+            print(f"Run ID: {runtime.config.get('run_id')}")
+
+            # Access runtime context
+            user_id = runtime.context.get("user_id")
+
+            # Access store
+            runtime.store.put(("metrics",), "count", 1)
+
+            # Stream output
+            runtime.stream_writer("Processing...")
+
+            return f"Processed {x}"
+        ```
+
+    Note:
+        This is a marker class used for type checking and detection.
+        The actual runtime object will be constructed during tool execution.
+    """
+
+    state: StateT
+    context: ContextT
+    config: RunnableConfig
+    stream_writer: StreamWriter
+    tool_call_id: str | None
+    store: BaseStore | None
+
+
 class InjectedState(InjectedToolArg):
     """Annotation for injecting graph state into tool arguments.

     This annotation enables tools to access graph state without exposing state
-    management details to the language model. Tools annotated with InjectedState
+    management details to the language model. Tools annotated with `InjectedState`
     receive state data automatically during execution while remaining invisible
     to the model's tool-calling interface.

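Because the class is `Generic[ContextT, StateT]`, the annotation can also be parametrized so type checkers know what `runtime.context` and `runtime.state` hold. A short sketch with hypothetical context and state types (export path assumed, as in the docstring above):

```python
from dataclasses import dataclass
from typing import TypedDict

from langchain_core.tools import tool
from langchain.tools import ToolRuntime  # assumed export path


@dataclass
class AppContext:
    user_id: str


class AgentState(TypedDict):
    messages: list


@tool
def whoami(runtime: ToolRuntime[AppContext, AgentState]) -> str:
    """Report the calling user."""
    # Type checkers now see runtime.context as AppContext and
    # runtime.state as AgentState.
    return f"user={runtime.context.user_id}, turns={len(runtime.state['messages'])}"
```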
@@ -1352,9 +1471,9 @@ class InjectedState(InjectedToolArg):
        ```

    Note:
-        - InjectedState arguments are automatically excluded from tool schemas
+        - `InjectedState` arguments are automatically excluded from tool schemas
          presented to language models
-        - ToolNode handles the injection process during execution
+        - `ToolNode` handles the injection process during execution
        - Tools can mix regular arguments (controlled by the model) with injected
          arguments (controlled by the system)
        - State injection occurs after the model generates tool calls but before
@@ -1362,7 +1481,7 @@ class InjectedState(InjectedToolArg):
    """

    def __init__(self, field: str | None = None) -> None:
-        """Initialize the InjectedState annotation."""
+        """Initialize the `InjectedState` annotation."""
        self.field = field


@@ -1407,7 +1526,7 @@ class InjectedStore(InjectedToolArg):
            return result.value if result else "Not found"
        ```

-    Usage with ToolNode and graph compilation:
+    Usage with `ToolNode` and graph compilation:

        ```python
        from langgraph.graph import StateGraph
@@ -1432,16 +1551,19 @@ class InjectedStore(InjectedToolArg):
        ```

    Note:
-        - InjectedStore arguments are automatically excluded from tool schemas
+        - `InjectedStore` arguments are automatically excluded from tool schemas
          presented to language models
-        - The store instance is automatically injected by ToolNode during execution
+        - The store instance is automatically injected by `ToolNode` during execution
        - Tools can access namespaced storage using the store's get/put methods
        - Store injection requires the graph to be compiled with a store instance
        - Multiple tools can share the same store instance for data consistency
    """


-def _is_injection(type_arg: Any, injection_type: type[InjectedState | InjectedStore]) -> bool:
+def _is_injection(
+    type_arg: Any,
+    injection_type: type[InjectedState | InjectedStore | ToolRuntime],
+) -> bool:
    """Check if a type argument represents an injection annotation.

    This utility function determines whether a type annotation indicates that
@@ -1535,3 +1657,44 @@ def _get_store_arg(tool: BaseTool) -> str | None:
            return name

    return None
+
+
+def _get_runtime_arg(tool: BaseTool) -> str | None:
+    """Extract runtime injection argument from tool annotations.
+
+    This function analyzes a tool's input schema to identify the argument that
+    should be injected with the ToolRuntime instance. Only one runtime argument
+    is supported per tool.
+
+    Args:
+        tool: The tool to analyze for runtime injection requirements.
+
+    Returns:
+        The name of the argument that should receive the runtime injection, or None
+        if no runtime injection is required.
+
+    Raises:
+        ValueError: If a tool argument has multiple ToolRuntime annotations.
+    """
+    full_schema = tool.get_input_schema()
+    for name, type_ in get_all_basemodel_annotations(full_schema).items():
+        # Check if the parameter name is "runtime" (regardless of type)
+        if name == "runtime":
+            return name
+        # Check if the type itself is ToolRuntime (direct usage)
+        if _is_injection(type_, ToolRuntime):
+            return name
+        # Check if ToolRuntime is in Annotated args
+        injections = [
+            type_arg for type_arg in get_args(type_) if _is_injection(type_arg, ToolRuntime)
+        ]
+        if len(injections) > 1:
+            msg = (
+                "A tool argument should not be annotated with ToolRuntime more than "
+                f"once. Received arg {name} with annotations {injections}."
+            )
+            raise ValueError(msg)
+        if len(injections) == 1:
+            return name
+
+    return None
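As a usage note, the simplest opt-in is the direct annotation recommended in the `ToolRuntime` docstring; any of the three checks above (a parameter named `runtime`, a `ToolRuntime` type hint, or `ToolRuntime` in `Annotated` metadata) marks the argument for injection and keeps it out of the model-facing schema. A minimal sketch (export path assumed):

```python
from langchain_core.tools import tool
from langchain.tools import ToolRuntime  # assumed export path


@tool
def log_progress(step: str, runtime: ToolRuntime) -> str:
    """Record a step and stream a progress update."""
    # The model only sees `step`; `runtime` is injected by the ToolNode and
    # should not appear in the schema presented to the model.
    runtime.stream_writer({"progress": step})
    return f"logged {step}"
```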
langchain-1.0.0rc2.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain
-Version: 1.0.0a15
+Version: 1.0.0rc2
 Summary: Building applications with LLMs through composability
 Project-URL: homepage, https://docs.langchain.com/
 Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -11,7 +11,7 @@ Project-URL: reddit, https://www.reddit.com/r/LangChain/
 License: MIT
 License-File: LICENSE
 Requires-Python: <4.0.0,>=3.10.0
-Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
+Requires-Dist: langchain-core<2.0.0,>=1.0.0rc3
 Requires-Dist: langgraph<2.0.0,>=1.0.0a4
 Requires-Dist: pydantic<3.0.0,>=2.7.4
 Provides-Extra: anthropic
langchain-1.0.0rc2.dist-info/RECORD

@@ -0,0 +1,30 @@
+langchain/__init__.py,sha256=zTVJXeQnKRDdEeQc-ivRY9P9TV6An_dmmkgW6phY86I,64
+langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
+langchain/agents/factory.py,sha256=RhClhqWInb2i0pjNJj9-0OvnB6waj0r8Qbm5bN7ofw8,62214
+langchain/agents/structured_output.py,sha256=BDSF6PWVih41M7IGyjCHWy46jmDxZsfBt_B4llE9OOc,13764
+langchain/agents/middleware/__init__.py,sha256=FBoTr4TAyuLJiOKV-mJN3oaLE6D6Q5ubI5sCbnsCCSs,1955
+langchain/agents/middleware/context_editing.py,sha256=0sUpDc0FvOKMERNnEKnhBqrTjX_rCVWjIX8hH3RTG8U,8749
+langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
+langchain/agents/middleware/model_call_limit.py,sha256=yYBcOY5DKNIG6_9f-rkTjIj_BLVUO1tuShgxt00P8W0,7735
+langchain/agents/middleware/model_fallback.py,sha256=VKDN81jfFB9zJOaJZ94tfwzodk3zRXRwz6CqQ6MkJtw,4097
+langchain/agents/middleware/pii.py,sha256=rkGojBFIJGMs1p1cKNh0npafts_0UUJ0_NeZsyJo498,24760
+langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
+langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
+langchain/agents/middleware/tool_call_limit.py,sha256=AHA-No4oUze2-2d1INnX8d_9otFuDB8uoWayJpt9nPo,12321
+langchain/agents/middleware/tool_emulator.py,sha256=UmN5UIMsikDnFvcPbNLNDOF4RXvIxqd-AMG46LVI0iA,7211
+langchain/agents/middleware/tool_retry.py,sha256=M76csBFFZa37djxtfUCqNU-x2erTNtxZqoO__DozWxA,13787
+langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
+langchain/agents/middleware/types.py,sha256=FVuYue9cLB5C1ZNcYLIGNPN6IUaNXY3UsWQ6gC-gZNs,55547
+langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
+langchain/chat_models/base.py,sha256=k1Qnuh7O_3LwsWtcVFSWsWP00hxiEyninwltTdi1rk8,35655
+langchain/embeddings/__init__.py,sha256=crY7GUw7RSA25JgpYl7I4WPloVCVY6eUmJbSSSchWis,444
+langchain/embeddings/base.py,sha256=1aNxDLQmS_l7RMcvjnK7Cv7rtgKrKt6Sl7mgXM2_JWI,7329
+langchain/messages/__init__.py,sha256=p7NlF1yf8MkMgJzJ2wggXGkkA_okz1f-g63KoflL6PA,1710
+langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
+langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
+langchain/tools/tool_node.py,sha256=0rk5SZ0L80X6DJA5ohzyuqydL-S40i5LHMXJsY2t0JI,65016
+langchain-1.0.0rc2.dist-info/METADATA,sha256=WG0BlqzPbwur0AlHyITXIE35MX7iSpzGIfqqD4dFLxc,4544
+langchain-1.0.0rc2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain-1.0.0rc2.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-1.0.0rc2.dist-info/RECORD,,