langchain 1.0.0a15-py3-none-any.whl → 1.0.0rc1-py3-none-any.whl

Potentially problematic release.

@@ -46,9 +46,10 @@ from typing import (
     TYPE_CHECKING,
     Annotated,
     Any,
+    Generic,
     Literal,
-    Optional,
     TypedDict,
+    TypeVar,
     Union,
     cast,
     get_args,
@@ -65,6 +66,7 @@ from langchain_core.messages import (
     convert_to_messages,
 )
 from langchain_core.runnables.config import (
+    RunnableConfig,
     get_config_list,
     get_executor_for_config,
 )
@@ -78,16 +80,18 @@ from langchain_core.tools.base import (
 from langgraph._internal._runnable import RunnableCallable
 from langgraph.errors import GraphBubbleUp
 from langgraph.graph.message import REMOVE_ALL_MESSAGES
-from langgraph.runtime import get_runtime
-from langgraph.types import Command, Send
+from langgraph.store.base import BaseStore  # noqa: TC002
+from langgraph.types import Command, Send, StreamWriter
 from pydantic import BaseModel, ValidationError
 from typing_extensions import Unpack
 
 if TYPE_CHECKING:
     from collections.abc import Sequence
 
-    from langchain_core.runnables import RunnableConfig
-    from langgraph.store.base import BaseStore
+    from langgraph.runtime import Runtime
+
+StateT = TypeVar("StateT")
+ContextT = TypeVar("ContextT")
 
 INVALID_TOOL_NAME_ERROR_TEMPLATE = (
     "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
@@ -117,15 +121,18 @@ class ToolCallRequest:
 
     Attributes:
         tool_call: Tool call dict with name, args, and id from model output.
-        tool: BaseTool instance to be invoked.
+        tool: BaseTool instance to be invoked, or None if tool is not
+            registered with the ToolNode. When tool is None, interceptors can
+            handle the request without validation. If the interceptor calls execute(),
+            validation will occur and raise an error for unregistered tools.
         state: Agent state (dict, list, or BaseModel).
         runtime: LangGraph runtime context (optional, None if outside graph).
     """
 
     tool_call: ToolCall
-    tool: BaseTool
+    tool: BaseTool | None
     state: Any
-    runtime: Any
+    runtime: ToolRuntime
 
     def override(self, **overrides: Unpack[_ToolCallRequestOverrides]) -> ToolCallRequest:
         """Replace the request with a new request with the given overrides.
@@ -183,12 +190,15 @@ Examples:
 
     Modify request before execution:
 
+    ```python
     def handler(request, execute):
         request.tool_call["args"]["value"] *= 2
         return execute(request)
+    ```
 
     Retry on error (execute multiple times):
 
+    ```python
     def handler(request, execute):
         for attempt in range(3):
             try:
@@ -199,9 +209,11 @@ Examples:
                 if attempt == 2:
                     raise
         return result
+    ```
 
     Conditional retry based on response:
 
+    ```python
     def handler(request, execute):
         for attempt in range(3):
             result = execute(request)
@@ -210,15 +222,18 @@ Examples:
                 if attempt < 2:
                     continue
         return result
+    ```
 
     Cache/short-circuit without calling execute:
 
+    ```python
     def handler(request, execute):
         if cached := get_cache(request):
             return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
         result = execute(request)
         save_cache(request, result)
         return result
+    ```
 """
 
 AsyncToolCallWrapper = Callable[
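
Editor note: the docstring above ends where `AsyncToolCallWrapper` begins. For the async path, a handler presumably follows the same `(request, execute)` shape with an awaitable `execute`; the sketch below rests on that assumption, since the full `AsyncToolCallWrapper` signature is truncated in this diff.

```python
import asyncio


# Assumes the async wrapper mirrors the sync examples above, with an
# awaitable `execute` callback (AsyncToolCallWrapper's full signature is
# truncated in this diff).
async def retry_handler(request, execute):
    """Retry a flaky tool up to three times with a short backoff."""
    last_exc = None
    for attempt in range(3):
        try:
            return await execute(request)
        except Exception as exc:  # illustrative only; catch narrowly in real code
            last_exc = exc
            await asyncio.sleep(0.1 * (attempt + 1))
    raise last_exc  # always set after three failed attempts
```
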
@@ -562,6 +577,7 @@ class _ToolNode(RunnableCallable):
         self._tools_by_name: dict[str, BaseTool] = {}
         self._tool_to_state_args: dict[str, dict[str, str | None]] = {}
         self._tool_to_store_arg: dict[str, str | None] = {}
+        self._tool_to_runtime_arg: dict[str, str | None] = {}
         self._handle_tool_errors = handle_tool_errors
         self._messages_key = messages_key
         self._wrap_tool_call = wrap_tool_call
@@ -574,6 +590,7 @@ class _ToolNode(RunnableCallable):
             self._tools_by_name[tool_.name] = tool_
             self._tool_to_state_args[tool_.name] = _get_state_args(tool_)
             self._tool_to_store_arg[tool_.name] = _get_store_arg(tool_)
+            self._tool_to_runtime_arg[tool_.name] = _get_runtime_arg(tool_)
 
     @property
     def tools_by_name(self) -> dict[str, BaseTool]:
@@ -584,26 +601,36 @@ class _ToolNode(RunnableCallable):
         self,
         input: list[AnyMessage] | dict[str, Any] | BaseModel,
         config: RunnableConfig,
-        *,
-        store: Optional[BaseStore],  # noqa: UP045
+        runtime: Runtime,
     ) -> Any:
-        try:
-            runtime = get_runtime()
-        except RuntimeError:
-            # Running outside of LangGraph runtime context (e.g., unit tests)
-            runtime = None
-
         tool_calls, input_type = self._parse_input(input)
-        tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
-
         config_list = get_config_list(config, len(tool_calls))
+
+        # Construct ToolRuntime instances at the top level for each tool call
+        tool_runtimes = []
+        for call, cfg in zip(tool_calls, config_list, strict=False):
+            state = self._extract_state(input)
+            tool_runtime = ToolRuntime(
+                state=state,
+                tool_call_id=call["id"],
+                config=cfg,
+                context=runtime.context,
+                store=runtime.store,
+                stream_writer=runtime.stream_writer,
+            )
+            tool_runtimes.append(tool_runtime)
+
+        # Inject tool arguments (including runtime)
+
+        injected_tool_calls = []
         input_types = [input_type] * len(tool_calls)
-        inputs = [input] * len(tool_calls)
-        runtimes = [runtime] * len(tool_calls)
+        for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
+            injected_call = self._inject_tool_args(call, tool_runtime)
+            injected_tool_calls.append(injected_call)
         with get_executor_for_config(config) as executor:
-            outputs = [
-                *executor.map(self._run_one, tool_calls, input_types, config_list, inputs, runtimes)
-            ]
+            outputs = list(
+                executor.map(self._run_one, injected_tool_calls, input_types, tool_runtimes)
+            )
 
         return self._combine_tool_outputs(outputs, input_type)
 
@@ -611,20 +638,32 @@ class _ToolNode(RunnableCallable):
         self,
         input: list[AnyMessage] | dict[str, Any] | BaseModel,
         config: RunnableConfig,
-        *,
-        store: Optional[BaseStore],  # noqa: UP045
+        runtime: Runtime,
     ) -> Any:
-        try:
-            runtime = get_runtime()
-        except RuntimeError:
-            # Running outside of LangGraph runtime context (e.g., unit tests)
-            runtime = None
-
         tool_calls, input_type = self._parse_input(input)
-        tool_calls = [self._inject_tool_args(call, input, store) for call in tool_calls]
-        outputs = await asyncio.gather(
-            *(self._arun_one(call, input_type, config, input, runtime) for call in tool_calls)
-        )
+        config_list = get_config_list(config, len(tool_calls))
+
+        # Construct ToolRuntime instances at the top level for each tool call
+        tool_runtimes = []
+        for call, cfg in zip(tool_calls, config_list, strict=False):
+            state = self._extract_state(input)
+            tool_runtime = ToolRuntime(
+                state=state,
+                tool_call_id=call["id"],
+                config=cfg,
+                context=runtime.context,
+                store=runtime.store,
+                stream_writer=runtime.stream_writer,
+            )
+            tool_runtimes.append(tool_runtime)
+
+        injected_tool_calls = []
+        coros = []
+        for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
+            injected_call = self._inject_tool_args(call, tool_runtime)
+            injected_tool_calls.append(injected_call)
+            coros.append(self._arun_one(injected_call, input_type, tool_runtime))
+        outputs = await asyncio.gather(*coros)
 
         return self._combine_tool_outputs(outputs, input_type)
 
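
Editor note: both entry points now receive the LangGraph `Runtime` and fan it out into one `ToolRuntime` per tool call. For exercising a tool outside a graph (the case the removed `get_runtime()` fallback used to cover), the dataclass can be built by hand; field names follow the `ToolRuntime` definition added later in this diff, and the stubbed values are only an assumption about what a given tool actually touches.

```python
# Hand-built ToolRuntime for a unit-test style call; the stubbed
# config/store/stream_writer are assumptions about what the tool needs.
from langchain.tools import ToolRuntime
from langchain_core.tools import tool


@tool
def echo_state_size(x: int, runtime: ToolRuntime) -> str:
    """Report x plus how many messages are in the injected state."""
    return f"x={x}, messages={len(runtime.state['messages'])}"


runtime = ToolRuntime(
    state={"messages": []},
    context=None,
    config={},                        # a plain RunnableConfig dict
    stream_writer=lambda _chunk: None,
    tool_call_id="call_1",
    store=None,
)
print(echo_state_size.invoke({"x": 1, "runtime": runtime}))
```
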
@@ -691,6 +730,15 @@ class _ToolNode(RunnableCallable):
         """
         call = request.tool_call
         tool = request.tool
+
+        # Validate tool exists when we actually need to execute it
+        if tool is None:
+            if invalid_tool_message := self._validate_tool_call(call):
+                return invalid_tool_message
+            # This should never happen if validation works correctly
+            msg = f"Tool {call['name']} is not registered with ToolNode"
+            raise TypeError(msg)
+
         call_args = {**call, "type": "tool_call"}
 
         try:
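
Editor note: with validation deferred as above, `request.tool` is `None` for a call the node does not know; a wrapper can answer it directly, while falling through to `execute()` yields the standard invalid-tool `ToolMessage`. A sketch, with `lookup_plugin` as a purely hypothetical out-of-band registry:

```python
from langchain_core.messages import ToolMessage


def lookup_plugin(name: str):
    """Hypothetical out-of-band registry; returns a callable or None."""
    return None


def plugin_handler(request, execute):
    if request.tool is None:
        plugin = lookup_plugin(request.tool_call["name"])
        if plugin is not None:
            result = plugin(**request.tool_call["args"])
            return ToolMessage(content=str(result), tool_call_id=request.tool_call["id"])
    # Falling through triggers the deferred validation above: unregistered
    # tools come back as the standard "invalid tool" ToolMessage.
    return execute(request)
```
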
@@ -755,38 +803,32 @@ class _ToolNode(RunnableCallable):
         self,
         call: ToolCall,
         input_type: Literal["list", "dict", "tool_calls"],
-        config: RunnableConfig,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        runtime: Any,
+        tool_runtime: ToolRuntime,
     ) -> ToolMessage | Command:
         """Execute single tool call with wrap_tool_call wrapper if configured.
 
         Args:
             call: Tool call dict.
             input_type: Input format.
-            config: Runnable configuration.
-            input: Agent state.
-            runtime: LangGraph runtime or None.
+            tool_runtime: Tool runtime.
 
         Returns:
             ToolMessage or Command.
         """
-        if invalid_tool_message := self._validate_tool_call(call):
-            return invalid_tool_message
-
-        tool = self.tools_by_name[call["name"]]
-
-        # Extract state from ToolCallWithContext if present
-        state = self._extract_state(input)
+        # Validation is deferred to _execute_tool_sync to allow interceptors
+        # to short-circuit requests for unregistered tools
+        tool = self.tools_by_name.get(call["name"])
 
         # Create the tool request with state and runtime
         tool_request = ToolCallRequest(
             tool_call=call,
             tool=tool,
-            state=state,
-            runtime=runtime,
+            state=tool_runtime.state,
+            runtime=tool_runtime,
         )
 
+        config = tool_runtime.config
+
         if self._wrap_tool_call is None:
             # No wrapper - execute directly
             return self._execute_tool_sync(tool_request, input_type, config)
@@ -833,6 +875,15 @@ class _ToolNode(RunnableCallable):
         """
         call = request.tool_call
         tool = request.tool
+
+        # Validate tool exists when we actually need to execute it
+        if tool is None:
+            if invalid_tool_message := self._validate_tool_call(call):
+                return invalid_tool_message
+            # This should never happen if validation works correctly
+            msg = f"Tool {call['name']} is not registered with ToolNode"
+            raise TypeError(msg)
+
         call_args = {**call, "type": "tool_call"}
 
         try:
@@ -897,38 +948,32 @@ class _ToolNode(RunnableCallable):
         self,
         call: ToolCall,
         input_type: Literal["list", "dict", "tool_calls"],
-        config: RunnableConfig,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        runtime: Any,
+        tool_runtime: ToolRuntime,
     ) -> ToolMessage | Command:
         """Execute single tool call asynchronously with awrap_tool_call wrapper if configured.
 
         Args:
             call: Tool call dict.
             input_type: Input format.
-            config: Runnable configuration.
-            input: Agent state.
-            runtime: LangGraph runtime or None.
+            tool_runtime: Tool runtime.
 
         Returns:
             ToolMessage or Command.
         """
-        if invalid_tool_message := self._validate_tool_call(call):
-            return invalid_tool_message
-
-        tool = self.tools_by_name[call["name"]]
-
-        # Extract state from ToolCallWithContext if present
-        state = self._extract_state(input)
+        # Validation is deferred to _execute_tool_async to allow interceptors
+        # to short-circuit requests for unregistered tools
+        tool = self.tools_by_name.get(call["name"])
 
         # Create the tool request with state and runtime
         tool_request = ToolCallRequest(
             tool_call=call,
             tool=tool,
-            state=state,
-            runtime=runtime,
+            state=tool_runtime.state,
+            runtime=tool_runtime,
         )
 
+        config = tool_runtime.config
+
         if self._awrap_tool_call is None and self._wrap_tool_call is None:
             # No wrapper - execute directly
             return await self._execute_tool_async(tool_request, input_type, config)
@@ -1031,15 +1076,16 @@ class _ToolNode(RunnableCallable):
     def _inject_state(
         self,
         tool_call: ToolCall,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
+        state: list[AnyMessage] | dict[str, Any] | BaseModel,
     ) -> ToolCall:
         state_args = self._tool_to_state_args[tool_call["name"]]
-        if state_args and isinstance(input, list):
+
+        if state_args and isinstance(state, list):
             required_fields = list(state_args.values())
             if (
                 len(required_fields) == 1 and required_fields[0] == self._messages_key
             ) or required_fields[0] is None:
-                input = {self._messages_key: input}
+                state = {self._messages_key: state}
             else:
                 err_msg = (
                     f"Invalid input to ToolNode. Tool {tool_call['name']} requires "
@@ -1050,12 +1096,6 @@ class _ToolNode(RunnableCallable):
                 err_msg += f" State should contain fields {required_fields_str}."
                 raise ValueError(err_msg)
 
-        # Extract state from ToolCallWithContext if present
-        if isinstance(input, dict) and input.get("__type") == "tool_call_with_context":
-            state = input["state"]
-        else:
-            state = input
-
         if isinstance(state, dict):
             tool_state_args = {
                 tool_arg: state[state_field] if state_field else state
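
Editor note: a tiny illustration of the list-input rule in `_inject_state` above. When the node's input is a bare message list and the tool only needs the messages field, the state is wrapped under the messages key before injection ("messages" is assumed here as the default `messages_key`).

```python
# Mirrors the wrapping rule above, outside of ToolNode, for illustration only.
state: list | dict = [{"role": "user", "content": "hi"}]
if isinstance(state, list):
    state = {"messages": state}
assert state["messages"][0]["content"] == "hi"
```
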
@@ -1091,19 +1131,38 @@ class _ToolNode(RunnableCallable):
         }
         return tool_call
 
+    def _inject_runtime(self, tool_call: ToolCall, tool_runtime: ToolRuntime) -> ToolCall:
+        """Inject ToolRuntime into tool call arguments.
+
+        Args:
+            tool_call: The tool call to inject runtime into.
+            tool_runtime: The ToolRuntime instance to inject.
+
+        Returns:
+            The tool call with runtime injected if needed.
+        """
+        runtime_arg = self._tool_to_runtime_arg.get(tool_call["name"])
+        if not runtime_arg:
+            return tool_call
+
+        tool_call["args"] = {
+            **tool_call["args"],
+            runtime_arg: tool_runtime,
+        }
+        return tool_call
+
     def _inject_tool_args(
         self,
         tool_call: ToolCall,
-        input: list[AnyMessage] | dict[str, Any] | BaseModel,
-        store: BaseStore | None,
+        tool_runtime: ToolRuntime,
     ) -> ToolCall:
-        """Inject graph state and store into tool call arguments.
+        """Inject graph state, store, and runtime into tool call arguments.
 
         This is an internal method that enables tools to access graph context that
         should not be controlled by the model. Tools can declare dependencies on graph
-        state or persistent storage using InjectedState and InjectedStore annotations.
-        This method automatically identifies these dependencies and injects the
-        appropriate values.
+        state, persistent storage, or runtime context using InjectedState, InjectedStore,
+        and ToolRuntime annotations. This method automatically identifies these
+        dependencies and injects the appropriate values.
 
         The injection process preserves the original tool call structure while adding
         the necessary context arguments. This allows tools to be both model-callable
@@ -1112,10 +1171,8 @@ class _ToolNode(RunnableCallable):
         Args:
             tool_call: The tool call dictionary to augment with injected arguments.
                 Must contain 'name', 'args', 'id', and 'type' fields.
-            input: The current graph state to inject into tools requiring state access.
-                Can be a message list, state dictionary, or BaseModel instance.
-            store: The persistent store instance to inject into tools requiring storage.
-                Will be None if no store is configured for the graph.
+            tool_runtime: The ToolRuntime instance containing all runtime context
+                (state, config, store, context, stream_writer) to inject into tools.
 
         Returns:
             A new ToolCall dictionary with the same structure as the input but with
@@ -1133,8 +1190,9 @@ class _ToolNode(RunnableCallable):
             return tool_call
 
         tool_call_copy: ToolCall = copy(tool_call)
-        tool_call_with_state = self._inject_state(tool_call_copy, input)
-        return self._inject_store(tool_call_with_state, store)
+        tool_call_with_state = self._inject_state(tool_call_copy, tool_runtime.state)
+        tool_call_with_store = self._inject_store(tool_call_with_state, tool_runtime.store)
+        return self._inject_runtime(tool_call_with_store, tool_runtime)
 
     def _validate_tool_command(
         self,
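
Editor note: the injection order is now state, then store, then runtime. A sketch of a tool mixing one model-controlled argument with all three injected kinds; the `Annotated` spellings follow the `InjectedState`/`InjectedStore` docstrings quoted elsewhere in this diff, and the `langchain.tools` import path is an assumption.

```python
from typing import Annotated

from langchain_core.tools import tool
from langchain.tools import InjectedState, InjectedStore, ToolRuntime  # assumed export path
from langgraph.store.base import BaseStore


@tool
def remember(
    note: str,  # model-controlled
    messages: Annotated[list, InjectedState("messages")],  # filled by _inject_state
    store: Annotated[BaseStore, InjectedStore()],  # filled by _inject_store
    runtime: ToolRuntime,  # filled by _inject_runtime
) -> str:
    """Store a note; only `note` is visible in the model-facing schema."""
    store.put(("notes",), runtime.tool_call_id or "latest", {"note": note})
    return f"Saved note after {len(messages)} prior messages"
```
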
@@ -1290,11 +1348,71 @@ def tools_condition(
     return "__end__"
 
 
+@dataclass
+class ToolRuntime(InjectedToolArg, Generic[ContextT, StateT]):
+    """Runtime context automatically injected into tools.
+
+    When a tool function has a parameter named 'runtime' with type hint
+    'ToolRuntime', the tool execution system will automatically inject
+    an instance containing:
+
+    - state: The current graph state
+    - tool_call_id: The ID of the current tool call
+    - config: RunnableConfig for the current execution
+    - context: Runtime context (from langgraph Runtime)
+    - store: BaseStore instance for persistent storage (from langgraph Runtime)
+    - stream_writer: StreamWriter for streaming output (from langgraph Runtime)
+
+    No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
+    as a parameter.
+
+    Example:
+        ```python
+        from langchain_core.tools import tool
+        from langchain.tools import ToolRuntime
+
+        @tool
+        def my_tool(x: int, runtime: ToolRuntime) -> str:
+            \"\"\"Tool that accesses runtime context.\"\"\"
+            # Access state
+            messages = runtime.state["messages"]
+
+            # Access tool_call_id
+            print(f"Tool call ID: {runtime.tool_call_id}")
+
+            # Access config
+            print(f"Run ID: {runtime.config.get('run_id')}")
+
+            # Access runtime context
+            user_id = runtime.context.get("user_id")
+
+            # Access store
+            runtime.store.put(("metrics",), "count", 1)
+
+            # Stream output
+            runtime.stream_writer.write("Processing...")
+
+            return f"Processed {x}"
+        ```
+
+    Note:
+        This is a marker class used for type checking and detection.
+        The actual runtime object will be constructed during tool execution.
+    """
+
+    state: StateT
+    context: ContextT
+    config: RunnableConfig
+    stream_writer: StreamWriter
+    tool_call_id: str | None
+    store: BaseStore | None
+
+
 class InjectedState(InjectedToolArg):
     """Annotation for injecting graph state into tool arguments.
 
     This annotation enables tools to access graph state without exposing state
-    management details to the language model. Tools annotated with InjectedState
+    management details to the language model. Tools annotated with `InjectedState`
     receive state data automatically during execution while remaining invisible
     to the model's tool-calling interface.
 
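
Editor note: since the class is `Generic[ContextT, StateT]`, the injected runtime can be parameterized for type checkers. A sketch assuming a small dataclass is supplied as the agent's context schema:

```python
from dataclasses import dataclass
from typing import Any

from langchain_core.tools import tool
from langchain.tools import ToolRuntime


@dataclass
class AppContext:
    """Hypothetical per-invocation context supplied by the application."""
    user_id: str


@tool
def whoami(greeting: str, runtime: ToolRuntime[AppContext, dict[str, Any]]) -> str:
    """Greet the calling user using the typed runtime context."""
    return f"{greeting}, user {runtime.context.user_id} (call {runtime.tool_call_id})"
```
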
@@ -1352,9 +1470,9 @@ class InjectedState(InjectedToolArg):
         ```
 
     Note:
-        - InjectedState arguments are automatically excluded from tool schemas
+        - `InjectedState` arguments are automatically excluded from tool schemas
           presented to language models
-        - ToolNode handles the injection process during execution
+        - `ToolNode` handles the injection process during execution
         - Tools can mix regular arguments (controlled by the model) with injected
           arguments (controlled by the system)
         - State injection occurs after the model generates tool calls but before
@@ -1362,7 +1480,7 @@ class InjectedState(InjectedToolArg):
     """
 
     def __init__(self, field: str | None = None) -> None:
-        """Initialize the InjectedState annotation."""
+        """Initialize the `InjectedState` annotation."""
         self.field = field
 
 
@@ -1407,7 +1525,7 @@ class InjectedStore(InjectedToolArg):
             return result.value if result else "Not found"
         ```
 
-    Usage with ToolNode and graph compilation:
+    Usage with `ToolNode` and graph compilation:
 
         ```python
         from langgraph.graph import StateGraph
@@ -1432,16 +1550,19 @@ class InjectedStore(InjectedToolArg):
         ```
 
     Note:
-        - InjectedStore arguments are automatically excluded from tool schemas
+        - `InjectedStore` arguments are automatically excluded from tool schemas
           presented to language models
-        - The store instance is automatically injected by ToolNode during execution
+        - The store instance is automatically injected by `ToolNode` during execution
         - Tools can access namespaced storage using the store's get/put methods
         - Store injection requires the graph to be compiled with a store instance
        - Multiple tools can share the same store instance for data consistency
     """
 
 
-def _is_injection(type_arg: Any, injection_type: type[InjectedState | InjectedStore]) -> bool:
+def _is_injection(
+    type_arg: Any,
+    injection_type: type[InjectedState | InjectedStore | ToolRuntime],
+) -> bool:
     """Check if a type argument represents an injection annotation.
 
     This utility function determines whether a type annotation indicates that
@@ -1535,3 +1656,44 @@ def _get_store_arg(tool: BaseTool) -> str | None:
             return name
 
     return None
+
+
+def _get_runtime_arg(tool: BaseTool) -> str | None:
+    """Extract runtime injection argument from tool annotations.
+
+    This function analyzes a tool's input schema to identify the argument that
+    should be injected with the ToolRuntime instance. Only one runtime argument
+    is supported per tool.
+
+    Args:
+        tool: The tool to analyze for runtime injection requirements.
+
+    Returns:
+        The name of the argument that should receive the runtime injection, or None
+        if no runtime injection is required.
+
+    Raises:
+        ValueError: If a tool argument has multiple ToolRuntime annotations.
+    """
+    full_schema = tool.get_input_schema()
+    for name, type_ in get_all_basemodel_annotations(full_schema).items():
+        # Check if the parameter name is "runtime" (regardless of type)
+        if name == "runtime":
+            return name
+        # Check if the type itself is ToolRuntime (direct usage)
+        if _is_injection(type_, ToolRuntime):
+            return name
+        # Check if ToolRuntime is in Annotated args
+        injections = [
+            type_arg for type_arg in get_args(type_) if _is_injection(type_arg, ToolRuntime)
+        ]
+        if len(injections) > 1:
+            msg = (
+                "A tool argument should not be annotated with ToolRuntime more than "
+                f"once. Received arg {name} with annotations {injections}."
+            )
+            raise ValueError(msg)
+        if len(injections) == 1:
+            return name
+
+    return None
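
Editor note: `_get_runtime_arg` recognizes the runtime parameter three ways: by the literal name `runtime`, by the `ToolRuntime` type itself, or via `Annotated` metadata. Plain signatures for illustration; the `Annotated` spelling in particular is an assumption about how that third form would be written.

```python
from typing import Annotated

from langchain.tools import ToolRuntime


def by_name(x: int, runtime: ToolRuntime) -> int:
    """Matched first: the parameter is literally named 'runtime'."""
    return x * 2


def by_type(x: int, rt: ToolRuntime) -> int:
    """Matched next: the parameter's type itself is ToolRuntime."""
    return x * 2


def by_metadata(x: int, rt: Annotated[ToolRuntime, ToolRuntime]) -> int:
    """Matched last: ToolRuntime appears in the Annotated metadata."""
    return x * 2
```
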
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain
-Version: 1.0.0a15
+Version: 1.0.0rc1
 Summary: Building applications with LLMs through composability
 Project-URL: homepage, https://docs.langchain.com/
 Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -0,0 +1,30 @@
+langchain/__init__.py,sha256=P-ltxKbHEHDxRxjxiCkHmWeDHiI9-Bgd6sjEJ8c0TcY,64
+langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
+langchain/agents/factory.py,sha256=dEfeSNBZ2NCp0E64mHgxNseNASfMdHd-7i2JhPbWv_c,62606
+langchain/agents/structured_output.py,sha256=BDSF6PWVih41M7IGyjCHWy46jmDxZsfBt_B4llE9OOc,13764
+langchain/agents/middleware/__init__.py,sha256=FBoTr4TAyuLJiOKV-mJN3oaLE6D6Q5ubI5sCbnsCCSs,1955
+langchain/agents/middleware/context_editing.py,sha256=0sUpDc0FvOKMERNnEKnhBqrTjX_rCVWjIX8hH3RTG8U,8749
+langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
+langchain/agents/middleware/model_call_limit.py,sha256=yYBcOY5DKNIG6_9f-rkTjIj_BLVUO1tuShgxt00P8W0,7735
+langchain/agents/middleware/model_fallback.py,sha256=VKDN81jfFB9zJOaJZ94tfwzodk3zRXRwz6CqQ6MkJtw,4097
+langchain/agents/middleware/pii.py,sha256=rkGojBFIJGMs1p1cKNh0npafts_0UUJ0_NeZsyJo498,24760
+langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
+langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
+langchain/agents/middleware/tool_call_limit.py,sha256=AHA-No4oUze2-2d1INnX8d_9otFuDB8uoWayJpt9nPo,12321
+langchain/agents/middleware/tool_emulator.py,sha256=UmN5UIMsikDnFvcPbNLNDOF4RXvIxqd-AMG46LVI0iA,7211
+langchain/agents/middleware/tool_retry.py,sha256=M76csBFFZa37djxtfUCqNU-x2erTNtxZqoO__DozWxA,13787
+langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
+langchain/agents/middleware/types.py,sha256=FVuYue9cLB5C1ZNcYLIGNPN6IUaNXY3UsWQ6gC-gZNs,55547
+langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
+langchain/chat_models/base.py,sha256=k1Qnuh7O_3LwsWtcVFSWsWP00hxiEyninwltTdi1rk8,35655
+langchain/embeddings/__init__.py,sha256=crY7GUw7RSA25JgpYl7I4WPloVCVY6eUmJbSSSchWis,444
+langchain/embeddings/base.py,sha256=1aNxDLQmS_l7RMcvjnK7Cv7rtgKrKt6Sl7mgXM2_JWI,7329
+langchain/messages/__init__.py,sha256=p7NlF1yf8MkMgJzJ2wggXGkkA_okz1f-g63KoflL6PA,1710
+langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
+langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
+langchain/tools/tool_node.py,sha256=wsYXehwtaCIWYOSv4ncV23WQ7-N34sEwckcYfaFoLFg,64977
+langchain-1.0.0rc1.dist-info/METADATA,sha256=niUo9gSSaOCiH5kFM9cYfd0m1G06cdfifywnk0G_Mqk,4543
+langchain-1.0.0rc1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain-1.0.0rc1.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-1.0.0rc1.dist-info/RECORD,,