langchain 1.0.0a14__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


langchain/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """Main entrypoint into LangChain."""
 
- __version__ = "1.0.0a13"
+ __version__ = "1.0.0rc1"

langchain/agents/__init__.py CHANGED
@@ -1,4 +1,10 @@
- """langgraph.prebuilt exposes a higher-level API for creating and executing agents and tools."""
+ """Entrypoint to building [Agents](https://docs.langchain.com/oss/python/langchain/agents) with LangChain.
+
+ !!! warning "Reference docs"
+     This page contains **reference documentation** for Agents. See
+     [the docs](https://docs.langchain.com/oss/python/langchain/agents) for conceptual
+     guides, tutorials, and examples on using Agents.
+ """  # noqa: E501
 
  from langchain.agents.factory import create_agent
  from langchain.agents.middleware.types import AgentState

langchain/agents/factory.py CHANGED
@@ -92,7 +92,7 @@ def _chain_model_call_handlers(
          handlers: List of handlers. First handler wraps all others.
 
      Returns:
-         Composed handler, or None if handlers empty.
+         Composed handler, or `None` if handlers empty.
 
      Example:
          ```python
@@ -195,13 +195,13 @@ def _chain_async_model_call_handlers(
      ]
      | None
  ):
-     """Compose multiple async wrap_model_call handlers into single middleware stack.
+     """Compose multiple async `wrap_model_call` handlers into single middleware stack.
 
      Args:
          handlers: List of async handlers. First handler wraps all others.
 
      Returns:
-         Composed async handler, or None if handlers empty.
+         Composed async handler, or `None` if handlers empty.
      """
      if not handlers:
          return None
@@ -267,12 +267,13 @@ def _chain_async_model_call_handlers(
 
 
  def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None = None) -> type:
-     """Resolve schema by merging schemas and optionally respecting OmitFromSchema annotations.
+     """Resolve schema by merging schemas and optionally respecting `OmitFromSchema` annotations.
 
      Args:
          schemas: List of schema types to merge
-         schema_name: Name for the generated TypedDict
-         omit_flag: If specified, omit fields with this flag set ('input' or 'output')
+         schema_name: Name for the generated `TypedDict`
+         omit_flag: If specified, omit fields with this flag set (`'input'` or
+             `'output'`)
      """
      all_annotations = {}
 
@@ -312,11 +313,11 @@ def _extract_metadata(type_: type) -> list:
 
 
  def _get_can_jump_to(middleware: AgentMiddleware[Any, Any], hook_name: str) -> list[JumpTo]:
-     """Get the can_jump_to list from either sync or async hook methods.
+     """Get the `can_jump_to` list from either sync or async hook methods.
 
      Args:
          middleware: The middleware instance to inspect.
-         hook_name: The name of the hook ('before_model' or 'after_model').
+         hook_name: The name of the hook (`'before_model'` or `'after_model'`).
 
      Returns:
          List of jump destinations, or empty list if not configured.
@@ -350,7 +351,7 @@ def _supports_provider_strategy(model: str | BaseChatModel) -> bool:
      """Check if a model supports provider-specific structured output.
 
      Args:
-         model: Model name string or BaseChatModel instance.
+         model: Model name string or `BaseChatModel` instance.
 
      Returns:
          `True` if the model supports provider-specific structured output, `False` otherwise.
@@ -373,7 +374,7 @@ def _handle_structured_output_error(
      exception: Exception,
      response_format: ResponseFormat,
  ) -> tuple[bool, str]:
-     """Handle structured output error. Returns (should_retry, retry_tool_message)."""
+     """Handle structured output error. Returns `(should_retry, retry_tool_message)`."""
      if not isinstance(response_format, ToolStrategy):
          return False, ""
 
@@ -408,7 +409,7 @@ def _chain_tool_call_wrappers(
          wrappers: Wrappers in middleware order.
 
      Returns:
-         Composed wrapper, or None if empty.
+         Composed wrapper, or `None` if empty.
 
      Example:
          wrapper = _chain_tool_call_wrappers([auth, cache, retry])
@@ -465,7 +466,7 @@ def _chain_async_tool_call_wrappers(
          wrappers: Async wrappers in middleware order.
 
      Returns:
-         Composed async wrapper, or None if empty.
+         Composed async wrapper, or `None` if empty.
      """
      if not wrappers:
          return None
@@ -516,6 +517,7 @@ def create_agent( # noqa: PLR0915
      system_prompt: str | None = None,
      middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
      response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
+     state_schema: type[AgentState[ResponseT]] | None = None,
      context_schema: type[ContextT] | None = None,
      checkpointer: Checkpointer | None = None,
      store: BaseStore | None = None,
@@ -534,19 +536,27 @@ def create_agent( # noqa: PLR0915
 
      Args:
          model: The language model for the agent. Can be a string identifier
-             (e.g., `"openai:gpt-4"`), a chat model instance (e.g., `ChatOpenAI()`).
-         tools: A list of tools, dicts, or callables. If `None` or an empty list,
+             (e.g., `"openai:gpt-4"`) or a chat model instance (e.g., `ChatOpenAI()`).
+             For a full list of supported model strings, see
+             [`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
+         tools: A list of tools, `dicts`, or `Callable`. If `None` or an empty list,
              the agent will consist of a model node without a tool calling loop.
-         system_prompt: An optional system prompt for the LLM. If provided as a string,
-             it will be converted to a SystemMessage and added to the beginning
-             of the message list.
+         system_prompt: An optional system prompt for the LLM. Prompts are converted to a
+             `SystemMessage` and added to the beginning of the message list.
          middleware: A sequence of middleware instances to apply to the agent.
              Middleware can intercept and modify agent behavior at various stages.
          response_format: An optional configuration for structured responses.
-             Can be a ToolStrategy, ProviderStrategy, or a Pydantic model class.
+             Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
              If provided, the agent will handle structured output during the
              conversation flow. Raw schemas will be wrapped in an appropriate strategy
              based on model capabilities.
+         state_schema: An optional `TypedDict` schema that extends `AgentState`.
+             When provided, this schema is used instead of `AgentState` as the base
+             schema for merging with middleware state schemas. This allows users to
+             add custom state fields without needing to create custom middleware.
+             Generally, it's recommended to use state_schema extensions via middleware
+             to keep relevant extensions scoped to corresponding hooks / tools.
+             The schema must be a subclass of `AgentState[ResponseT]`.
          context_schema: An optional schema for runtime context.
          checkpointer: An optional checkpoint saver object. This is used for persisting
              the state of the graph (e.g., as chat memory) for a single thread
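
Example (illustrative, not part of this diff): the new `state_schema` parameter can be exercised as below. The `SupportState` class, its `user_name` field, and the model string are assumptions made for this sketch.

```python
from typing_extensions import NotRequired

from langchain.agents import AgentState, create_agent


class SupportState(AgentState):
    """Hypothetical extension of AgentState with one custom field."""

    user_name: NotRequired[str]


agent = create_agent(
    model="openai:gpt-4o",  # illustrative model string
    tools=[],
    state_schema=SupportState,  # merged with middleware state schemas
)
```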
@@ -554,24 +564,27 @@ def create_agent( # noqa: PLR0915
          store: An optional store object. This is used for persisting data
              across multiple threads (e.g., multiple conversations / users).
          interrupt_before: An optional list of node names to interrupt before.
-             This is useful if you want to add a user confirmation or other interrupt
+             Useful if you want to add a user confirmation or other interrupt
              before taking an action.
          interrupt_after: An optional list of node names to interrupt after.
-             This is useful if you want to return directly or run additional processing
+             Useful if you want to return directly or run additional processing
              on an output.
-         debug: A flag indicating whether to enable debug mode.
-         name: An optional name for the CompiledStateGraph.
+         debug: Whether to enable verbose logging for graph execution. When enabled,
+             prints detailed information about each node execution, state updates,
+             and transitions during agent runtime. Useful for debugging middleware
+             behavior and understanding agent execution flow.
+         name: An optional name for the `CompiledStateGraph`.
              This name will be automatically used when adding the agent graph to
              another graph as a subgraph node - particularly useful for building
              multi-agent systems.
-         cache: An optional BaseCache instance to enable caching of graph execution.
+         cache: An optional `BaseCache` instance to enable caching of graph execution.
 
      Returns:
-         A compiled StateGraph that can be used for chat interactions.
+         A compiled `StateGraph` that can be used for chat interactions.
 
      The agent node calls the language model with the messages list (after applying
-     the system prompt). If the resulting AIMessage contains `tool_calls`, the graph will
-     then call the tools. The tools node executes the tools and adds the responses
+     the system prompt). If the resulting `AIMessage` contains `tool_calls`, the graph
+     will then call the tools. The tools node executes the tools and adds the responses
      to the messages list as `ToolMessage` objects. The agent node then calls the
      language model again. The process repeats until no more `tool_calls` are
      present in the response. The agent then returns the full list of messages.
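
Example (illustrative, not part of this diff): per the rewritten `debug` docs, verbose execution logging is a single flag.

```python
from langchain.agents import create_agent

agent = create_agent(
    model="openai:gpt-4o",  # illustrative model string
    tools=[],
    debug=True,  # prints node execution, state updates, and transitions
)
```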
@@ -587,7 +600,7 @@ def create_agent( # noqa: PLR0915
 
 
      graph = create_agent(
-         model="anthropic:claude-3-7-sonnet-latest",
+         model="anthropic:claude-sonnet-4-5-20250929",
          tools=[check_weather],
          system_prompt="You are a helpful assistant",
      )
@@ -757,9 +770,11 @@ def create_agent( # noqa: PLR0915
      awrap_model_call_handler = _chain_async_model_call_handlers(async_handlers)
 
      state_schemas = {m.state_schema for m in middleware}
-     state_schemas.add(AgentState)
+     # Use provided state_schema if available, otherwise use base AgentState
+     base_state = state_schema if state_schema is not None else AgentState
+     state_schemas.add(base_state)
 
-     state_schema = _resolve_schema(state_schemas, "StateSchema", None)
+     resolved_state_schema = _resolve_schema(state_schemas, "StateSchema", None)
      input_schema = _resolve_schema(state_schemas, "InputSchema", "input")
      output_schema = _resolve_schema(state_schemas, "OutputSchema", "output")
 
@@ -767,7 +782,7 @@ def create_agent( # noqa: PLR0915
      graph: StateGraph[
          AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
      ] = StateGraph(
-         state_schema=state_schema,
+         state_schema=resolved_state_schema,
          input_schema=input_schema,
          output_schema=output_schema,
          context_schema=context_schema,
@@ -879,8 +894,9 @@ def create_agent( # noqa: PLR0915
          request: The model request containing model, tools, and response format.
 
      Returns:
-         Tuple of (bound_model, effective_response_format) where `effective_response_format`
-         is the actual strategy used (may differ from initial if auto-detected).
+         Tuple of `(bound_model, effective_response_format)` where
+         `effective_response_format` is the actual strategy used (may differ from
+         initial if auto-detected).
      """
      # Validate ONLY client-side tools that need to exist in tool_node
      # Build map of available client-side tools from the ToolNode
@@ -986,7 +1002,7 @@ def create_agent( # noqa: PLR0915
      def _execute_model_sync(request: ModelRequest) -> ModelResponse:
          """Execute model and return response.
 
-         This is the core model execution logic wrapped by wrap_model_call handlers.
+         This is the core model execution logic wrapped by `wrap_model_call` handlers.
          Raises any exceptions that occur during model invocation.
          """
          # Get the bound model (with auto-detection if needed)
@@ -1032,16 +1048,14 @@ def create_agent( # noqa: PLR0915
          if response.structured_response is not None:
              state_updates["structured_response"] = response.structured_response
 
-         return {
-             "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
-             "run_model_call_count": state.get("run_model_call_count", 0) + 1,
-             **state_updates,
-         }
+         return state_updates
 
      async def _execute_model_async(request: ModelRequest) -> ModelResponse:
          """Execute model asynchronously and return response.
 
-         This is the core async model execution logic wrapped by wrap_model_call handlers.
+         This is the core async model execution logic wrapped by `wrap_model_call`
+         handlers.
+
          Raises any exceptions that occur during model invocation.
          """
          # Get the bound model (with auto-detection if needed)
@@ -1087,11 +1101,7 @@ def create_agent( # noqa: PLR0915
          if response.structured_response is not None:
              state_updates["structured_response"] = response.structured_response
 
-         return {
-             "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
-             "run_model_call_count": state.get("run_model_call_count", 0) + 1,
-             **state_updates,
-         }
+         return state_updates
 
      # Use sync or async based on model capabilities
      graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))
@@ -1119,7 +1129,9 @@ def create_agent( # noqa: PLR0915
              else None
          )
          before_agent_node = RunnableCallable(sync_before_agent, async_before_agent, trace=False)
-         graph.add_node(f"{m.name}.before_agent", before_agent_node, input_schema=state_schema)
+         graph.add_node(
+             f"{m.name}.before_agent", before_agent_node, input_schema=resolved_state_schema
+         )
 
          if (
              m.__class__.before_model is not AgentMiddleware.before_model
@@ -1138,7 +1150,9 @@ def create_agent( # noqa: PLR0915
              else None
          )
          before_node = RunnableCallable(sync_before, async_before, trace=False)
-         graph.add_node(f"{m.name}.before_model", before_node, input_schema=state_schema)
+         graph.add_node(
+             f"{m.name}.before_model", before_node, input_schema=resolved_state_schema
+         )
 
          if (
              m.__class__.after_model is not AgentMiddleware.after_model
@@ -1157,7 +1171,7 @@ def create_agent( # noqa: PLR0915
              else None
          )
          after_node = RunnableCallable(sync_after, async_after, trace=False)
-         graph.add_node(f"{m.name}.after_model", after_node, input_schema=state_schema)
+         graph.add_node(f"{m.name}.after_model", after_node, input_schema=resolved_state_schema)
 
          if (
              m.__class__.after_agent is not AgentMiddleware.after_agent
@@ -1176,7 +1190,9 @@ def create_agent( # noqa: PLR0915
              else None
          )
          after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
-         graph.add_node(f"{m.name}.after_agent", after_agent_node, input_schema=state_schema)
+         graph.add_node(
+             f"{m.name}.after_agent", after_agent_node, input_schema=resolved_state_schema
+         )
 
      # Determine the entry node (runs once at start): before_agent -> before_model -> model
      if middleware_w_before_agent:
@@ -1209,6 +1225,15 @@ def create_agent( # noqa: PLR0915
      graph.add_edge(START, entry_node)
      # add conditional edges only if tools exist
      if tool_node is not None:
+         # Only include exit_node in destinations if any tool has return_direct=True
+         # or if there are structured output tools
+         tools_to_model_destinations = [loop_entry_node]
+         if (
+             any(tool.return_direct for tool in tool_node.tools_by_name.values())
+             or structured_output_tools
+         ):
+             tools_to_model_destinations.append(exit_node)
+
          graph.add_conditional_edges(
              "tools",
              _make_tools_to_model_edge(
@@ -1217,7 +1242,7 @@ def create_agent( # noqa: PLR0915
                  structured_output_tools=structured_output_tools,
                  end_destination=exit_node,
              ),
-             [loop_entry_node, exit_node],
+             tools_to_model_destinations,
          )
 
      # base destinations are tools and exit_node
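
Example (illustrative, not part of this diff): the new wiring only keeps the exit node as a destination of the tools node when some tool sets `return_direct=True` or structured-output tools exist. A sketch of a tool that exercises that path; the tool itself is hypothetical.

```python
from langchain_core.tools import tool

from langchain.agents import create_agent


@tool(return_direct=True)
def lookup_order(order_id: str) -> str:
    """Look up an order; the result is returned directly and the run ends."""
    return f"Order {order_id}: shipped"


# Because return_direct=True is present, the compiled graph keeps the
# tools -> exit edge; without such a tool that edge is now omitted.
agent = create_agent(model="openai:gpt-4o", tools=[lookup_order])
```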

langchain/agents/middleware/__init__.py CHANGED
@@ -1,4 +1,10 @@
- """Middleware plugins for agents."""
+ """Entrypoint to using [Middleware](https://docs.langchain.com/oss/python/langchain/middleware) plugins with [Agents](https://docs.langchain.com/oss/python/langchain/agents).
+
+ !!! warning "Reference docs"
+     This page contains **reference documentation** for Middleware. See
+     [the docs](https://docs.langchain.com/oss/python/langchain/middleware) for conceptual
+     guides, tutorials, and examples on using Middleware.
+ """  # noqa: E501
 
  from .context_editing import (
      ClearToolUsesEdit,
@@ -11,16 +17,17 @@ from .human_in_the_loop import (
  from .model_call_limit import ModelCallLimitMiddleware
  from .model_fallback import ModelFallbackMiddleware
  from .pii import PIIDetectionError, PIIMiddleware
- from .planning import PlanningMiddleware
- from .prompt_caching import AnthropicPromptCachingMiddleware
  from .summarization import SummarizationMiddleware
+ from .todo import TodoListMiddleware
  from .tool_call_limit import ToolCallLimitMiddleware
  from .tool_emulator import LLMToolEmulator
+ from .tool_retry import ToolRetryMiddleware
  from .tool_selection import LLMToolSelectorMiddleware
  from .types import (
      AgentMiddleware,
      AgentState,
      ModelRequest,
+     ModelResponse,
      after_agent,
      after_model,
      before_agent,
@@ -28,13 +35,12 @@ from .types import (
      dynamic_prompt,
      hook_config,
      wrap_model_call,
+     wrap_tool_call,
  )
 
  __all__ = [
      "AgentMiddleware",
      "AgentState",
-     # should move to langchain-anthropic if we decide to keep it
-     "AnthropicPromptCachingMiddleware",
      "ClearToolUsesEdit",
      "ContextEditingMiddleware",
      "HumanInTheLoopMiddleware",
@@ -44,11 +50,13 @@ __all__ = [
      "ModelCallLimitMiddleware",
      "ModelFallbackMiddleware",
      "ModelRequest",
+     "ModelResponse",
      "PIIDetectionError",
      "PIIMiddleware",
-     "PlanningMiddleware",
      "SummarizationMiddleware",
+     "TodoListMiddleware",
      "ToolCallLimitMiddleware",
+     "ToolRetryMiddleware",
      "after_agent",
      "after_model",
      "before_agent",
@@ -56,4 +64,5 @@ __all__ = [
      "dynamic_prompt",
      "hook_config",
      "wrap_model_call",
+     "wrap_tool_call",
  ]
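
Taken together, these export changes imply the following import migration (assuming `TodoListMiddleware` is the renamed successor to `PlanningMiddleware`, as the paired removal and addition suggest):

```python
# Before (1.0.0a14):
# from langchain.agents.middleware import AnthropicPromptCachingMiddleware, PlanningMiddleware

# After (1.0.0rc1):
from langchain.agents.middleware import (
    ModelResponse,  # newly exported
    TodoListMiddleware,  # appears to replace PlanningMiddleware
    ToolRetryMiddleware,  # new middleware
    wrap_tool_call,  # new decorator export
)
```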

langchain/agents/middleware/context_editing.py CHANGED
@@ -8,7 +8,7 @@ with any LangChain chat model.
 
  from __future__ import annotations
 
- from collections.abc import Callable, Iterable, Sequence
+ from collections.abc import Awaitable, Callable, Iterable, Sequence
  from dataclasses import dataclass
  from typing import Literal
 
@@ -198,7 +198,7 @@ class ContextEditingMiddleware(AgentMiddleware):
          edits: Iterable[ContextEdit] | None = None,
          token_count_method: Literal["approximate", "model"] = "approximate",  # noqa: S107
      ) -> None:
-         """Initialise a context editing middleware instance.
+         """Initializes a context editing middleware instance.
 
          Args:
              edits: Sequence of edit strategies to apply. Defaults to a single
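
Example (illustrative, not part of this diff): both constructor parameters appear in this hunk, so a minimal construction looks like the sketch below; leaving `edits` unset falls back to the documented default strategy.

```python
from langchain.agents.middleware import ContextEditingMiddleware

# "approximate" avoids a model round-trip when counting tokens.
middleware = ContextEditingMiddleware(token_count_method="approximate")
```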
@@ -239,6 +239,34 @@ class ContextEditingMiddleware(AgentMiddleware):
 
          return handler(request)
 
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelCallResult:
+         """Apply context edits before invoking the model via handler (async version)."""
+         if not request.messages:
+             return await handler(request)
+
+         if self.token_count_method == "approximate":  # noqa: S105
+
+             def count_tokens(messages: Sequence[BaseMessage]) -> int:
+                 return count_tokens_approximately(messages)
+         else:
+             system_msg = (
+                 [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
+             )
+
+             def count_tokens(messages: Sequence[BaseMessage]) -> int:
+                 return request.model.get_num_tokens_from_messages(
+                     system_msg + list(messages), request.tools
+                 )
+
+         for edit in self.edits:
+             edit.apply(request.messages, count_tokens=count_tokens)
+
+         return await handler(request)
+
 
  __all__ = [
      "ClearToolUsesEdit",

langchain/agents/middleware/human_in_the_loop.py CHANGED
@@ -11,23 +11,23 @@ from langchain.agents.middleware.types import AgentMiddleware, AgentState
 
 
  class Action(TypedDict):
-     """Represents an action with a name and arguments."""
+     """Represents an action with a name and args."""
 
      name: str
      """The type or name of action being requested (e.g., "add_numbers")."""
 
-     arguments: dict[str, Any]
-     """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+     args: dict[str, Any]
+     """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
 
 
  class ActionRequest(TypedDict):
-     """Represents an action request with a name, arguments, and description."""
+     """Represents an action request with a name, args, and description."""
 
      name: str
      """The name of the action being requested."""
 
-     arguments: dict[str, Any]
-     """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+     args: dict[str, Any]
+     """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
 
      description: NotRequired[str]
      """The description of the action to be reviewed."""
@@ -45,8 +45,8 @@ class ReviewConfig(TypedDict):
      allowed_decisions: list[DecisionType]
      """The decisions that are allowed for this request."""
 
-     arguments_schema: NotRequired[dict[str, Any]]
-     """JSON schema for the arguments associated with the action, if edits are allowed."""
+     args_schema: NotRequired[dict[str, Any]]
+     """JSON schema for the args associated with the action, if edits are allowed."""
 
 
  class HITLRequest(TypedDict):
@@ -110,7 +110,8 @@ class _DescriptionFactory(Protocol):
  class InterruptOnConfig(TypedDict):
      """Configuration for an action requiring human in the loop.
 
-     This is the configuration format used in the `HumanInTheLoopMiddleware.__init__` method.
+     This is the configuration format used in the `HumanInTheLoopMiddleware.__init__`
+     method.
      """
 
      allowed_decisions: list[DecisionType]
@@ -120,6 +121,7 @@ class InterruptOnConfig(TypedDict):
      """The description attached to the request for human input.
 
      Can be either:
+
      - A static string describing the approval request
      - A callable that dynamically generates the description based on agent state,
        runtime, and tool call information
@@ -150,8 +152,8 @@ class InterruptOnConfig(TypedDict):
          )
      ```
      """
-     arguments_schema: NotRequired[dict[str, Any]]
-     """JSON schema for the arguments associated with the action, if edits are allowed."""
+     args_schema: NotRequired[dict[str, Any]]
+     """JSON schema for the args associated with the action, if edits are allowed."""
 
 
  class HumanInTheLoopMiddleware(AgentMiddleware):
@@ -171,12 +173,14 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 
              * `True` indicates all decisions are allowed: approve, edit, and reject.
              * `False` indicates that the tool is auto-approved.
-             * `InterruptOnConfig` indicates the specific decisions allowed for this tool.
-               The InterruptOnConfig can include a `description` field (str or callable) for
-               custom formatting of the interrupt description.
+             * `InterruptOnConfig` indicates the specific decisions allowed for this
+               tool.
+               The InterruptOnConfig can include a `description` field (`str` or
+               `Callable`) for custom formatting of the interrupt description.
          description_prefix: The prefix to use when constructing action requests.
-             This is used to provide context about the tool call and the action being requested.
-             Not used if a tool has a `description` in its InterruptOnConfig.
+             This is used to provide context about the tool call and the action being
+             requested. Not used if a tool has a `description` in its
+             `InterruptOnConfig`.
          """
          super().__init__()
          resolved_configs: dict[str, InterruptOnConfig] = {}
@@ -214,12 +218,12 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
              # Create ActionRequest with description
              action_request = ActionRequest(
                  name=tool_name,
-                 arguments=tool_args,
+                 args=tool_args,
                  description=description,
              )
 
              # Create ReviewConfig
-             # eventually can get tool information and populate arguments_schema from there
+             # eventually can get tool information and populate args_schema from there
              review_config = ReviewConfig(
                  action_name=tool_name,
                  allowed_decisions=config["allowed_decisions"],
@@ -244,7 +248,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
                  ToolCall(
                      type="tool_call",
                      name=edited_action["name"],
-                     args=edited_action["arguments"],
+                     args=edited_action["args"],
                      id=tool_call["id"],
                  ),
                  None,
@@ -270,7 +274,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
              raise ValueError(msg)
 
      def after_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
-         """Trigger interrupt flows for relevant tool calls after an AIMessage."""
+         """Trigger interrupt flows for relevant tool calls after an `AIMessage`."""
          messages = state["messages"]
          if not messages:
              return None