langchain 1.0.0a5__tar.gz → 1.0.0a7__tar.gz

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Files changed (93)
  1. {langchain-1.0.0a5 → langchain-1.0.0a7}/PKG-INFO +9 -5
  2. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/__init__.py +1 -1
  3. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/__init__.py +3 -0
  4. langchain-1.0.0a7/langchain/agents/middleware/dynamic_system_prompt.py +105 -0
  5. langchain-1.0.0a7/langchain/agents/middleware/human_in_the_loop.py +253 -0
  6. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/prompt_caching.py +5 -2
  7. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/summarization.py +1 -1
  8. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/types.py +50 -10
  9. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware_agent.py +125 -71
  10. {langchain-1.0.0a5 → langchain-1.0.0a7}/pyproject.toml +29 -32
  11. langchain-1.0.0a7/tests/unit_tests/agents/test_middleware_agent.py +1346 -0
  12. langchain-1.0.0a5/langchain/agents/interrupt.py +0 -97
  13. langchain-1.0.0a5/langchain/agents/middleware/_utils.py +0 -11
  14. langchain-1.0.0a5/langchain/agents/middleware/human_in_the_loop.py +0 -135
  15. langchain-1.0.0a5/tests/unit_tests/agents/test_middleware_agent.py +0 -735
  16. {langchain-1.0.0a5 → langchain-1.0.0a7}/LICENSE +0 -0
  17. {langchain-1.0.0a5 → langchain-1.0.0a7}/README.md +0 -0
  18. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/__init__.py +0 -0
  19. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/_documents.py +0 -0
  20. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/_lazy_import.py +0 -0
  21. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/_prompts.py +0 -0
  22. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/_typing.py +0 -0
  23. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/_internal/_utils.py +0 -0
  24. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/__init__.py +0 -0
  25. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/_internal/__init__.py +0 -0
  26. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/_internal/_typing.py +0 -0
  27. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/react_agent.py +0 -0
  28. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/structured_output.py +0 -0
  29. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/tool_node.py +0 -0
  30. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/chat_models/__init__.py +0 -0
  31. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/chat_models/base.py +0 -0
  32. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/documents/__init__.py +0 -0
  33. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/embeddings/__init__.py +0 -0
  34. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/embeddings/base.py +0 -0
  35. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/embeddings/cache.py +0 -0
  36. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/globals.py +0 -0
  37. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/py.typed +0 -0
  38. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/storage/__init__.py +0 -0
  39. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/storage/encoder_backed.py +0 -0
  40. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/storage/exceptions.py +0 -0
  41. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/storage/in_memory.py +0 -0
  42. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/text_splitter.py +0 -0
  43. {langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/tools/__init__.py +0 -0
  44. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/__init__.py +0 -0
  45. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/__init__.py +0 -0
  46. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/agents/__init__.py +0 -0
  47. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/agents/test_response_format.py +0 -0
  48. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/cache/__init__.py +0 -0
  49. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/cache/fake_embeddings.py +0 -0
  50. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/chat_models/__init__.py +0 -0
  51. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/chat_models/test_base.py +0 -0
  52. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/conftest.py +0 -0
  53. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/embeddings/__init__.py +0 -0
  54. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/embeddings/test_base.py +0 -0
  55. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/integration_tests/test_compile.py +0 -0
  56. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/__init__.py +0 -0
  57. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/__init__.py +0 -0
  58. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/__snapshots__/test_middleware_agent.ambr +0 -0
  59. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/__snapshots__/test_react_agent_graph.ambr +0 -0
  60. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/any_str.py +0 -0
  61. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/compose-postgres.yml +0 -0
  62. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/compose-redis.yml +0 -0
  63. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/conftest.py +0 -0
  64. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/conftest_checkpointer.py +0 -0
  65. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/conftest_store.py +0 -0
  66. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/memory_assert.py +0 -0
  67. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/messages.py +0 -0
  68. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/model.py +0 -0
  69. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/specifications/responses.json +0 -0
  70. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/specifications/return_direct.json +0 -0
  71. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_react_agent.py +0 -0
  72. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_react_agent_graph.py +0 -0
  73. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_response_format.py +0 -0
  74. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_responses.py +0 -0
  75. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_responses_spec.py +0 -0
  76. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_return_direct_spec.py +0 -0
  77. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/test_tool_node.py +0 -0
  78. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/agents/utils.py +0 -0
  79. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/chat_models/__init__.py +0 -0
  80. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/chat_models/test_chat_models.py +0 -0
  81. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/conftest.py +0 -0
  82. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/embeddings/__init__.py +0 -0
  83. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/embeddings/test_base.py +0 -0
  84. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/embeddings/test_caching.py +0 -0
  85. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/embeddings/test_imports.py +0 -0
  86. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/storage/__init__.py +0 -0
  87. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/storage/test_imports.py +0 -0
  88. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/stubs.py +0 -0
  89. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/test_dependencies.py +0 -0
  90. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/test_imports.py +0 -0
  91. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/test_pytest_config.py +0 -0
  92. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/tools/__init__.py +0 -0
  93. {langchain-1.0.0a5 → langchain-1.0.0a7}/tests/unit_tests/tools/test_imports.py +0 -0
{langchain-1.0.0a5 → langchain-1.0.0a7}/PKG-INFO

@@ -1,22 +1,26 @@
  Metadata-Version: 2.1
  Name: langchain
- Version: 1.0.0a5
+ Version: 1.0.0a7
  Summary: Building applications with LLMs through composability
  License: MIT
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
  Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
  Project-URL: repository, https://github.com/langchain-ai/langchain
- Requires-Python: >=3.10
+ Requires-Python: <4.0.0,>=3.10.0
  Requires-Dist: langchain-core<2.0.0,>=0.3.75
- Requires-Dist: langchain-text-splitters<1.0.0,>=0.3.11
- Requires-Dist: langgraph>=0.6.7
- Requires-Dist: pydantic>=2.7.4
+ Requires-Dist: langchain-text-splitters<2.0.0,>=0.3.11
+ Requires-Dist: langgraph<2.0.0,>=0.6.7
+ Requires-Dist: pydantic<3.0.0,>=2.7.4
+ Provides-Extra: community
+ Requires-Dist: langchain-community; extra == "community"
  Provides-Extra: anthropic
  Requires-Dist: langchain-anthropic; extra == "anthropic"
  Provides-Extra: openai
  Requires-Dist: langchain-openai; extra == "openai"
  Provides-Extra: azure-ai
  Requires-Dist: langchain-azure-ai; extra == "azure-ai"
+ Provides-Extra: cohere
+ Requires-Dist: langchain-cohere; extra == "cohere"
  Provides-Extra: google-vertexai
  Requires-Dist: langchain-google-vertexai; extra == "google-vertexai"
  Provides-Extra: google-genai
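
Every runtime dependency now carries an explicit upper bound, `Requires-Python` gains a `<4.0.0` cap, and two new extras (`community` and `cohere`) are introduced. The sketch below, using the third-party `packaging` library, illustrates what the added caps change in practice; the `2.1.0` version is hypothetical:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.6.7")         # old: langgraph>=0.6.7
new_pin = SpecifierSet("<2.0.0,>=0.6.7")  # new: langgraph<2.0.0,>=0.6.7

# A hypothetical future major release satisfies the old pin but not the new one.
print(Version("2.1.0") in old_pin)  # True
print(Version("2.1.0") in new_pin)  # False
```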
{langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/__init__.py

@@ -2,7 +2,7 @@

  from typing import Any

- __version__ = "1.0.0a4"
+ __version__ = "1.0.0a6"


  def __getattr__(name: str) -> Any:  # noqa: ANN401
{langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/__init__.py

@@ -1,5 +1,6 @@
  """Middleware plugins for agents."""

+ from .dynamic_system_prompt import DynamicSystemPromptMiddleware
  from .human_in_the_loop import HumanInTheLoopMiddleware
  from .prompt_caching import AnthropicPromptCachingMiddleware
  from .summarization import SummarizationMiddleware
@@ -8,7 +9,9 @@ from .types import AgentMiddleware, AgentState, ModelRequest
  __all__ = [
      "AgentMiddleware",
      "AgentState",
+     # should move to langchain-anthropic if we decide to keep it
      "AnthropicPromptCachingMiddleware",
+     "DynamicSystemPromptMiddleware",
      "HumanInTheLoopMiddleware",
      "ModelRequest",
      "SummarizationMiddleware",
langchain-1.0.0a7/langchain/agents/middleware/dynamic_system_prompt.py (new file)

@@ -0,0 +1,105 @@
+ """Dynamic System Prompt Middleware.
+
+ Allows setting the system prompt dynamically right before each model invocation.
+ Useful when the prompt depends on the current agent state or per-invocation context.
+ """
+
+ from __future__ import annotations
+
+ from inspect import signature
+ from typing import TYPE_CHECKING, Protocol, TypeAlias, cast
+
+ from langgraph.typing import ContextT
+
+ from langchain.agents.middleware.types import (
+     AgentMiddleware,
+     AgentState,
+     ModelRequest,
+ )
+
+ if TYPE_CHECKING:
+     from langgraph.runtime import Runtime
+
+
+ class DynamicSystemPromptWithoutRuntime(Protocol):
+     """Dynamic system prompt without runtime in call signature."""
+
+     def __call__(self, state: AgentState) -> str:
+         """Return the system prompt for the next model call."""
+         ...
+
+
+ class DynamicSystemPromptWithRuntime(Protocol[ContextT]):
+     """Dynamic system prompt with runtime in call signature."""
+
+     def __call__(self, state: AgentState, runtime: Runtime[ContextT]) -> str:
+         """Return the system prompt for the next model call."""
+         ...
+
+
+ DynamicSystemPrompt: TypeAlias = (
+     DynamicSystemPromptWithoutRuntime | DynamicSystemPromptWithRuntime[ContextT]
+ )
+
+
+ class DynamicSystemPromptMiddleware(AgentMiddleware):
+     """Dynamic System Prompt Middleware.
+
+     Allows setting the system prompt dynamically right before each model invocation.
+     Useful when the prompt depends on the current agent state or per-invocation context.
+
+     Example:
+         ```python
+         from typing_extensions import TypedDict
+
+         from langgraph.runtime import Runtime
+
+         from langchain.agents.middleware import AgentState, DynamicSystemPromptMiddleware
+
+
+         class Context(TypedDict):
+             user_name: str
+
+
+         def system_prompt(state: AgentState, runtime: Runtime[Context]) -> str:
+             user_name = runtime.context.get("user_name", "n/a")
+             return (
+                 f"You are a helpful assistant. Always address the user by their name: {user_name}"
+             )
+
+
+         middleware = DynamicSystemPromptMiddleware(system_prompt)
+         ```
+     """
+
+     _accepts_runtime: bool
+
+     def __init__(
+         self,
+         dynamic_system_prompt: DynamicSystemPrompt[ContextT],
+     ) -> None:
+         """Initialize the dynamic system prompt middleware.
+
+         Args:
+             dynamic_system_prompt: Function that receives the current agent state,
+                 and optionally the runtime with context, and returns the system
+                 prompt string for the next model call.
+         """
+         super().__init__()
+         self.dynamic_system_prompt = dynamic_system_prompt
+         self._accepts_runtime = "runtime" in signature(dynamic_system_prompt).parameters
+
+     def modify_model_request(
+         self,
+         request: ModelRequest,
+         state: AgentState,
+         runtime: Runtime[ContextT],
+     ) -> ModelRequest:
+         """Modify the model request to include the dynamic system prompt."""
+         if self._accepts_runtime:
+             system_prompt = cast(
+                 "DynamicSystemPromptWithRuntime[ContextT]", self.dynamic_system_prompt
+             )(state, runtime)
+         else:
+             system_prompt = cast("DynamicSystemPromptWithoutRuntime", self.dynamic_system_prompt)(
+                 state
+             )
+
+         request.system_prompt = system_prompt
+         return request
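
The `_accepts_runtime` check means a prompt function may take either `(state)` or `(state, runtime)`; the middleware inspects the signature once at construction time. A minimal usage sketch grounded in the module above (the prompt strings are illustrative):

```python
from typing_extensions import TypedDict

from langgraph.runtime import Runtime

from langchain.agents.middleware import AgentState, DynamicSystemPromptMiddleware


class Context(TypedDict):
    user_name: str


def state_only_prompt(state: AgentState) -> str:
    # No `runtime` parameter: matches DynamicSystemPromptWithoutRuntime.
    return f"You are a helpful assistant ({len(state['messages'])} messages so far)."


def context_aware_prompt(state: AgentState, runtime: Runtime[Context]) -> str:
    # Has `runtime`: matches DynamicSystemPromptWithRuntime and receives the context.
    return f"Address the user as {runtime.context.get('user_name', 'n/a')}."


# Either callable works; `inspect.signature` decides whether runtime is passed.
middleware = DynamicSystemPromptMiddleware(context_aware_prompt)
```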
langchain-1.0.0a7/langchain/agents/middleware/human_in_the_loop.py (new file)

@@ -0,0 +1,253 @@
+ """Human in the loop middleware."""
+
+ from typing import Any, Literal
+
+ from langchain_core.messages import AIMessage, ToolCall, ToolMessage
+ from langgraph.types import interrupt
+ from typing_extensions import NotRequired, TypedDict
+
+ from langchain.agents.middleware.types import AgentMiddleware, AgentState
+
+
+ class HumanInTheLoopConfig(TypedDict):
+     """Configuration that defines what actions are allowed for a human interrupt.
+
+     This controls the available interaction options when the graph is paused for human input.
+     """
+
+     allow_accept: NotRequired[bool]
+     """Whether the human can approve the current action without changes."""
+     allow_edit: NotRequired[bool]
+     """Whether the human can approve the current action with edited content."""
+     allow_respond: NotRequired[bool]
+     """Whether the human can reject the current action with feedback."""
+
+
+ class ActionRequest(TypedDict):
+     """Represents a request with a name and arguments."""
+
+     action: str
+     """The type or name of action being requested (e.g., "add_numbers")."""
+     args: dict
+     """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+
+
+ class HumanInTheLoopRequest(TypedDict):
+     """Represents an interrupt triggered by the graph that requires human intervention.
+
+     Example:
+         ```python
+         # Extract a tool call from the state and create an interrupt request
+         request = HumanInTheLoopRequest(
+             action_request=ActionRequest(
+                 action="run_command",  # The action being requested
+                 args={"command": "ls", "args": ["-l"]},  # Arguments for the action
+             ),
+             config=HumanInTheLoopConfig(
+                 allow_accept=True,  # Allow approval
+                 allow_respond=True,  # Allow rejection with feedback
+                 allow_edit=False,  # Don't allow approval with edits
+             ),
+             description="Please review the command before execution",
+         )
+         # Send the interrupt request and get the response
+         response = interrupt([request])[0]
+         ```
+     """
+
+     action_request: ActionRequest
+     """The specific action being requested from the human."""
+     config: HumanInTheLoopConfig
+     """Configuration defining what response types are allowed."""
+     description: str | None
+     """Optional detailed description of what input is needed."""
+
+
+ class AcceptPayload(TypedDict):
+     """Response when a human approves the action."""
+
+     type: Literal["accept"]
+     """The type of response when a human approves the action."""
+
+
+ class ResponsePayload(TypedDict):
+     """Response when a human rejects the action."""
+
+     type: Literal["response"]
+     """The type of response when a human rejects the action."""
+
+     args: NotRequired[str]
+     """The message to be sent to the model explaining why the action was rejected."""
+
+
+ class EditPayload(TypedDict):
+     """Response when a human edits the action."""
+
+     type: Literal["edit"]
+     """The type of response when a human edits the action."""
+
+     args: ActionRequest
+     """The action request with the edited content."""
+
+
+ HumanInTheLoopResponse = AcceptPayload | ResponsePayload | EditPayload
+ """Aggregated response type for all possible human in the loop responses."""
+
+
+ class ToolConfig(TypedDict):
+     """Configuration for a tool requiring human in the loop."""
+
+     allow_accept: NotRequired[bool]
+     """Whether the human can approve the current action without changes."""
+     allow_edit: NotRequired[bool]
+     """Whether the human can approve the current action with edited content."""
+     allow_respond: NotRequired[bool]
+     """Whether the human can reject the current action with feedback."""
+     description: NotRequired[str]
+     """The description attached to the request for human input."""
+
+
+ class HumanInTheLoopMiddleware(AgentMiddleware):
+     """Human in the loop middleware."""
+
+     def __init__(
+         self,
+         tool_configs: dict[str, bool | ToolConfig],
+         *,
+         description_prefix: str = "Tool execution requires approval",
+     ) -> None:
+         """Initialize the human in the loop middleware.
+
+         Args:
+             tool_configs: Mapping of tool name to allowed actions.
+                 If a tool doesn't have an entry, it's auto-approved by default.
+                 * `True` indicates all actions are allowed: accept, edit, and respond.
+                 * `False` indicates that the tool is auto-approved.
+                 * `ToolConfig` indicates the specific actions allowed for this tool.
+             description_prefix: The prefix to use when constructing action requests.
+                 This is used to provide context about the tool call and the action being
+                 requested. Not used if a tool has a description in its `ToolConfig`.
+         """
+         super().__init__()
+         resolved_tool_configs: dict[str, ToolConfig] = {}
+         for tool_name, tool_config in tool_configs.items():
+             if isinstance(tool_config, bool):
+                 if tool_config is True:
+                     resolved_tool_configs[tool_name] = ToolConfig(
+                         allow_accept=True,
+                         allow_edit=True,
+                         allow_respond=True,
+                     )
+             else:
+                 resolved_tool_configs[tool_name] = tool_config
+         self.tool_configs = resolved_tool_configs
+         self.description_prefix = description_prefix
+
+     def after_model(self, state: AgentState) -> dict[str, Any] | None:  # type: ignore[override]
+         """Trigger HITL flows for relevant tool calls after an AIMessage."""
+         messages = state["messages"]
+         if not messages:
+             return None
+
+         last_ai_msg = next((msg for msg in reversed(messages) if isinstance(msg, AIMessage)), None)
+         if not last_ai_msg or not last_ai_msg.tool_calls:
+             return None
+
+         # Separate tool calls that need interrupts from those that don't
+         hitl_tool_calls: list[ToolCall] = []
+         auto_approved_tool_calls = []
+
+         for tool_call in last_ai_msg.tool_calls:
+             if tool_call["name"] in self.tool_configs:
+                 hitl_tool_calls.append(tool_call)
+             else:
+                 auto_approved_tool_calls.append(tool_call)
+
+         # If no interrupts needed, return early
+         if not hitl_tool_calls:
+             return None
+
+         # Process all tool calls that require interrupts
+         approved_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
+         artificial_tool_messages: list[ToolMessage] = []
+
+         # Create interrupt requests for all tools that need approval
+         hitl_requests: list[HumanInTheLoopRequest] = []
+         for tool_call in hitl_tool_calls:
+             tool_name = tool_call["name"]
+             tool_args = tool_call["args"]
+             config = self.tool_configs[tool_name]
+             description = (
+                 config.get("description")
+                 or f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
+             )
+
+             request: HumanInTheLoopRequest = {
+                 "action_request": ActionRequest(
+                     action=tool_name,
+                     args=tool_args,
+                 ),
+                 "config": config,
+                 "description": description,
+             }
+             hitl_requests.append(request)
+
+         responses: list[HumanInTheLoopResponse] = interrupt(hitl_requests)
+
+         # Validate that the number of responses matches the number of interrupt tool calls
+         if (responses_len := len(responses)) != (hitl_tool_calls_len := len(hitl_tool_calls)):
+             msg = (
+                 f"Number of human responses ({responses_len}) does not match "
+                 f"number of hanging tool calls ({hitl_tool_calls_len})."
+             )
+             raise ValueError(msg)
+
+         for i, response in enumerate(responses):
+             tool_call = hitl_tool_calls[i]
+             config = self.tool_configs[tool_call["name"]]
+
+             if response["type"] == "accept" and config.get("allow_accept"):
+                 approved_tool_calls.append(tool_call)
+             elif response["type"] == "edit" and config.get("allow_edit"):
+                 edited_action = response["args"]
+                 approved_tool_calls.append(
+                     ToolCall(
+                         type="tool_call",
+                         name=edited_action["action"],
+                         args=edited_action["args"],
+                         id=tool_call["id"],
+                     )
+                 )
+             elif response["type"] == "response" and config.get("allow_respond"):
+                 # Create a tool message with the human's text response
+                 content = response.get("args") or (
+                     f"User rejected the tool call for `{tool_call['name']}` "
+                     f"with id {tool_call['id']}"
+                 )
+                 tool_message = ToolMessage(
+                     content=content,
+                     name=tool_call["name"],
+                     tool_call_id=tool_call["id"],
+                     status="error",
+                 )
+                 artificial_tool_messages.append(tool_message)
+             else:
+                 allowed_actions = [
+                     action
+                     for action in ["accept", "edit", "response"]
+                     if config.get(f"allow_{'respond' if action == 'response' else action}")
+                 ]
+                 msg = (
+                     f"Unexpected human response: {response}. "
+                     f"Response action '{response.get('type')}' "
+                     f"is not allowed for tool '{tool_call['name']}'. "
+                     f"Expected one of {allowed_actions} based on the tool's configuration."
+                 )
+                 raise ValueError(msg)
+
+         # Update the AI message to only include approved tool calls
+         last_ai_msg.tool_calls = approved_tool_calls
+
+         if len(approved_tool_calls) > 0:
+             return {"messages": [last_ai_msg, *artificial_tool_messages]}
+
+         return {"jump_to": "model", "messages": artificial_tool_messages}
{langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/prompt_caching.py

@@ -2,7 +2,7 @@

  from typing import Literal

- from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest
+ from langchain.agents.middleware.types import AgentMiddleware, ModelRequest


  class AnthropicPromptCachingMiddleware(AgentMiddleware):
@@ -32,7 +32,10 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
          self.ttl = ttl
          self.min_messages_to_cache = min_messages_to_cache

-     def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:  # noqa: ARG002
+     def modify_model_request(  # type: ignore[override]
+         self,
+         request: ModelRequest,
+     ) -> ModelRequest:
          """Modify the model request to add cache control blocks."""
          try:
              from langchain_anthropic import ChatAnthropic
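
The new override drops the unused `state` parameter entirely, relying on the relaxed base-class contract (hence the `type: ignore[override]`). Construction is unchanged; a hedged sketch, with argument values assumed from the `ttl` and `min_messages_to_cache` assignments visible above:

```python
from langchain.agents.middleware import AnthropicPromptCachingMiddleware

# Both keyword names come from the __init__ assignments shown in the hunk;
# "5m" is an assumed TTL value accepted by Anthropic's cache control.
caching = AnthropicPromptCachingMiddleware(ttl="5m", min_messages_to_cache=3)
```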
{langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/summarization.py

@@ -98,7 +98,7 @@ class SummarizationMiddleware(AgentMiddleware):
          self.summary_prompt = summary_prompt
          self.summary_prefix = summary_prefix

-     def before_model(self, state: AgentState) -> dict[str, Any] | None:
+     def before_model(self, state: AgentState) -> dict[str, Any] | None:  # type: ignore[override]
          """Process messages before model invocation, potentially triggering summarization."""
          messages = state["messages"]
          self._ensure_message_ids(messages)
{langchain-1.0.0a5 → langchain-1.0.0a7}/langchain/agents/middleware/types.py

@@ -8,15 +8,27 @@ from typing import TYPE_CHECKING, Annotated, Any, Generic, Literal, cast
  # needed as top level import for pydantic schema generation on AgentState
  from langchain_core.messages import AnyMessage  # noqa: TC002
  from langgraph.channels.ephemeral_value import EphemeralValue
- from langgraph.graph.message import Messages, add_messages
+ from langgraph.graph.message import add_messages
+ from langgraph.runtime import Runtime
+ from langgraph.typing import ContextT
  from typing_extensions import NotRequired, Required, TypedDict, TypeVar

  if TYPE_CHECKING:
      from langchain_core.language_models.chat_models import BaseChatModel
      from langchain_core.tools import BaseTool
+     from langgraph.runtime import Runtime

      from langchain.agents.structured_output import ResponseFormat

+ __all__ = [
+     "AgentMiddleware",
+     "AgentState",
+     "ContextT",
+     "ModelRequest",
+     "OmitFromSchema",
+     "PublicAgentState",
+ ]
+
  JumpTo = Literal["tools", "model", "__end__"]
  """Destination to jump to when a middleware node returns."""

@@ -36,26 +48,49 @@ class ModelRequest:
      model_settings: dict[str, Any] = field(default_factory=dict)


+ @dataclass
+ class OmitFromSchema:
+     """Annotation used to mark state attributes as omitted from input or output schemas."""
+
+     input: bool = True
+     """Whether to omit the attribute from the input schema."""
+
+     output: bool = True
+     """Whether to omit the attribute from the output schema."""
+
+
+ OmitFromInput = OmitFromSchema(input=True, output=False)
+ """Annotation used to mark state attributes as omitted from input schema."""
+
+ OmitFromOutput = OmitFromSchema(input=False, output=True)
+ """Annotation used to mark state attributes as omitted from output schema."""
+
+ PrivateStateAttr = OmitFromSchema(input=True, output=True)
+ """Annotation used to mark state attributes as purely internal for a given middleware."""
+
+
  class AgentState(TypedDict, Generic[ResponseT]):
      """State schema for the agent."""

      messages: Required[Annotated[list[AnyMessage], add_messages]]
-     model_request: NotRequired[Annotated[ModelRequest | None, EphemeralValue]]
-     jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue]]
+     jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue, PrivateStateAttr]]
      response: NotRequired[ResponseT]


  class PublicAgentState(TypedDict, Generic[ResponseT]):
-     """Input / output schema for the agent."""
+     """Public state schema for the agent.
+
+     Just used for typing purposes.
+     """
+
-     messages: Required[Messages]
+     messages: Required[Annotated[list[AnyMessage], add_messages]]
      response: NotRequired[ResponseT]


- StateT = TypeVar("StateT", bound=AgentState)
+ StateT = TypeVar("StateT", bound=AgentState, default=AgentState)


- class AgentMiddleware(Generic[StateT]):
+ class AgentMiddleware(Generic[StateT, ContextT]):
      """Base middleware class for an agent.

      Subclass this and implement any of the defined methods to customize agent behavior
@@ -68,12 +103,17 @@ class AgentMiddleware(Generic[StateT]):
      tools: list[BaseTool]
      """Additional tools registered by the middleware."""

-     def before_model(self, state: StateT) -> dict[str, Any] | None:
+     def before_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
          """Logic to run before the model is called."""

-     def modify_model_request(self, request: ModelRequest, state: StateT) -> ModelRequest:  # noqa: ARG002
+     def modify_model_request(
+         self,
+         request: ModelRequest,
+         state: StateT,  # noqa: ARG002
+         runtime: Runtime[ContextT],  # noqa: ARG002
+     ) -> ModelRequest:
          """Logic to modify request kwargs before the model is called."""
          return request

-     def after_model(self, state: StateT) -> dict[str, Any] | None:
+     def after_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
          """Logic to run after the model is called."""