langchain 1.0.0a5__py3-none-any.whl → 1.0.0a6__py3-none-any.whl
- langchain/agents/middleware/__init__.py +3 -0
- langchain/agents/middleware/dynamic_system_prompt.py +105 -0
- langchain/agents/middleware/human_in_the_loop.py +211 -93
- langchain/agents/middleware/prompt_caching.py +5 -2
- langchain/agents/middleware/summarization.py +1 -1
- langchain/agents/middleware/types.py +50 -10
- langchain/agents/middleware_agent.py +125 -71
- {langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/METADATA +5 -1
- {langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/RECORD +12 -13
- langchain/agents/interrupt.py +0 -97
- langchain/agents/middleware/_utils.py +0 -11
- {langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/WHEEL +0 -0
- {langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/entry_points.txt +0 -0
- {langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/licenses/LICENSE +0 -0

langchain/agents/middleware/__init__.py
@@ -1,5 +1,6 @@
 """Middleware plugins for agents."""
 
+from .dynamic_system_prompt import DynamicSystemPromptMiddleware
 from .human_in_the_loop import HumanInTheLoopMiddleware
 from .prompt_caching import AnthropicPromptCachingMiddleware
 from .summarization import SummarizationMiddleware
@@ -8,7 +9,9 @@ from .types import AgentMiddleware, AgentState, ModelRequest
 __all__ = [
     "AgentMiddleware",
     "AgentState",
+    # should move to langchain-anthropic if we decide to keep it
    "AnthropicPromptCachingMiddleware",
+    "DynamicSystemPromptMiddleware",
    "HumanInTheLoopMiddleware",
    "ModelRequest",
    "SummarizationMiddleware",
langchain/agents/middleware/dynamic_system_prompt.py
@@ -0,0 +1,105 @@
+"""Dynamic System Prompt Middleware.
+
+Allows setting the system prompt dynamically right before each model invocation.
+Useful when the prompt depends on the current agent state or per-invocation context.
+"""
+
+from __future__ import annotations
+
+from inspect import signature
+from typing import TYPE_CHECKING, Protocol, TypeAlias, cast
+
+from langgraph.typing import ContextT
+
+from langchain.agents.middleware.types import (
+    AgentMiddleware,
+    AgentState,
+    ModelRequest,
+)
+
+if TYPE_CHECKING:
+    from langgraph.runtime import Runtime
+
+
+class DynamicSystemPromptWithoutRuntime(Protocol):
+    """Dynamic system prompt without runtime in call signature."""
+
+    def __call__(self, state: AgentState) -> str:
+        """Return the system prompt for the next model call."""
+        ...
+
+
+class DynamicSystemPromptWithRuntime(Protocol[ContextT]):
+    """Dynamic system prompt with runtime in call signature."""
+
+    def __call__(self, state: AgentState, runtime: Runtime[ContextT]) -> str:
+        """Return the system prompt for the next model call."""
+        ...
+
+
+DynamicSystemPrompt: TypeAlias = (
+    DynamicSystemPromptWithoutRuntime | DynamicSystemPromptWithRuntime[ContextT]
+)
+
+
+class DynamicSystemPromptMiddleware(AgentMiddleware):
+    """Dynamic System Prompt Middleware.
+
+    Allows setting the system prompt dynamically right before each model invocation.
+    Useful when the prompt depends on the current agent state or per-invocation context.
+
+    Example:
+        ```python
+        from langchain.agents.middleware import DynamicSystemPromptMiddleware
+
+
+        class Context(TypedDict):
+            user_name: str
+
+
+        def system_prompt(state: AgentState, runtime: Runtime[Context]) -> str:
+            user_name = runtime.context.get("user_name", "n/a")
+            return (
+                f"You are a helpful assistant. Always address the user by their name: {user_name}"
+            )
+
+
+        middleware = DynamicSystemPromptMiddleware(system_prompt)
+        ```
+    """
+
+    _accepts_runtime: bool
+
+    def __init__(
+        self,
+        dynamic_system_prompt: DynamicSystemPrompt[ContextT],
+    ) -> None:
+        """Initialize the dynamic system prompt middleware.
+
+        Args:
+            dynamic_system_prompt: Function that receives the current agent state
+                and optionally runtime with context, and returns the system prompt for
+                the next model call. Returns a string.
+        """
+        super().__init__()
+        self.dynamic_system_prompt = dynamic_system_prompt
+        self._accepts_runtime = "runtime" in signature(dynamic_system_prompt).parameters
+
+    def modify_model_request(
+        self,
+        request: ModelRequest,
+        state: AgentState,
+        runtime: Runtime[ContextT],
+    ) -> ModelRequest:
+        """Modify the model request to include the dynamic system prompt."""
+        if self._accepts_runtime:
+            system_prompt = cast(
+                "DynamicSystemPromptWithRuntime[ContextT]", self.dynamic_system_prompt
+            )(state, runtime)
+        else:
+            system_prompt = cast("DynamicSystemPromptWithoutRuntime", self.dynamic_system_prompt)(
+                state
+            )
+
+        request.system_prompt = system_prompt
+        return request
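
For reference, both callback shapes accepted by the new middleware can be exercised as below; a minimal sketch using only names defined in the file above, with placeholder prompt text:

```python
# Minimal sketch of the two accepted callback shapes; the prompt text and
# Context fields are placeholders.
from langgraph.runtime import Runtime
from typing_extensions import TypedDict

from langchain.agents.middleware import DynamicSystemPromptMiddleware
from langchain.agents.middleware.types import AgentState


class Context(TypedDict):
    user_name: str


def prompt_with_runtime(state: AgentState, runtime: Runtime[Context]) -> str:
    # Declaring `runtime` makes the middleware pass it through.
    return f"Address the user as {runtime.context.get('user_name', 'n/a')}."


def prompt_without_runtime(state: AgentState) -> str:
    # The state-only signature is also accepted: __init__ inspects the
    # callable and omits the runtime argument when it is not declared.
    return "You are a helpful assistant."


with_runtime = DynamicSystemPromptMiddleware(prompt_with_runtime)
without_runtime = DynamicSystemPromptMiddleware(prompt_without_runtime)
```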

langchain/agents/middleware/human_in_the_loop.py
@@ -1,19 +1,110 @@
 """Human in the loop middleware."""
 
-from typing import Any
-
-from langchain.agents.interrupt import (
-    ActionRequest,
-    HumanInterrupt,
-    HumanInterruptConfig,
-    HumanResponse,
-)
+from typing import Any, Literal
+
+from langchain_core.messages import AIMessage, ToolCall, ToolMessage
 from langgraph.types import interrupt
+from typing_extensions import NotRequired, TypedDict
 
-from langchain.agents.middleware._utils import _generate_correction_tool_messages
 from langchain.agents.middleware.types import AgentMiddleware, AgentState
 
-
+
+class HumanInTheLoopConfig(TypedDict):
+    """Configuration that defines what actions are allowed for a human interrupt.
+
+    This controls the available interaction options when the graph is paused for human input.
+    """
+
+    allow_accept: NotRequired[bool]
+    """Whether the human can approve the current action without changes."""
+    allow_edit: NotRequired[bool]
+    """Whether the human can approve the current action with edited content."""
+    allow_respond: NotRequired[bool]
+    """Whether the human can reject the current action with feedback."""
+
+
+class ActionRequest(TypedDict):
+    """Represents a request with a name and arguments."""
+
+    action: str
+    """The type or name of action being requested (e.g., "add_numbers")."""
+    args: dict
+    """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+
+
+class HumanInTheLoopRequest(TypedDict):
+    """Represents an interrupt triggered by the graph that requires human intervention.
+
+    Example:
+        ```python
+        # Extract a tool call from the state and create an interrupt request
+        request = HumanInTheLoopRequest(
+            action_request=ActionRequest(
+                action="run_command",  # The action being requested
+                args={"command": "ls", "args": ["-l"]},  # Arguments for the action
+            ),
+            config=HumanInTheLoopConfig(
+                allow_accept=True,  # Allow approval
+                allow_respond=True,  # Allow rejection with feedback
+                allow_edit=False,  # Don't allow approval with edits
+            ),
+            description="Please review the command before execution",
+        )
+        # Send the interrupt request and get the response
+        response = interrupt([request])[0]
+        ```
+    """
+
+    action_request: ActionRequest
+    """The specific action being requested from the human."""
+    config: HumanInTheLoopConfig
+    """Configuration defining what response types are allowed."""
+    description: str | None
+    """Optional detailed description of what input is needed."""
+
+
+class AcceptPayload(TypedDict):
+    """Response when a human approves the action."""
+
+    type: Literal["accept"]
+    """The type of response when a human approves the action."""
+
+
+class ResponsePayload(TypedDict):
+    """Response when a human rejects the action."""
+
+    type: Literal["response"]
+    """The type of response when a human rejects the action."""
+
+    args: NotRequired[str]
+    """The message to be sent to the model explaining why the action was rejected."""
+
+
+class EditPayload(TypedDict):
+    """Response when a human edits the action."""
+
+    type: Literal["edit"]
+    """The type of response when a human edits the action."""
+
+    args: ActionRequest
+    """The action request with the edited content."""
+
+
+HumanInTheLoopResponse = AcceptPayload | ResponsePayload | EditPayload
+"""Aggregated response type for all possible human in the loop responses."""
+
+
+class ToolConfig(TypedDict):
+    """Configuration for a tool requiring human in the loop."""
+
+    allow_accept: NotRequired[bool]
+    """Whether the human can approve the current action without changes."""
+    allow_edit: NotRequired[bool]
+    """Whether the human can approve the current action with edited content."""
+    allow_respond: NotRequired[bool]
+    """Whether the human can reject the current action with feedback."""
+    description: NotRequired[str]
+    """The description attached to the request for human input."""
 
 
 class HumanInTheLoopMiddleware(AgentMiddleware):
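
For orientation, the three resume payload shapes defined above look like this as plain dicts; the values are illustrative:

```python
# Illustrative values for the three response payload shapes defined above.
accept_payload = {"type": "accept"}
response_payload = {"type": "response", "args": "Do not touch production data."}
edit_payload = {
    "type": "edit",
    "args": {"action": "run_command", "args": {"command": "ls"}},
}
```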
@@ -21,115 +112,142 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 
     def __init__(
         self,
-        tool_configs:
-
+        tool_configs: dict[str, bool | ToolConfig],
+        *,
+        description_prefix: str = "Tool execution requires approval",
     ) -> None:
         """Initialize the human in the loop middleware.
 
         Args:
-            tool_configs:
-
+            tool_configs: Mapping of tool name to allowed actions.
+                If a tool doesn't have an entry, it's auto-approved by default.
+                * `True` indicates all actions are allowed: accept, edit, and respond.
+                * `False` indicates that the tool is auto-approved.
+                * ToolConfig indicates the specific actions allowed for this tool.
+            description_prefix: The prefix to use when constructing action requests.
+                This is used to provide context about the tool call and the action being requested.
+                Not used if a tool has a description in its ToolConfig.
         """
         super().__init__()
-
-
+        resolved_tool_configs: dict[str, ToolConfig] = {}
+        for tool_name, tool_config in tool_configs.items():
+            if isinstance(tool_config, bool):
+                if tool_config is True:
+                    resolved_tool_configs[tool_name] = ToolConfig(
+                        allow_accept=True,
+                        allow_edit=True,
+                        allow_respond=True,
+                    )
+            else:
+                resolved_tool_configs[tool_name] = tool_config
+        self.tool_configs = resolved_tool_configs
+        self.description_prefix = description_prefix
 
-    def after_model(self, state: AgentState) -> dict[str, Any] | None:
+    def after_model(self, state: AgentState) -> dict[str, Any] | None:  # type: ignore[override]
         """Trigger HITL flows for relevant tool calls after an AIMessage."""
         messages = state["messages"]
         if not messages:
             return None
 
-
-
-        if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
+        last_ai_msg = next((msg for msg in reversed(messages) if isinstance(msg, AIMessage)), None)
+        if not last_ai_msg or not last_ai_msg.tool_calls:
             return None
 
         # Separate tool calls that need interrupts from those that don't
-
+        hitl_tool_calls: list[ToolCall] = []
         auto_approved_tool_calls = []
 
-        for tool_call in last_message.tool_calls:
-
-
-
-            else:
-                auto_approved_tool_calls.append(tool_call)
+        for tool_call in last_ai_msg.tool_calls:
+            hitl_tool_calls.append(tool_call) if tool_call[
+                "name"
+            ] in self.tool_configs else auto_approved_tool_calls.append(tool_call)
 
         # If no interrupts needed, return early
-        if not interrupt_tool_calls:
+        if not hitl_tool_calls:
             return None
 
-
+        # Process all tool calls that require interrupts
+        approved_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
+        artificial_tool_messages: list[ToolMessage] = []
 
-        #
-
-
-
-
-
+        # Create interrupt requests for all tools that need approval
+        hitl_requests: list[HumanInTheLoopRequest] = []
+        for tool_call in hitl_tool_calls:
+            tool_name = tool_call["name"]
+            tool_args = tool_call["args"]
+            config = self.tool_configs[tool_name]
+            description = (
+                config.get("description")
+                or f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
             )
-
-
-            "
+
+            request: HumanInTheLoopRequest = {
+                "action_request": ActionRequest(
+                    action=tool_name,
+                    args=tool_args,
+                ),
+                "config": config,
+                "description": description,
             }
+            hitl_requests.append(request)
 
-
-
-
+        responses: list[HumanInTheLoopResponse] = interrupt(hitl_requests)
+
+        # Validate that the number of responses matches the number of interrupt tool calls
+        if (responses_len := len(responses)) != (hitl_tool_calls_len := len(hitl_tool_calls)):
             msg = (
-                f"
-                "
-                "If you call a tool that requires and interrupt, you may ONLY call that tool."
+                f"Number of human responses ({responses_len}) does not match "
+                f"number of hanging tool calls ({hitl_tool_calls_len})."
             )
-            return {
-                "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
-                "jump_to": "model",
-            }
-
-        # Only one tool call will need interrupts
-        tool_call = interrupt_tool_calls[0]
-        tool_name = tool_call["name"]
-        tool_args = tool_call["args"]
-        description = f"{self.message_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
-        tool_config = self.tool_configs[tool_name]
-
-        request: HumanInterrupt = {
-            "action_request": ActionRequest(
-                action=tool_name,
-                args=tool_args,
-            ),
-            "config": tool_config,
-            "description": description,
-        }
-
-        responses: list[HumanResponse] = interrupt([request])
-        response = responses[0]
-
-        if response["type"] == "accept":
-            approved_tool_calls.append(tool_call)
-        elif response["type"] == "edit":
-            edited: ActionRequest = response["args"]  # type: ignore[assignment]
-            new_tool_call = {
-                "type": "tool_call",
-                "name": tool_call["name"],
-                "args": edited["args"],
-                "id": tool_call["id"],
-            }
-            approved_tool_calls.append(new_tool_call)
-        elif response["type"] == "ignore":
-            return {"jump_to": "__end__"}
-        elif response["type"] == "response":
-            tool_message = {
-                "role": "tool",
-                "tool_call_id": tool_call["id"],
-                "content": response["args"],
-            }
-            return {"messages": [tool_message], "jump_to": "model"}
-        else:
-            msg = f"Unknown response type: {response['type']}"
             raise ValueError(msg)
 
-
-
-
+        for i, response in enumerate(responses):
+            tool_call = hitl_tool_calls[i]
+            config = self.tool_configs[tool_call["name"]]
+
+            if response["type"] == "accept" and config.get("allow_accept"):
+                approved_tool_calls.append(tool_call)
+            elif response["type"] == "edit" and config.get("allow_edit"):
+                edited_action = response["args"]
+                approved_tool_calls.append(
+                    ToolCall(
+                        type="tool_call",
+                        name=edited_action["action"],
+                        args=edited_action["args"],
+                        id=tool_call["id"],
+                    )
+                )
+            elif response["type"] == "response" and config.get("allow_respond"):
+                # Create a tool message with the human's text response
+                content = response.get("args") or (
+                    f"User rejected the tool call for `{tool_call['name']}` "
+                    f"with id {tool_call['id']}"
+                )
+                tool_message = ToolMessage(
+                    content=content,
+                    name=tool_call["name"],
+                    tool_call_id=tool_call["id"],
+                    status="error",
+                )
+                artificial_tool_messages.append(tool_message)
+            else:
+                allowed_actions = [
+                    action
+                    for action in ["accept", "edit", "response"]
+                    if config.get(f"allow_{'respond' if action == 'response' else action}")
+                ]
+                msg = (
+                    f"Unexpected human response: {response}. "
+                    f"Response action '{response.get('type')}' "
+                    f"is not allowed for tool '{tool_call['name']}'. "
+                    f"Expected one of {allowed_actions} based on the tool's configuration."
+                )
+                raise ValueError(msg)
+
+        # Update the AI message to only include approved tool calls
+        last_ai_msg.tool_calls = approved_tool_calls
+
+        if len(approved_tool_calls) > 0:
+            return {"messages": [last_ai_msg, *artificial_tool_messages]}
+
+        return {"jump_to": "model", "messages": artificial_tool_messages}
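
A hedged end-to-end sketch of the reworked flow: per-tool configuration, then a resume carrying one response per interrupted tool call, in request order. The compiled `agent` graph and the tool names are placeholders:

```python
# Hedged sketch: configuring the middleware and building a resume command.
# Tool names are placeholders; `agent` stands for a compiled,
# checkpointer-enabled graph built elsewhere.
from langgraph.types import Command

from langchain.agents.middleware import HumanInTheLoopMiddleware

hitl = HumanInTheLoopMiddleware(
    tool_configs={
        "read_docs": False,  # auto-approved
        "send_email": True,  # accept, edit, and respond all allowed
        "run_command": {
            "allow_accept": True,
            "allow_respond": True,
            "allow_edit": False,
            "description": "Review the shell command before it runs",
        },
    },
)

# One response per interrupted tool call, in the order the requests were built:
resume = Command(resume=[{"type": "accept"}])
# agent.invoke(resume, {"configurable": {"thread_id": "thread-1"}})
```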
langchain/agents/middleware/prompt_caching.py
@@ -2,7 +2,7 @@
 
 from typing import Literal
 
-from langchain.agents.middleware.types import AgentMiddleware,
+from langchain.agents.middleware.types import AgentMiddleware, ModelRequest
 
 
 class AnthropicPromptCachingMiddleware(AgentMiddleware):
@@ -32,7 +32,10 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
         self.ttl = ttl
         self.min_messages_to_cache = min_messages_to_cache
 
-    def modify_model_request(
+    def modify_model_request(  # type: ignore[override]
+        self,
+        request: ModelRequest,
+    ) -> ModelRequest:
         """Modify the model request to add cache control blocks."""
         try:
             from langchain_anthropic import ChatAnthropic
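
For context, construction is unchanged; a minimal sketch, assuming the constructor keywords mirror the attributes assigned in `__init__` above ("5m" is an illustrative Anthropic cache TTL):

```python
# Minimal sketch, assuming the constructor keywords mirror the attributes
# assigned in __init__; "5m" is an illustrative Anthropic cache TTL.
from langchain.agents.middleware import AnthropicPromptCachingMiddleware

caching = AnthropicPromptCachingMiddleware(ttl="5m", min_messages_to_cache=1)
```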
langchain/agents/middleware/summarization.py
@@ -98,7 +98,7 @@ class SummarizationMiddleware(AgentMiddleware):
         self.summary_prompt = summary_prompt
         self.summary_prefix = summary_prefix
 
-    def before_model(self, state: AgentState) -> dict[str, Any] | None:
+    def before_model(self, state: AgentState) -> dict[str, Any] | None:  # type: ignore[override]
         """Process messages before model invocation, potentially triggering summarization."""
         messages = state["messages"]
         self._ensure_message_ids(messages)
langchain/agents/middleware/types.py
@@ -8,15 +8,27 @@ from typing import TYPE_CHECKING, Annotated, Any, Generic, Literal, cast
 # needed as top level import for pydantic schema generation on AgentState
 from langchain_core.messages import AnyMessage  # noqa: TC002
 from langgraph.channels.ephemeral_value import EphemeralValue
-from langgraph.graph.message import
+from langgraph.graph.message import add_messages
+from langgraph.runtime import Runtime
+from langgraph.typing import ContextT
 from typing_extensions import NotRequired, Required, TypedDict, TypeVar
 
 if TYPE_CHECKING:
     from langchain_core.language_models.chat_models import BaseChatModel
     from langchain_core.tools import BaseTool
+    from langgraph.runtime import Runtime
 
     from langchain.agents.structured_output import ResponseFormat
 
+__all__ = [
+    "AgentMiddleware",
+    "AgentState",
+    "ContextT",
+    "ModelRequest",
+    "OmitFromSchema",
+    "PublicAgentState",
+]
+
 JumpTo = Literal["tools", "model", "__end__"]
 """Destination to jump to when a middleware node returns."""
 
@@ -36,26 +48,49 @@ class ModelRequest:
     model_settings: dict[str, Any] = field(default_factory=dict)
 
 
+@dataclass
+class OmitFromSchema:
+    """Annotation used to mark state attributes as omitted from input or output schemas."""
+
+    input: bool = True
+    """Whether to omit the attribute from the input schema."""
+
+    output: bool = True
+    """Whether to omit the attribute from the output schema."""
+
+
+OmitFromInput = OmitFromSchema(input=True, output=False)
+"""Annotation used to mark state attributes as omitted from input schema."""
+
+OmitFromOutput = OmitFromSchema(input=False, output=True)
+"""Annotation used to mark state attributes as omitted from output schema."""
+
+PrivateStateAttr = OmitFromSchema(input=True, output=True)
+"""Annotation used to mark state attributes as purely internal for a given middleware."""
+
+
 class AgentState(TypedDict, Generic[ResponseT]):
     """State schema for the agent."""
 
     messages: Required[Annotated[list[AnyMessage], add_messages]]
-
-    jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue]]
+    jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue, PrivateStateAttr]]
     response: NotRequired[ResponseT]
 
 
 class PublicAgentState(TypedDict, Generic[ResponseT]):
-    """
+    """Public state schema for the agent.
 
-
+    Just used for typing purposes.
+    """
+
+    messages: Required[Annotated[list[AnyMessage], add_messages]]
     response: NotRequired[ResponseT]
 
 
-StateT = TypeVar("StateT", bound=AgentState)
+StateT = TypeVar("StateT", bound=AgentState, default=AgentState)
 
 
-class AgentMiddleware(Generic[StateT]):
+class AgentMiddleware(Generic[StateT, ContextT]):
     """Base middleware class for an agent.
 
     Subclass this and implement any of the defined methods to customize agent behavior
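
A short sketch of what the new markers enable on a custom middleware state; the two extra fields are hypothetical:

```python
# Sketch of the schema-omission markers on a hypothetical middleware state;
# `call_count` and `scratch` are illustrative fields only.
from typing import Annotated

from typing_extensions import NotRequired

from langchain.agents.middleware.types import (
    AgentState,
    OmitFromInput,
    PrivateStateAttr,
)


class MyMiddlewareState(AgentState):
    # surfaced in output but not accepted as graph input
    call_count: NotRequired[Annotated[int, OmitFromInput]]
    # never surfaces in input or output schemas
    scratch: NotRequired[Annotated[dict, PrivateStateAttr]]
```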
@@ -68,12 +103,17 @@ class AgentMiddleware(Generic[StateT]):
     tools: list[BaseTool]
     """Additional tools registered by the middleware."""
 
-    def before_model(self, state: StateT) -> dict[str, Any] | None:
+    def before_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
         """Logic to run before the model is called."""
 
-    def modify_model_request(
+    def modify_model_request(
+        self,
+        request: ModelRequest,
+        state: StateT,  # noqa: ARG002
+        runtime: Runtime[ContextT],  # noqa: ARG002
+    ) -> ModelRequest:
         """Logic to modify request kwargs before the model is called."""
         return request
 
-    def after_model(self, state: StateT) -> dict[str, Any] | None:
+    def after_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
         """Logic to run after the model is called."""
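
With the base class now `Generic[StateT, ContextT]` and every hook receiving a `runtime`, a subclass looks like the sketch below; the logging body is illustrative only:

```python
# Hedged sketch of a middleware subclass against the new hook signatures;
# the print statement is illustrative only.
from typing import Any

from langgraph.runtime import Runtime

from langchain.agents.middleware.types import (
    AgentMiddleware,
    AgentState,
    ModelRequest,
)


class RequestLoggingMiddleware(AgentMiddleware[AgentState, Any]):
    def modify_model_request(
        self,
        request: ModelRequest,
        state: AgentState,
        runtime: Runtime[Any],
    ) -> ModelRequest:
        print(f"model call with {len(state['messages'])} messages")
        return request
```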
langchain/agents/middleware_agent.py
@@ -2,26 +2,28 @@
 
 import itertools
 from collections.abc import Callable, Sequence
-from
+from inspect import signature
+from typing import Annotated, Any, cast, get_args, get_origin, get_type_hints
 
 from langchain_core.language_models.chat_models import BaseChatModel
-from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
+from langchain_core.messages import AIMessage, AnyMessage, SystemMessage, ToolMessage
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langgraph.constants import END, START
 from langgraph.graph.state import StateGraph
+from langgraph.runtime import Runtime
+from langgraph.types import Send
 from langgraph.typing import ContextT
-from typing_extensions import TypedDict, TypeVar
+from typing_extensions import NotRequired, Required, TypedDict, TypeVar
 
 from langchain.agents.middleware.types import (
     AgentMiddleware,
     AgentState,
     JumpTo,
     ModelRequest,
+    OmitFromSchema,
     PublicAgentState,
 )
-
-# Import structured output classes from the old implementation
 from langchain.agents.structured_output import (
     MultipleStructuredOutputsError,
     OutputToolBinding,
@@ -37,26 +39,49 @@ from langchain.chat_models import init_chat_model
 STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
 
 
-def
-    """
-    if not schemas:
-        return AgentState
+def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None = None) -> type:
+    """Resolve schema by merging schemas and optionally respecting OmitFromSchema annotations.
 
+    Args:
+        schemas: List of schema types to merge
+        schema_name: Name for the generated TypedDict
+        omit_flag: If specified, omit fields with this flag set ('input' or 'output')
+    """
     all_annotations = {}
 
     for schema in schemas:
-
+        hints = get_type_hints(schema, include_extras=True)
+
+        for field_name, field_type in hints.items():
+            should_omit = False
+
+            if omit_flag:
+                # Check for omission in the annotation metadata
+                metadata = _extract_metadata(field_type)
+                for meta in metadata:
+                    if isinstance(meta, OmitFromSchema) and getattr(meta, omit_flag) is True:
+                        should_omit = True
+                        break
+
+            if not should_omit:
+                all_annotations[field_name] = field_type
 
-    return TypedDict(
+    return TypedDict(schema_name, all_annotations)  # type: ignore[operator]
 
 
-def
-    """
-
-
+def _extract_metadata(type_: type) -> list:
+    """Extract metadata from a field type, handling Required/NotRequired and Annotated wrappers."""
+    # Handle Required[Annotated[...]] or NotRequired[Annotated[...]]
+    if get_origin(type_) in (Required, NotRequired):
+        inner_type = get_args(type_)[0]
+        if get_origin(inner_type) is Annotated:
+            return list(get_args(inner_type)[1:])
 
-
-
+    # Handle direct Annotated[...]
+    elif get_origin(type_) is Annotated:
+        return list(get_args(type_)[1:])
+
+    return []
 
 
 def _supports_native_structured_output(model: str | BaseChatModel) -> bool:
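
A quick illustration of what `_extract_metadata` returns for the three annotation shapes it handles; the helper is private, so the import is for demonstration only:

```python
# Demonstration of _extract_metadata on the annotation shapes it handles;
# the helper is private, so this import is illustrative only.
from typing import Annotated

from typing_extensions import NotRequired

from langchain.agents.middleware_agent import _extract_metadata

assert _extract_metadata(Annotated[int, "meta"]) == ["meta"]
assert _extract_metadata(NotRequired[Annotated[int, "meta"]]) == ["meta"]
assert _extract_metadata(int) == []
```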
@@ -113,7 +138,7 @@ def create_agent(  # noqa: PLR0915
     model: str | BaseChatModel,
     tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode | None = None,
     system_prompt: str | None = None,
-    middleware: Sequence[AgentMiddleware] = (),
+    middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
     response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
     context_schema: type[ContextT] | None = None,
 ) -> StateGraph[
@@ -198,28 +223,30 @@ def create_agent(  # noqa: PLR0915
         m for m in middleware if m.__class__.after_model is not AgentMiddleware.after_model
     ]
 
-
-
-
-    )
+    state_schemas = {m.state_schema for m in middleware}
+    state_schemas.add(AgentState)
+
+    state_schema = _resolve_schema(state_schemas, "StateSchema", None)
+    input_schema = _resolve_schema(state_schemas, "InputSchema", "input")
+    output_schema = _resolve_schema(state_schemas, "OutputSchema", "output")
 
     # create graph, add nodes
-    graph
-
-
-
+    graph: StateGraph[
+        AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
+    ] = StateGraph(
+        state_schema=state_schema,
+        input_schema=input_schema,
+        output_schema=output_schema,
         context_schema=context_schema,
     )
 
-    def _handle_model_output(
+    def _handle_model_output(output: AIMessage) -> dict[str, Any]:
         """Handle model output including structured responses."""
         # Handle structured output with native strategy
         if isinstance(response_format, ProviderStrategy):
             if not output.tool_calls and native_output_binding:
                 structured_response = native_output_binding.parse(output)
                 return {"messages": [output], "response": structured_response}
-            if state.get("response") is not None:
-                return {"messages": [output], "response": None}
             return {"messages": [output]}
 
         # Handle structured output with tools strategy
@@ -297,9 +324,6 @@ def create_agent(  # noqa: PLR0915
             ],
         }
 
-        # Standard response handling
-        if state.get("response") is not None:
-            return {"messages": [output], "response": None}
         return {"messages": [output]}
 
     def _get_bound_model(request: ModelRequest) -> Runnable:
@@ -322,7 +346,14 @@ def create_agent(  # noqa: PLR0915
         )
         return request.model.bind(**request.model_settings)
 
-
+    model_request_signatures: list[
+        tuple[bool, AgentMiddleware[AgentState[ResponseT], ContextT]]
+    ] = [
+        ("runtime" in signature(m.modify_model_request).parameters, m)
+        for m in middleware_w_modify_model_request
+    ]
+
+    def model_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
         """Sync model request handler with sequential middleware processing."""
         request = ModelRequest(
             model=model,
@@ -334,10 +365,11 @@ def create_agent(  # noqa: PLR0915
         )
 
         # Apply modify_model_request middleware in sequence
-        for m in
-
-
-
+        for use_runtime, m in model_request_signatures:
+            if use_runtime:
+                m.modify_model_request(request, state, runtime)
+            else:
+                m.modify_model_request(request, state)  # type: ignore[call-arg]
 
         # Get the final model and messages
         model_ = _get_bound_model(request)
@@ -346,9 +378,9 @@ def create_agent(  # noqa: PLR0915
         messages = [SystemMessage(request.system_prompt), *messages]
 
         output = model_.invoke(messages)
-        return _handle_model_output(
+        return _handle_model_output(output)
 
-    async def amodel_request(state:
+    async def amodel_request(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
         """Async model request handler with sequential middleware processing."""
         # Start with the base model request
         request = ModelRequest(
@@ -361,10 +393,11 @@ def create_agent(  # noqa: PLR0915
         )
 
         # Apply modify_model_request middleware in sequence
-        for m in
-
-
-
+        for use_runtime, m in model_request_signatures:
+            if use_runtime:
+                m.modify_model_request(request, state, runtime)
+            else:
+                m.modify_model_request(request, state)  # type: ignore[call-arg]
 
         # Get the final model and messages
         model_ = _get_bound_model(request)
@@ -373,7 +406,7 @@ def create_agent(  # noqa: PLR0915
         messages = [SystemMessage(request.system_prompt), *messages]
 
         output = await model_.ainvoke(messages)
-        return _handle_model_output(
+        return _handle_model_output(output)
 
     # Use sync or async based on model capabilities
     from langgraph._internal._runnable import RunnableCallable
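
The `model_request_signatures` table built above is a compatibility shim: middleware whose `modify_model_request` predates the `runtime` parameter keeps being called with the two-argument form. A standalone sketch of the same inspection, with hypothetical stand-in hooks:

```python
# Standalone sketch of the signature inspection used above; both hook
# functions are hypothetical stand-ins for middleware methods.
from inspect import signature


def accepts_runtime(fn) -> bool:
    return "runtime" in signature(fn).parameters


def legacy_hook(request, state):
    return request


def current_hook(request, state, runtime):
    return request


assert accepts_runtime(legacy_hook) is False
assert accepts_runtime(current_hook) is True
```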
@@ -388,16 +421,12 @@ def create_agent(  # noqa: PLR0915
     for m in middleware:
         if m.__class__.before_model is not AgentMiddleware.before_model:
             graph.add_node(
-                f"{m.__class__.__name__}.before_model",
-                m.before_model,
-                input_schema=m.state_schema,
+                f"{m.__class__.__name__}.before_model", m.before_model, input_schema=state_schema
             )
 
         if m.__class__.after_model is not AgentMiddleware.after_model:
             graph.add_node(
-                f"{m.__class__.__name__}.after_model",
-                m.after_model,
-                input_schema=m.state_schema,
+                f"{m.__class__.__name__}.after_model", m.after_model, input_schema=state_schema
             )
 
     # add start edge
@@ -417,12 +446,12 @@ def create_agent(  # noqa: PLR0915
     if tool_node is not None:
         graph.add_conditional_edges(
             "tools",
-            _make_tools_to_model_edge(tool_node, first_node),
+            _make_tools_to_model_edge(tool_node, first_node, structured_output_tools),
             [first_node, END],
         )
         graph.add_conditional_edges(
             last_node,
-            _make_model_to_tools_edge(first_node, structured_output_tools),
+            _make_model_to_tools_edge(first_node, structured_output_tools, tool_node),
             [first_node, "tools", END],
         )
     elif last_node == "model_request":
@@ -481,27 +510,48 @@ def _resolve_jump(jump_to: JumpTo | None, first_node: str) -> str | None:
     return None
 
 
+def _fetch_last_ai_and_tool_messages(
+    messages: list[AnyMessage],
+) -> tuple[AIMessage, list[ToolMessage]]:
+    last_ai_index: int
+    last_ai_message: AIMessage
+
+    for i in range(len(messages) - 1, -1, -1):
+        if isinstance(messages[i], AIMessage):
+            last_ai_index = i
+            last_ai_message = cast("AIMessage", messages[i])
+            break
+
+    tool_messages = [m for m in messages[last_ai_index + 1 :] if isinstance(m, ToolMessage)]
+    return last_ai_message, tool_messages
+
+
 def _make_model_to_tools_edge(
-    first_node: str, structured_output_tools: dict[str, OutputToolBinding]
-) -> Callable[[AgentState], str | None]:
-    def model_to_tools(state: AgentState) -> str | None:
+    first_node: str, structured_output_tools: dict[str, OutputToolBinding], tool_node: ToolNode
+) -> Callable[[AgentState], str | list[Send] | None]:
+    def model_to_tools(state: AgentState) -> str | list[Send] | None:
         if jump_to := state.get("jump_to"):
             return _resolve_jump(jump_to, first_node)
 
-
-
-
-
-
-
-
-
-
-
-
+        last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])
+        tool_message_ids = [m.tool_call_id for m in tool_messages]
+
+        pending_tool_calls = [
+            c
+            for c in last_ai_message.tool_calls
+            if c["id"] not in tool_message_ids and c["name"] not in structured_output_tools
+        ]
+
+        if pending_tool_calls:
+            # imo we should not be injecting state, store here,
+            # this should be done by the tool node itself ideally but this is a consequence
+            # of using Send w/ tool calls directly which allows more intuitive interrupt behavior
+            # largely internal so can be fixed later
+            pending_tool_calls = [
+                tool_node.inject_tool_args(call, state, None)  # type: ignore[arg-type]
+                for call in pending_tool_calls
             ]
-
-        return "tools"
+            return [Send("tools", [tool_call]) for tool_call in pending_tool_calls]
 
         return END
 
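
The edge now fans out one `Send` per pending tool call instead of returning the bare `"tools"` node name, which is what allows each call to be interrupted and resumed independently. A toy illustration of the returned shape, with placeholder tool calls:

```python
# Toy illustration of the Send fan-out returned above; the tool calls are
# placeholder dicts in the ToolCall shape.
from langgraph.types import Send

pending_tool_calls = [
    {"type": "tool_call", "name": "search", "args": {"q": "weather"}, "id": "call_1"},
    {"type": "tool_call", "name": "calculator", "args": {"x": 2}, "id": "call_2"},
]
sends = [Send("tools", [tool_call]) for tool_call in pending_tool_calls]
```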
@@ -509,17 +559,21 @@ def _make_model_to_tools_edge(
 
 
 def _make_tools_to_model_edge(
-    tool_node: ToolNode, next_node: str
+    tool_node: ToolNode, next_node: str, structured_output_tools: dict[str, OutputToolBinding]
 ) -> Callable[[AgentState], str | None]:
     def tools_to_model(state: AgentState) -> str | None:
-
+        last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])
+
         if all(
             tool_node.tools_by_name[c["name"]].return_direct
-            for c in
+            for c in last_ai_message.tool_calls
             if c["name"] in tool_node.tools_by_name
         ):
             return END
 
+        if any(t.name in structured_output_tools for t in tool_messages):
+            return END
+
         return next_node
 
     return tools_to_model
{langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 1.0.0a5
+Version: 1.0.0a6
 Summary: Building applications with LLMs through composability
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -11,12 +11,16 @@ Requires-Dist: langchain-core<2.0.0,>=0.3.75
 Requires-Dist: langchain-text-splitters<1.0.0,>=0.3.11
 Requires-Dist: langgraph>=0.6.7
 Requires-Dist: pydantic>=2.7.4
+Provides-Extra: community
+Requires-Dist: langchain-community; extra == "community"
 Provides-Extra: anthropic
 Requires-Dist: langchain-anthropic; extra == "anthropic"
 Provides-Extra: openai
 Requires-Dist: langchain-openai; extra == "openai"
 Provides-Extra: azure-ai
 Requires-Dist: langchain-azure-ai; extra == "azure-ai"
+Provides-Extra: cohere
+Requires-Dist: langchain-cohere; extra == "cohere"
 Provides-Extra: google-vertexai
 Requires-Dist: langchain-google-vertexai; extra == "google-vertexai"
 Provides-Extra: google-genai
{langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/RECORD
@@ -1,7 +1,7 @@
-langchain-1.0.
-langchain-1.0.
-langchain-1.0.
-langchain-1.0.
+langchain-1.0.0a6.dist-info/METADATA,sha256=aQuJBnnQyJd-FIIP8hM5wyak8eIB0IydkPjHePq5doo,6236
+langchain-1.0.0a6.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+langchain-1.0.0a6.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain-1.0.0a6.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
 langchain/__init__.py,sha256=PFBJL2dLkGgUXz9JY16bIhvoo4rwZryOiatsWVexLJk,604
 langchain/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/_internal/_documents.py,sha256=z9wAPukoASOMw4WTFFBKCCZYsvsKbo-Cq6CeHjdq9eE,1045
@@ -12,14 +12,13 @@ langchain/_internal/_utils.py,sha256=lG8X9muiRAWtQjRPudq-1x-wHbk0J3spu_rYZckVdYs
 langchain/agents/__init__.py,sha256=NG2S3dic9L3i4sAD9mpgaTv6Dl4L3u45xxK6jn-I4W8,281
 langchain/agents/_internal/__init__.py,sha256=5nNBeaeQIvv9IOQjY4_aNW8pffWzMXQgi0b6Nx-WghM,37
 langchain/agents/_internal/_typing.py,sha256=JoWa-KL5uLNeq6yrm56wnIvhDeFnCt2fTzgUcj5zWy4,270
-langchain/agents/
-langchain/agents/middleware/
-langchain/agents/middleware/
-langchain/agents/middleware/
-langchain/agents/middleware/
-langchain/agents/middleware/
-langchain/agents/
-langchain/agents/middleware_agent.py,sha256=Qy1QnX9c3MArYqZtpFNUDgcvgPFrui1cyEYRevMn5W0,21429
+langchain/agents/middleware/__init__.py,sha256=Q68coEBPtxTnb7LuDDSKvUrK0RvppqEwyyzpE8-RgiE,613
+langchain/agents/middleware/dynamic_system_prompt.py,sha256=uakW4wyVc9h52T2QO4BeKWmbc2UK31VqFGuMMvj9wX8,3267
+langchain/agents/middleware/human_in_the_loop.py,sha256=xBP_Xc8ZMLUeWLzJyd4GH77mUapsL_7ilKDNLd8_tjE,10091
+langchain/agents/middleware/prompt_caching.py,sha256=du_qrBr0_kwWhdO_xggtfrEN5FTcGLKu3oYQDnSS0Do,2263
+langchain/agents/middleware/summarization.py,sha256=qqEqAuJXQ5rfewhFHftHLnrX8jhdMu9dPfz0akhzfuc,10281
+langchain/agents/middleware/types.py,sha256=DRsl0GjgWXbPlFTiiVnI8pMhzMJF3Y2VkE2zLMKQhaY,3826
+langchain/agents/middleware_agent.py,sha256=hfIt4LdtDjkZGs0ylo8xti67iecpPEsuklYCjJ20V8k,23746
 langchain/agents/react_agent.py,sha256=6ZNI2dp0hTL7hTm7ao-HkQ3hmVvBQuFu9pJz0PSK_eg,49712
 langchain/agents/structured_output.py,sha256=QWNafJx7au_jJawJgIfovnDoP8Z9mLxDZNvDX_1RRJ0,13327
 langchain/agents/tool_node.py,sha256=QabTfIi8nGrwfzaSOeWfyHos6sgXjFTdRXexQG7u2HE,46596
@@ -37,4 +36,4 @@ langchain/storage/exceptions.py,sha256=Fl_8tON3KmByBKwXtno5WSj0-c2RiZxnhw3gv5aS2
 langchain/storage/in_memory.py,sha256=ozrmu0EtaJJVSAzK_u7nzxWpr9OOscWkANHSg-qIVYQ,369
 langchain/text_splitter.py,sha256=yxWs4secpnkfK6VZDiNJNdlYOrRZ18RQZj1S3xNQ73A,1554
 langchain/tools/__init__.py,sha256=NYQzLxW2iI5Twu3voefVC-dJEI4Wgh7jC311CQEpvZs,252
-langchain-1.0.0a5.dist-info/RECORD,,
+langchain-1.0.0a6.dist-info/RECORD,,
langchain/agents/interrupt.py DELETED
@@ -1,97 +0,0 @@
-"""Interrupt types to use with agent inbox like setups."""
-
-from typing import Literal
-
-from typing_extensions import TypedDict
-
-
-class HumanInterruptConfig(TypedDict):
-    """Configuration that defines what actions are allowed for a human interrupt.
-
-    This controls the available interaction options when the graph is paused for human input.
-
-    Attributes:
-        allow_ignore: Whether the human can choose to ignore/skip the current step
-        allow_respond: Whether the human can provide a text response/feedback
-        allow_edit: Whether the human can edit the provided content/state
-        allow_accept: Whether the human can accept/approve the current state
-    """
-
-    allow_ignore: bool
-    allow_respond: bool
-    allow_edit: bool
-    allow_accept: bool
-
-
-class ActionRequest(TypedDict):
-    """Represents a request for human action within the graph execution.
-
-    Contains the action type and any associated arguments needed for the action.
-
-    Attributes:
-        action: The type or name of action being requested (e.g., "Approve XYZ action")
-        args: Key-value pairs of arguments needed for the action
-    """
-
-    action: str
-    args: dict
-
-
-class HumanInterrupt(TypedDict):
-    """Represents an interrupt triggered by the graph that requires human intervention.
-
-    This is passed to the `interrupt` function when execution is paused for human input.
-
-    Attributes:
-        action_request: The specific action being requested from the human
-        config: Configuration defining what actions are allowed
-        description: Optional detailed description of what input is needed
-
-    Example:
-        ```python
-        # Extract a tool call from the state and create an interrupt request
-        request = HumanInterrupt(
-            action_request=ActionRequest(
-                action="run_command",  # The action being requested
-                args={"command": "ls", "args": ["-l"]},  # Arguments for the action
-            ),
-            config=HumanInterruptConfig(
-                allow_ignore=True,  # Allow skipping this step
-                allow_respond=True,  # Allow text feedback
-                allow_edit=False,  # Don't allow editing
-                allow_accept=True,  # Allow direct acceptance
-            ),
-            description="Please review the command before execution",
-        )
-        # Send the interrupt request and get the response
-        response = interrupt([request])[0]
-        ```
-    """
-
-    action_request: ActionRequest
-    config: HumanInterruptConfig
-    description: str | None
-
-
-class HumanResponse(TypedDict):
-    """Human response.
-
-    The response provided by a human to an interrupt,
-    which is returned when graph execution resumes.
-
-    Attributes:
-        type: The type of response:
-
-            - "accept": Approves the current state without changes
-            - "ignore": Skips/ignores the current step
-            - "response": Provides text feedback or instructions
-            - "edit": Modifies the current state/content
-        args: The response payload:
-
-            - None: For ignore/accept actions
-            - str: For text responses
-            - ActionRequest: For edit actions with updated content
-    """
-
-    type: Literal["accept", "ignore", "response", "edit"]
-    args: None | str | ActionRequest
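
The deleted types are superseded, in revised form, by the TypedDicts in the human-in-the-loop middleware module; a migration sketch:

```python
# Migration sketch for imports of the deleted module.
# Before (1.0.0a5):
#   from langchain.agents.interrupt import HumanInterrupt, HumanResponse
# After (1.0.0a6):
from langchain.agents.middleware.human_in_the_loop import (
    ActionRequest,
    HumanInTheLoopConfig,
    HumanInTheLoopRequest,
    HumanInTheLoopResponse,
)
```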
langchain/agents/middleware/_utils.py DELETED
@@ -1,11 +0,0 @@
-"""Utility functions for middleware."""
-
-from typing import Any
-
-
-def _generate_correction_tool_messages(content: str, tool_calls: list) -> list[dict[str, Any]]:
-    """Generate tool messages for model behavior correction."""
-    return [
-        {"role": "tool", "content": content, "tool_call_id": tool_call["id"]}
-        for tool_call in tool_calls
-    ]
{langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/WHEEL: file without changes
{langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/entry_points.txt: file without changes
{langchain-1.0.0a5.dist-info → langchain-1.0.0a6.dist-info}/licenses/LICENSE: file without changes