langchain 1.0.0a12__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +1 -1
- langchain/agents/__init__.py +7 -1
- langchain/agents/factory.py +722 -226
- langchain/agents/middleware/__init__.py +36 -9
- langchain/agents/middleware/_execution.py +388 -0
- langchain/agents/middleware/_redaction.py +350 -0
- langchain/agents/middleware/context_editing.py +46 -17
- langchain/agents/middleware/file_search.py +382 -0
- langchain/agents/middleware/human_in_the_loop.py +220 -173
- langchain/agents/middleware/model_call_limit.py +43 -10
- langchain/agents/middleware/model_fallback.py +79 -36
- langchain/agents/middleware/pii.py +68 -504
- langchain/agents/middleware/shell_tool.py +718 -0
- langchain/agents/middleware/summarization.py +2 -2
- langchain/agents/middleware/{planning.py → todo.py} +35 -16
- langchain/agents/middleware/tool_call_limit.py +308 -114
- langchain/agents/middleware/tool_emulator.py +200 -0
- langchain/agents/middleware/tool_retry.py +384 -0
- langchain/agents/middleware/tool_selection.py +25 -21
- langchain/agents/middleware/types.py +714 -257
- langchain/agents/structured_output.py +37 -27
- langchain/chat_models/__init__.py +7 -1
- langchain/chat_models/base.py +192 -190
- langchain/embeddings/__init__.py +13 -3
- langchain/embeddings/base.py +49 -29
- langchain/messages/__init__.py +50 -1
- langchain/tools/__init__.py +9 -7
- langchain/tools/tool_node.py +16 -1174
- langchain-1.0.4.dist-info/METADATA +92 -0
- langchain-1.0.4.dist-info/RECORD +34 -0
- langchain/_internal/__init__.py +0 -0
- langchain/_internal/_documents.py +0 -35
- langchain/_internal/_lazy_import.py +0 -35
- langchain/_internal/_prompts.py +0 -158
- langchain/_internal/_typing.py +0 -70
- langchain/_internal/_utils.py +0 -7
- langchain/agents/_internal/__init__.py +0 -1
- langchain/agents/_internal/_typing.py +0 -13
- langchain/agents/middleware/prompt_caching.py +0 -86
- langchain/documents/__init__.py +0 -7
- langchain/embeddings/cache.py +0 -361
- langchain/storage/__init__.py +0 -22
- langchain/storage/encoder_backed.py +0 -123
- langchain/storage/exceptions.py +0 -5
- langchain/storage/in_memory.py +0 -13
- langchain-1.0.0a12.dist-info/METADATA +0 -122
- langchain-1.0.0a12.dist-info/RECORD +0 -43
- {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/WHEEL +0 -0
- {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/licenses/LICENSE +0 -0
The substantive source diffs below cover `langchain/agents/middleware/human_in_the_loop.py` and `langchain/agents/middleware/model_call_limit.py`. Lines elided by the diff viewer on the removed side are marked with `...`.

langchain/agents/middleware/human_in_the_loop.py

@@ -10,89 +10,93 @@ from typing_extensions import NotRequired, TypedDict
 from langchain.agents.middleware.types import AgentMiddleware, AgentState
 
 
-class HumanInTheLoopConfig(TypedDict):
-    """...
-
-    ...
-    """
+class Action(TypedDict):
+    """Represents an action with a name and args."""
 
-    allow_accept: NotRequired[bool]
-    """..."""
-    allow_edit: NotRequired[bool]
-    """Whether the human can approve the current action with edited content."""
-    allow_respond: NotRequired[bool]
-    """Whether the human can reject the current action with feedback."""
+    name: str
+    """The type or name of action being requested (e.g., "add_numbers")."""
+
+    args: dict[str, Any]
+    """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
 
 
 class ActionRequest(TypedDict):
-    """Represents ..."""
+    """Represents an action request with a name, args, and description."""
 
-    action: str
-    """The ..."""
-    args: dict
-    """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+    name: str
+    """The name of the action being requested."""
+
+    args: dict[str, Any]
+    """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
+
+    description: NotRequired[str]
+    """The description of the action to be reviewed."""
 
 
-class HumanInTheLoopRequest(TypedDict):
-    """...
-
-    Example:
-        ```python
-        # Extract a tool call from the state and create an interrupt request
-        request = HumanInterrupt(
-            action_request=ActionRequest(
-                action="run_command",  # The action being requested
-                args={"command": "ls", "args": ["-l"]},  # Arguments for the action
-            ),
-            config=HumanInTheLoopConfig(
-                allow_accept=True,  # Allow approval
-                allow_respond=True,  # Allow rejection with feedback
-                allow_edit=False,  # Don't allow approval with edits
-            ),
-            description="Please review the command before execution",
-        )
-        # Send the interrupt request and get the response
-        response = interrupt([request])[0]
-        ```
-    """
-
-    action_request: ActionRequest
-    """The specific action being requested from the human."""
-    config: HumanInTheLoopConfig
-    """Configuration defining what response types are allowed."""
-    description: str | None
-    """Optional detailed description of what input is needed."""
+DecisionType = Literal["approve", "edit", "reject"]
 
 
-class ...:
-    """..."""
-
-    ...
-    """..."""
-
-
-class ...:
-    """..."""
-
-    ...
-    """..."""
-
-    type: Literal["response"]
-    """The type of response when a human rejects the action."""
-
-    ...
-    """..."""
+class ReviewConfig(TypedDict):
+    """Policy for reviewing a HITL request."""
+
+    action_name: str
+    """Name of the action associated with this review configuration."""
+
+    allowed_decisions: list[DecisionType]
+    """The decisions that are allowed for this request."""
+
+    args_schema: NotRequired[dict[str, Any]]
+    """JSON schema for the args associated with the action, if edits are allowed."""
+
+
+class HITLRequest(TypedDict):
+    """Request for human feedback on a sequence of actions requested by a model."""
+
+    action_requests: list[ActionRequest]
+    """A list of agent actions for human review."""
+
+    review_configs: list[ReviewConfig]
+    """Review configuration for all possible actions."""
+
+
+class ApproveDecision(TypedDict):
+    """Response when a human approves the action."""
+
+    type: Literal["approve"]
+    """The type of response when a human approves the action."""
 
 
-class ...:
+class EditDecision(TypedDict):
     """Response when a human edits the action."""
 
     type: Literal["edit"]
     """The type of response when a human edits the action."""
 
-    ...
-    """..."""
+    edited_action: Action
+    """Edited action for the agent to perform.
+
+    Ex: for a tool call, a human reviewer can edit the tool name and args.
+    """
+
+
+class RejectDecision(TypedDict):
+    """Response when a human rejects the action."""
+
+    type: Literal["reject"]
+    """The type of response when a human rejects the action."""
+
+    message: NotRequired[str]
+    """The message sent to the model explaining why the action was rejected."""
+
+
+Decision = ApproveDecision | EditDecision | RejectDecision
 
 
-...
-"""..."""
+class HITLResponse(TypedDict):
+    """Response payload for a HITLRequest."""
+
+    decisions: list[Decision]
+    """The decisions made by the human."""
 
 
 class _DescriptionFactory(Protocol):
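Taken together, these types replace the old per-tool request shape with one batched request and one batched response per interrupt. A minimal sketch of the new payload shapes as plain dicts; the `send_email` tool and its arguments are made up for illustration and are not part of the diff:

```python
# Illustrative payloads matching the TypedDicts above; "send_email" is hypothetical.
hitl_request = {
    "action_requests": [
        {
            "name": "send_email",
            "args": {"to": "reviewer@example.com", "body": "Quarterly numbers attached"},
            "description": "Please review this outgoing email",
        }
    ],
    "review_configs": [
        {"action_name": "send_email", "allowed_decisions": ["approve", "edit", "reject"]}
    ],
}

# The human sends back one decision per action request, in the same order.
hitl_response = {
    "decisions": [
        {
            "type": "edit",
            "edited_action": {
                "name": "send_email",
                "args": {"to": "cfo@example.com", "body": "Quarterly numbers attached"},
            },
        }
    ],
}
```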
@@ -103,49 +107,53 @@ class _DescriptionFactory(Protocol):
         ...
 
 
-class ToolConfig(TypedDict):
-    """Configuration for ..."""
+class InterruptOnConfig(TypedDict):
+    """Configuration for an action requiring human in the loop.
+
+    This is the configuration format used in the `HumanInTheLoopMiddleware.__init__`
+    method.
+    """
+
+    allowed_decisions: list[DecisionType]
+    """The decisions that are allowed for this action."""
 
-    allow_accept: NotRequired[bool]
-    """Whether the human can approve the current action without changes."""
-    allow_edit: NotRequired[bool]
-    """Whether the human can approve the current action with edited content."""
-    allow_respond: NotRequired[bool]
-    """Whether the human can reject the current action with feedback."""
     description: NotRequired[str | _DescriptionFactory]
     """The description attached to the request for human input.
 
     Can be either:
+
     - A static string describing the approval request
     - A callable that dynamically generates the description based on agent state,
-      ...
+      runtime, and tool call information
 
     Example:
-        ...
+        ```python
+        # Static string description
+        config = ToolConfig(
+            allowed_decisions=["approve", "reject"],
+            description="Please review this tool execution"
+        )
 
-        ...
-        ) -> str:
-            import json
-            return (
-                f"Tool: {tool_call['name']}\n"
-                f"Arguments:\n{json.dumps(tool_call['args'], indent=2)}"
-            )
-
-        config = ToolConfig(
-            allow_accept=True,
-            description=format_tool_description
-        )
+        # Dynamic callable description
+        def format_tool_description(
+            tool_call: ToolCall,
+            state: AgentState,
+            runtime: Runtime
+        ) -> str:
+            import json
+            return (
+                f"Tool: {tool_call['name']}\n"
+                f"Arguments:\n{json.dumps(tool_call['args'], indent=2)}"
+            )
 
+        config = InterruptOnConfig(
+            allowed_decisions=["approve", "edit", "reject"],
+            description=format_tool_description
+        )
+        ```
     """
+
+    args_schema: NotRequired[dict[str, Any]]
+    """JSON schema for the args associated with the action, if edits are allowed."""
 
 
 class HumanInTheLoopMiddleware(AgentMiddleware):
@@ -153,7 +161,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 
     def __init__(
         self,
-        interrupt_on: dict[str, bool | ToolConfig],
+        interrupt_on: dict[str, bool | InterruptOnConfig],
        *,
         description_prefix: str = "Tool execution requires approval",
     ) -> None:
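For orientation, a minimal sketch of the new constructor surface; the middleware name, `interrupt_on`, and `description_prefix` come from the diff, while the tool names and the commented-out agent wiring are assumptions:

```python
from langchain.agents.middleware import HumanInTheLoopMiddleware

# True -> approve/edit/reject all allowed; False -> auto-approved;
# a mapping -> the InterruptOnConfig shape documented above.
hitl = HumanInTheLoopMiddleware(
    interrupt_on={
        "delete_file": True,
        "read_file": False,
        "send_email": {
            "allowed_decisions": ["approve", "reject"],
            "description": "Please review the outgoing email",
        },
    },
    description_prefix="Tool execution requires approval",
)

# Hypothetical wiring into an agent (model, tools, and checkpointer not shown):
# agent = create_agent(model=..., tools=[...], middleware=[hitl], checkpointer=...)
```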
@@ -163,34 +171,110 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
             interrupt_on: Mapping of tool name to allowed actions.
                 If a tool doesn't have an entry, it's auto-approved by default.
 
-                * ...
-                * ...
-                * ...
+                * `True` indicates all decisions are allowed: approve, edit, and reject.
+                * `False` indicates that the tool is auto-approved.
+                * `InterruptOnConfig` indicates the specific decisions allowed for this
+                  tool.
+                  The InterruptOnConfig can include a `description` field (`str` or
+                  `Callable`) for custom formatting of the interrupt description.
             description_prefix: The prefix to use when constructing action requests.
-                This is used to provide context about the tool call and the action being
-                requested. Not used if a tool has a ...
+                This is used to provide context about the tool call and the action being
+                requested. Not used if a tool has a `description` in its
+                `InterruptOnConfig`.
         """
         super().__init__()
-        resolved_tool_configs: dict[str, ToolConfig] = {}
+        resolved_configs: dict[str, InterruptOnConfig] = {}
         for tool_name, tool_config in interrupt_on.items():
             if isinstance(tool_config, bool):
                 if tool_config is True:
-                    resolved_tool_configs[tool_name] = ToolConfig(
-                        allow_accept=True,
-                        allow_edit=True,
-                        allow_respond=True,
+                    resolved_configs[tool_name] = InterruptOnConfig(
+                        allowed_decisions=["approve", "edit", "reject"]
                     )
-            elif ...:
-                ...
-                resolved_tool_configs[tool_name] = tool_config
-        self.interrupt_on = resolved_tool_configs
+            elif tool_config.get("allowed_decisions"):
+                resolved_configs[tool_name] = tool_config
+        self.interrupt_on = resolved_configs
         self.description_prefix = description_prefix
 
+    def _create_action_and_config(
+        self,
+        tool_call: ToolCall,
+        config: InterruptOnConfig,
+        state: AgentState,
+        runtime: Runtime,
+    ) -> tuple[ActionRequest, ReviewConfig]:
+        """Create an ActionRequest and ReviewConfig for a tool call."""
+        tool_name = tool_call["name"]
+        tool_args = tool_call["args"]
+
+        # Generate description using the description field (str or callable)
+        description_value = config.get("description")
+        if callable(description_value):
+            description = description_value(tool_call, state, runtime)
+        elif description_value is not None:
+            description = description_value
+        else:
+            description = f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
+
+        # Create ActionRequest with description
+        action_request = ActionRequest(
+            name=tool_name,
+            args=tool_args,
+            description=description,
+        )
+
+        # Create ReviewConfig
+        # eventually can get tool information and populate args_schema from there
+        review_config = ReviewConfig(
+            action_name=tool_name,
+            allowed_decisions=config["allowed_decisions"],
+        )
+
+        return action_request, review_config
+
+    def _process_decision(
+        self,
+        decision: Decision,
+        tool_call: ToolCall,
+        config: InterruptOnConfig,
+    ) -> tuple[ToolCall | None, ToolMessage | None]:
+        """Process a single decision and return the revised tool call and optional tool message."""
+        allowed_decisions = config["allowed_decisions"]
+
+        if decision["type"] == "approve" and "approve" in allowed_decisions:
+            return tool_call, None
+        if decision["type"] == "edit" and "edit" in allowed_decisions:
+            edited_action = decision["edited_action"]
+            return (
+                ToolCall(
+                    type="tool_call",
+                    name=edited_action["name"],
+                    args=edited_action["args"],
+                    id=tool_call["id"],
+                ),
+                None,
+            )
+        if decision["type"] == "reject" and "reject" in allowed_decisions:
+            # Create a tool message with the human's text response
+            content = decision.get("message") or (
+                f"User rejected the tool call for `{tool_call['name']}` with id {tool_call['id']}"
+            )
+            tool_message = ToolMessage(
+                content=content,
+                name=tool_call["name"],
+                tool_call_id=tool_call["id"],
+                status="error",
+            )
+            return tool_call, tool_message
+        msg = (
+            f"Unexpected human decision: {decision}. "
+            f"Decision type '{decision.get('type')}' "
+            f"is not allowed for tool '{tool_call['name']}'. "
+            f"Expected one of {allowed_decisions} based on the tool's configuration."
+        )
+        raise ValueError(msg)
+
     def after_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
-        """Trigger interrupt flows for relevant tool calls after an AIMessage."""
+        """Trigger interrupt flows for relevant tool calls after an `AIMessage`."""
         messages = state["messages"]
         if not messages:
             return None
@@ -216,87 +300,50 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         revised_tool_calls: list[ToolCall] = auto_approved_tool_calls.copy()
         artificial_tool_messages: list[ToolMessage] = []
 
-        # Create ...
-        interrupt_requests: list[HumanInTheLoopRequest] = []
+        # Create action requests and review configs for all tools that need approval
+        action_requests: list[ActionRequest] = []
+        review_configs: list[ReviewConfig] = []
+
         for tool_call in interrupt_tool_calls:
-            tool_name = tool_call["name"]
-            tool_args = tool_call["args"]
-            config = self.interrupt_on[tool_name]
-
-            # Generate description using the description field (str or callable)
-            description_value = config.get("description")
-            if callable(description_value):
-                description = description_value(tool_call, state, runtime)
-            elif description_value is not None:
-                description = description_value
-            else:
-                description = f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
-
-            request: HumanInTheLoopRequest = {
-                "action_request": ActionRequest(
-                    action=tool_name,
-                    args=tool_args,
-                ),
-                "config": config,
-                "description": description,
-            }
-            interrupt_requests.append(request)
+            config = self.interrupt_on[tool_call["name"]]
 
-        responses = interrupt(interrupt_requests)
+            # Create ActionRequest and ReviewConfig using helper method
+            action_request, review_config = self._create_action_and_config(
+                tool_call, config, state, runtime
+            )
+            action_requests.append(action_request)
+            review_configs.append(review_config)
 
-        # ...
-        if (responses_len := len(responses)) != (
+        # Create single HITLRequest with all actions and configs
+        hitl_request = HITLRequest(
+            action_requests=action_requests,
+            review_configs=review_configs,
+        )
+
+        # Send interrupt and get response
+        hitl_response: HITLResponse = interrupt(hitl_request)
+        decisions = hitl_response["decisions"]
+
+        # Validate that the number of decisions matches the number of interrupt tool calls
+        if (decisions_len := len(decisions)) != (
             interrupt_tool_calls_len := len(interrupt_tool_calls)
         ):
             msg = (
-                f"Number of human ...
+                f"Number of human decisions ({decisions_len}) does not match "
                 f"number of hanging tool calls ({interrupt_tool_calls_len})."
             )
             raise ValueError(msg)
 
-        for i, response in enumerate(responses):
+        # Process each decision using helper method
+        for i, decision in enumerate(decisions):
             tool_call = interrupt_tool_calls[i]
             config = self.interrupt_on[tool_call["name"]]
 
-            if response["type"] == "accept" and config.get("allow_accept"):
-                revised_tool_calls.append(tool_call)
-            elif response["type"] == "edit" and config.get("allow_edit"):
-                edited_action = response["args"]
-                revised_tool_calls.append(
-                    ToolCall(
-                        type="tool_call",
-                        name=edited_action["action"],
-                        args=edited_action["args"],
-                        id=tool_call["id"],
-                    )
-                )
-            elif response["type"] == "response" and config.get("allow_respond"):
-                # Create a tool message with the human's text response
-                content = response.get("args") or (
-                    f"User rejected the tool call for `{tool_call['name']}` "
-                    f"with id {tool_call['id']}"
-                )
-                tool_message = ToolMessage(
-                    content=content,
-                    name=tool_call["name"],
-                    tool_call_id=tool_call["id"],
-                    status="error",
-                )
-                revised_tool_calls.append(tool_call)
+            revised_tool_call, tool_message = self._process_decision(decision, tool_call, config)
+            if revised_tool_call:
+                revised_tool_calls.append(revised_tool_call)
+            if tool_message:
                 artificial_tool_messages.append(tool_message)
-            else:
-                allowed_actions = [
-                    action
-                    for action in ["accept", "edit", "response"]
-                    if config.get(f"allow_{'respond' if action == 'response' else action}")
-                ]
-                msg = (
-                    f"Unexpected human response: {response}. "
-                    f"Response action '{response.get('type')}' "
-                    f"is not allowed for tool '{tool_call['name']}'. "
-                    f"Expected one of {allowed_actions} based on the tool's configuration."
-                )
-                raise ValueError(msg)
 
         # Update the AI message to only include approved tool calls
         last_ai_msg.tool_calls = revised_tool_calls
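Putting the reworked `after_model` together with the types above, a hedged sketch of a driver loop around an agent built with this middleware; the `__interrupt__` key and `Command(resume=...)` come from LangGraph's interrupt mechanism, and the thread id, user message, and `agent` object are assumptions carried over from the earlier sketch:

```python
from langgraph.types import Command

config = {"configurable": {"thread_id": "review-demo"}}
result = agent.invoke({"messages": [("user", "Delete report.txt")]}, config)

# When after_model interrupts, the single HITLRequest surfaces on the result.
if "__interrupt__" in result:
    request = result["__interrupt__"][0].value
    for action in request["action_requests"]:
        print(action["name"], action["args"], action.get("description"))

    # Approve every pending call and resume; edits and rejects use the other
    # Decision shapes shown earlier.
    decisions = [{"type": "approve"} for _ in request["action_requests"]]
    result = agent.invoke(Command(resume={"decisions": decisions}), config)
```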
langchain/agents/middleware/model_call_limit.py

@@ -2,16 +2,33 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Annotated, Any, Literal
 
 from langchain_core.messages import AIMessage
+from langgraph.channels.untracked_value import UntrackedValue
+from typing_extensions import NotRequired
 
-from langchain.agents.middleware.types import AgentMiddleware, AgentState, hook_config
+from langchain.agents.middleware.types import (
+    AgentMiddleware,
+    AgentState,
+    PrivateStateAttr,
+    hook_config,
+)
 
 if TYPE_CHECKING:
     from langgraph.runtime import Runtime
 
 
+class ModelCallLimitState(AgentState):
+    """State schema for ModelCallLimitMiddleware.
+
+    Extends AgentState with model call tracking fields.
+    """
+
+    thread_model_call_count: NotRequired[Annotated[int, PrivateStateAttr]]
+    run_model_call_count: NotRequired[Annotated[int, UntrackedValue, PrivateStateAttr]]
+
+
 def _build_limit_exceeded_message(
     thread_count: int,
     run_count: int,
@@ -69,8 +86,8 @@ class ModelCallLimitExceededError(Exception):
         super().__init__(msg)
 
 
-class ModelCallLimitMiddleware(AgentMiddleware):
-    """...
+class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
+    """Tracks model call counts and enforces limits.
 
     This middleware monitors the number of model calls made during agent execution
     and can terminate the agent when specified limits are reached. It supports
@@ -97,6 +114,8 @@ class ModelCallLimitMiddleware(AgentMiddleware):
         ```
     """
 
+    state_schema = ModelCallLimitState
+
     def __init__(
         self,
         *,
@@ -108,17 +127,16 @@ class ModelCallLimitMiddleware(AgentMiddleware):
 
         Args:
             thread_limit: Maximum number of model calls allowed per thread.
-                 None means no limit.
+                None means no limit.
             run_limit: Maximum number of model calls allowed per run.
-                 None means no limit.
+                None means no limit.
             exit_behavior: What to do when limits are exceeded.
                 - "end": Jump to the end of the agent execution and
                     inject an artificial AI message indicating that the limit was exceeded.
-                - "error": Raise a ModelCallLimitExceededError
-                    Defaults to "end".
+                - "error": Raise a `ModelCallLimitExceededError`
 
         Raises:
-            ValueError: If both limits are None or if exit_behavior is invalid.
+            ValueError: If both limits are `None` or if `exit_behavior` is invalid.
         """
         super().__init__()
 
@@ -135,7 +153,7 @@ class ModelCallLimitMiddleware(AgentMiddleware):
         self.exit_behavior = exit_behavior
 
     @hook_config(can_jump_to=["end"])
-    def before_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+    def before_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
         """Check model call limits before making a model call.
 
         Args:
@@ -175,3 +193,18 @@ class ModelCallLimitMiddleware(AgentMiddleware):
             return {"jump_to": "end", "messages": [limit_ai_message]}
 
         return None
+
+    def after_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+        """Increment model call counts after a model call.
+
+        Args:
+            state: The current agent state.
+            runtime: The langgraph runtime.
+
+        Returns:
+            State updates with incremented call counts.
+        """
+        return {
+            "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
+            "run_model_call_count": state.get("run_model_call_count", 0) + 1,
+        }
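A minimal usage sketch for the updated middleware; the class name and its keyword arguments come from the diff, while the export path, model, and tools are assumptions:

```python
from langchain.agents.middleware import ModelCallLimitMiddleware

# Counters live in ModelCallLimitState: the thread count persists across runs on
# the same thread, while the run count is kept out of the checkpoint
# (UntrackedValue), so it effectively resets each run.
limiter = ModelCallLimitMiddleware(thread_limit=10, run_limit=5, exit_behavior="end")

# Hypothetical agent wiring (model and tools not shown):
# agent = create_agent(model="...", tools=[...], middleware=[limiter])
#
# Each model call bumps both counters via after_model; before_model jumps to the
# end (or raises ModelCallLimitExceededError) once a limit is exceeded.
```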