langchain 1.0.0a15__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +1 -1
- langchain/agents/__init__.py +7 -1
- langchain/agents/factory.py +67 -33
- langchain/agents/middleware/__init__.py +9 -1
- langchain/agents/middleware/context_editing.py +1 -1
- langchain/agents/middleware/human_in_the_loop.py +11 -7
- langchain/agents/middleware/model_call_limit.py +4 -5
- langchain/agents/middleware/model_fallback.py +0 -2
- langchain/agents/middleware/pii.py +7 -7
- langchain/agents/middleware/tool_call_limit.py +1 -1
- langchain/agents/middleware/tool_emulator.py +2 -2
- langchain/agents/middleware/tool_retry.py +384 -0
- langchain/agents/middleware/types.py +67 -58
- langchain/agents/structured_output.py +29 -25
- langchain/chat_models/__init__.py +7 -1
- langchain/chat_models/base.py +98 -108
- langchain/embeddings/__init__.py +7 -1
- langchain/embeddings/base.py +1 -1
- langchain/messages/__init__.py +10 -1
- langchain/tools/__init__.py +9 -2
- langchain/tools/tool_node.py +256 -94
- {langchain-1.0.0a15.dist-info → langchain-1.0.0rc1.dist-info}/METADATA +1 -1
- langchain-1.0.0rc1.dist-info/RECORD +30 -0
- langchain-1.0.0a15.dist-info/RECORD +0 -29
- {langchain-1.0.0a15.dist-info → langchain-1.0.0rc1.dist-info}/WHEEL +0 -0
- {langchain-1.0.0a15.dist-info → langchain-1.0.0rc1.dist-info}/licenses/LICENSE +0 -0
langchain/__init__.py
CHANGED
langchain/agents/__init__.py
CHANGED
@@ -1,4 +1,10 @@
-"""
+"""Entrypoint to building [Agents](https://docs.langchain.com/oss/python/langchain/agents) with LangChain.
+
+!!! warning "Reference docs"
+    This page contains **reference documentation** for Agents. See
+    [the docs](https://docs.langchain.com/oss/python/langchain/agents) for conceptual
+    guides, tutorials, and examples on using Agents.
+""" # noqa: E501
 
 from langchain.agents.factory import create_agent
 from langchain.agents.middleware.types import AgentState
langchain/agents/factory.py
CHANGED
@@ -92,7 +92,7 @@ def _chain_model_call_handlers(
     handlers: List of handlers. First handler wraps all others.
 
 Returns:
-    Composed handler, or None if handlers empty.
+    Composed handler, or `None` if handlers empty.
 
 Example:
     ```python
@@ -195,13 +195,13 @@ def _chain_async_model_call_handlers(
     ]
     | None
 ):
-    """Compose multiple async wrap_model_call handlers into single middleware stack.
+    """Compose multiple async `wrap_model_call` handlers into single middleware stack.
 
     Args:
         handlers: List of async handlers. First handler wraps all others.
 
     Returns:
-        Composed async handler, or None if handlers empty.
+        Composed async handler, or `None` if handlers empty.
     """
     if not handlers:
         return None
@@ -267,12 +267,13 @@ def _chain_async_model_call_handlers(
 
 
 def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None = None) -> type:
-    """Resolve schema by merging schemas and optionally respecting OmitFromSchema annotations.
+    """Resolve schema by merging schemas and optionally respecting `OmitFromSchema` annotations.
 
     Args:
         schemas: List of schema types to merge
-        schema_name: Name for the generated TypedDict
-        omit_flag: If specified, omit fields with this flag set ('input' or
+        schema_name: Name for the generated `TypedDict`
+        omit_flag: If specified, omit fields with this flag set (`'input'` or
+            `'output'`)
     """
     all_annotations = {}
 
@@ -312,11 +313,11 @@ def _extract_metadata(type_: type) -> list:
 
 
 def _get_can_jump_to(middleware: AgentMiddleware[Any, Any], hook_name: str) -> list[JumpTo]:
-    """Get the can_jump_to list from either sync or async hook methods.
+    """Get the `can_jump_to` list from either sync or async hook methods.
 
     Args:
         middleware: The middleware instance to inspect.
-        hook_name: The name of the hook ('before_model' or 'after_model').
+        hook_name: The name of the hook (`'before_model'` or `'after_model'`).
 
     Returns:
         List of jump destinations, or empty list if not configured.
@@ -350,7 +351,7 @@ def _supports_provider_strategy(model: str | BaseChatModel) -> bool:
     """Check if a model supports provider-specific structured output.
 
     Args:
-        model: Model name string or BaseChatModel instance.
+        model: Model name string or `BaseChatModel` instance.
 
     Returns:
         `True` if the model supports provider-specific structured output, `False` otherwise.
@@ -373,7 +374,7 @@ def _handle_structured_output_error(
     exception: Exception,
     response_format: ResponseFormat,
 ) -> tuple[bool, str]:
-    """Handle structured output error. Returns (should_retry, retry_tool_message)
+    """Handle structured output error. Returns `(should_retry, retry_tool_message)`."""
     if not isinstance(response_format, ToolStrategy):
         return False, ""
 
@@ -408,7 +409,7 @@ def _chain_tool_call_wrappers(
     wrappers: Wrappers in middleware order.
 
 Returns:
-    Composed wrapper, or None if empty.
+    Composed wrapper, or `None` if empty.
 
 Example:
     wrapper = _chain_tool_call_wrappers([auth, cache, retry])
@@ -465,7 +466,7 @@ def _chain_async_tool_call_wrappers(
     wrappers: Async wrappers in middleware order.
 
 Returns:
-    Composed async wrapper, or None if empty.
+    Composed async wrapper, or `None` if empty.
 """
 if not wrappers:
     return None
@@ -516,6 +517,7 @@ def create_agent( # noqa: PLR0915
     system_prompt: str | None = None,
     middleware: Sequence[AgentMiddleware[AgentState[ResponseT], ContextT]] = (),
     response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
+    state_schema: type[AgentState[ResponseT]] | None = None,
     context_schema: type[ContextT] | None = None,
     checkpointer: Checkpointer | None = None,
     store: BaseStore | None = None,
@@ -534,11 +536,13 @@ def create_agent( # noqa: PLR0915
 
     Args:
         model: The language model for the agent. Can be a string identifier
-            (e.g., `"openai:gpt-4"`)
-
+            (e.g., `"openai:gpt-4"`) or a chat model instance (e.g., `ChatOpenAI()`).
+            For a full list of supported model strings, see
+            [`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
+        tools: A list of tools, `dicts`, or `Callable`. If `None` or an empty list,
             the agent will consist of a model node without a tool calling loop.
         system_prompt: An optional system prompt for the LLM. Prompts are converted to a
-
+            `SystemMessage` and added to the beginning of the message list.
         middleware: A sequence of middleware instances to apply to the agent.
             Middleware can intercept and modify agent behavior at various stages.
         response_format: An optional configuration for structured responses.
@@ -546,6 +550,13 @@ def create_agent( # noqa: PLR0915
             If provided, the agent will handle structured output during the
             conversation flow. Raw schemas will be wrapped in an appropriate strategy
             based on model capabilities.
+        state_schema: An optional `TypedDict` schema that extends `AgentState`.
+            When provided, this schema is used instead of `AgentState` as the base
+            schema for merging with middleware state schemas. This allows users to
+            add custom state fields without needing to create custom middleware.
+            Generally, it's recommended to use state_schema extensions via middleware
+            to keep relevant extensions scoped to corresponding hooks / tools.
+            The schema must be a subclass of `AgentState[ResponseT]`.
         context_schema: An optional schema for runtime context.
         checkpointer: An optional checkpoint saver object. This is used for persisting
             the state of the graph (e.g., as chat memory) for a single thread
@@ -553,12 +564,15 @@ def create_agent( # noqa: PLR0915
         store: An optional store object. This is used for persisting data
             across multiple threads (e.g., multiple conversations / users).
         interrupt_before: An optional list of node names to interrupt before.
-
+            Useful if you want to add a user confirmation or other interrupt
            before taking an action.
         interrupt_after: An optional list of node names to interrupt after.
-
+            Useful if you want to return directly or run additional processing
            on an output.
-        debug:
+        debug: Whether to enable verbose logging for graph execution. When enabled,
+            prints detailed information about each node execution, state updates,
+            and transitions during agent runtime. Useful for debugging middleware
+            behavior and understanding agent execution flow.
         name: An optional name for the `CompiledStateGraph`.
            This name will be automatically used when adding the agent graph to
            another graph as a subgraph node - particularly useful for building
@@ -569,8 +583,8 @@ def create_agent( # noqa: PLR0915
         A compiled `StateGraph` that can be used for chat interactions.
 
     The agent node calls the language model with the messages list (after applying
-    the system prompt). If the resulting AIMessage contains `tool_calls`, the graph
-    then call the tools. The tools node executes the tools and adds the responses
+    the system prompt). If the resulting `AIMessage` contains `tool_calls`, the graph
+    will then call the tools. The tools node executes the tools and adds the responses
     to the messages list as `ToolMessage` objects. The agent node then calls the
     language model again. The process repeats until no more `tool_calls` are
     present in the response. The agent then returns the full list of messages.
@@ -586,7 +600,7 @@ def create_agent( # noqa: PLR0915
 
 
         graph = create_agent(
-            model="anthropic:claude-
+            model="anthropic:claude-sonnet-4-5-20250929",
             tools=[check_weather],
             system_prompt="You are a helpful assistant",
         )
@@ -756,9 +770,11 @@ def create_agent( # noqa: PLR0915
     awrap_model_call_handler = _chain_async_model_call_handlers(async_handlers)
 
     state_schemas = {m.state_schema for m in middleware}
-
+    # Use provided state_schema if available, otherwise use base AgentState
+    base_state = state_schema if state_schema is not None else AgentState
+    state_schemas.add(base_state)
 
-
+    resolved_state_schema = _resolve_schema(state_schemas, "StateSchema", None)
     input_schema = _resolve_schema(state_schemas, "InputSchema", "input")
     output_schema = _resolve_schema(state_schemas, "OutputSchema", "output")
 
@@ -766,7 +782,7 @@ def create_agent( # noqa: PLR0915
     graph: StateGraph[
         AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
     ] = StateGraph(
-        state_schema=
+        state_schema=resolved_state_schema,
         input_schema=input_schema,
         output_schema=output_schema,
         context_schema=context_schema,
@@ -878,8 +894,9 @@ def create_agent( # noqa: PLR0915
         request: The model request containing model, tools, and response format.
 
     Returns:
-        Tuple of (bound_model, effective_response_format) where
-        is the actual strategy used (may differ from
+        Tuple of `(bound_model, effective_response_format)` where
+        `effective_response_format` is the actual strategy used (may differ from
+        initial if auto-detected).
     """
     # Validate ONLY client-side tools that need to exist in tool_node
     # Build map of available client-side tools from the ToolNode
@@ -985,7 +1002,7 @@ def create_agent( # noqa: PLR0915
     def _execute_model_sync(request: ModelRequest) -> ModelResponse:
         """Execute model and return response.
 
-        This is the core model execution logic wrapped by wrap_model_call handlers.
+        This is the core model execution logic wrapped by `wrap_model_call` handlers.
         Raises any exceptions that occur during model invocation.
         """
         # Get the bound model (with auto-detection if needed)
@@ -1036,7 +1053,9 @@ def create_agent( # noqa: PLR0915
     async def _execute_model_async(request: ModelRequest) -> ModelResponse:
         """Execute model asynchronously and return response.
 
-        This is the core async model execution logic wrapped by wrap_model_call
+        This is the core async model execution logic wrapped by `wrap_model_call`
+        handlers.
+
         Raises any exceptions that occur during model invocation.
         """
         # Get the bound model (with auto-detection if needed)
@@ -1110,7 +1129,9 @@ def create_agent( # noqa: PLR0915
             else None
         )
         before_agent_node = RunnableCallable(sync_before_agent, async_before_agent, trace=False)
-        graph.add_node(
+        graph.add_node(
+            f"{m.name}.before_agent", before_agent_node, input_schema=resolved_state_schema
+        )
 
         if (
             m.__class__.before_model is not AgentMiddleware.before_model
@@ -1129,7 +1150,9 @@ def create_agent( # noqa: PLR0915
             else None
         )
         before_node = RunnableCallable(sync_before, async_before, trace=False)
-        graph.add_node(
+        graph.add_node(
+            f"{m.name}.before_model", before_node, input_schema=resolved_state_schema
+        )
 
         if (
             m.__class__.after_model is not AgentMiddleware.after_model
@@ -1148,7 +1171,7 @@ def create_agent( # noqa: PLR0915
             else None
         )
         after_node = RunnableCallable(sync_after, async_after, trace=False)
-        graph.add_node(f"{m.name}.after_model", after_node, input_schema=
+        graph.add_node(f"{m.name}.after_model", after_node, input_schema=resolved_state_schema)
 
         if (
             m.__class__.after_agent is not AgentMiddleware.after_agent
@@ -1167,7 +1190,9 @@ def create_agent( # noqa: PLR0915
             else None
         )
         after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
-        graph.add_node(
+        graph.add_node(
+            f"{m.name}.after_agent", after_agent_node, input_schema=resolved_state_schema
+        )
 
     # Determine the entry node (runs once at start): before_agent -> before_model -> model
     if middleware_w_before_agent:
@@ -1200,6 +1225,15 @@ def create_agent( # noqa: PLR0915
     graph.add_edge(START, entry_node)
     # add conditional edges only if tools exist
     if tool_node is not None:
+        # Only include exit_node in destinations if any tool has return_direct=True
+        # or if there are structured output tools
+        tools_to_model_destinations = [loop_entry_node]
+        if (
+            any(tool.return_direct for tool in tool_node.tools_by_name.values())
+            or structured_output_tools
+        ):
+            tools_to_model_destinations.append(exit_node)
+
         graph.add_conditional_edges(
             "tools",
             _make_tools_to_model_edge(
@@ -1208,7 +1242,7 @@ def create_agent( # noqa: PLR0915
                 structured_output_tools=structured_output_tools,
                 end_destination=exit_node,
             ),
-
+            tools_to_model_destinations,
         )
 
     # base destinations are tools and exit_node
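The new `state_schema` parameter lets callers extend the agent state without writing middleware. Below is a minimal sketch under stated assumptions: `create_agent` and `AgentState` are imported from `langchain.agents` as shown in the `__init__.py` diff above, and `WeatherState`, its `last_city` field, and the `check_weather` tool are illustrative names, not part of this release.

```python
from langchain.agents import AgentState, create_agent


class WeatherState(AgentState):
    """Hypothetical custom state: one extra field merged into the resolved schema."""

    last_city: str


def check_weather(city: str) -> str:
    """Toy tool; `create_agent` accepts plain callables as tools."""
    return f"It's always sunny in {city}!"


agent = create_agent(
    model="anthropic:claude-sonnet-4-5-20250929",
    tools=[check_weather],
    system_prompt="You are a helpful assistant",
    # New in 1.0.0rc1: used instead of AgentState as the base schema when
    # merging middleware state schemas.
    state_schema=WeatherState,
)
```

Per the docstring added in this release, middleware-scoped state extensions remain the recommended default; `state_schema` is for fields that do not belong to any single middleware.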
langchain/agents/middleware/__init__.py
CHANGED
@@ -1,4 +1,10 @@
-"""Middleware plugins
+"""Entrypoint to using [Middleware](https://docs.langchain.com/oss/python/langchain/middleware) plugins with [Agents](https://docs.langchain.com/oss/python/langchain/agents).
+
+!!! warning "Reference docs"
+    This page contains **reference documentation** for Middleware. See
+    [the docs](https://docs.langchain.com/oss/python/langchain/middleware) for conceptual
+    guides, tutorials, and examples on using Middleware.
+""" # noqa: E501
 
 from .context_editing import (
     ClearToolUsesEdit,
@@ -15,6 +21,7 @@ from .summarization import SummarizationMiddleware
 from .todo import TodoListMiddleware
 from .tool_call_limit import ToolCallLimitMiddleware
 from .tool_emulator import LLMToolEmulator
+from .tool_retry import ToolRetryMiddleware
 from .tool_selection import LLMToolSelectorMiddleware
 from .types import (
     AgentMiddleware,
@@ -49,6 +56,7 @@ __all__ = [
     "SummarizationMiddleware",
     "TodoListMiddleware",
     "ToolCallLimitMiddleware",
+    "ToolRetryMiddleware",
     "after_agent",
     "after_model",
     "before_agent",
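The newly exported `ToolRetryMiddleware` (backed by the new `tool_retry.py` module listed above) can be dropped into the `middleware` sequence of `create_agent`. A sketch, assuming the class can be constructed with defaults; its constructor options are not shown in this diff, and `flaky_lookup` is a hypothetical tool.

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ToolRetryMiddleware


def flaky_lookup(query: str) -> str:
    """Hypothetical tool that may raise transient errors."""
    return f"Results for {query}"


agent = create_agent(
    model="openai:gpt-4",
    tools=[flaky_lookup],
    # Assumption: default construction retries failed tool calls with sensible settings.
    middleware=[ToolRetryMiddleware()],
)
```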
langchain/agents/middleware/context_editing.py
CHANGED
@@ -198,7 +198,7 @@ class ContextEditingMiddleware(AgentMiddleware):
         edits: Iterable[ContextEdit] | None = None,
         token_count_method: Literal["approximate", "model"] = "approximate",  # noqa: S107
     ) -> None:
-        """
+        """Initializes a context editing middleware instance.
 
         Args:
             edits: Sequence of edit strategies to apply. Defaults to a single
langchain/agents/middleware/human_in_the_loop.py
CHANGED
@@ -110,7 +110,8 @@ class _DescriptionFactory(Protocol):
 class InterruptOnConfig(TypedDict):
     """Configuration for an action requiring human in the loop.
 
-    This is the configuration format used in the `HumanInTheLoopMiddleware.__init__`
+    This is the configuration format used in the `HumanInTheLoopMiddleware.__init__`
+    method.
     """
 
     allowed_decisions: list[DecisionType]
@@ -120,6 +121,7 @@ class InterruptOnConfig(TypedDict):
     """The description attached to the request for human input.
 
     Can be either:
+
     - A static string describing the approval request
     - A callable that dynamically generates the description based on agent state,
       runtime, and tool call information
@@ -171,12 +173,14 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
 
                 * `True` indicates all decisions are allowed: approve, edit, and reject.
                 * `False` indicates that the tool is auto-approved.
-                * `InterruptOnConfig` indicates the specific decisions allowed for this
-
-
+                * `InterruptOnConfig` indicates the specific decisions allowed for this
+                  tool.
+                  The InterruptOnConfig can include a `description` field (`str` or
+                  `Callable`) for custom formatting of the interrupt description.
             description_prefix: The prefix to use when constructing action requests.
-                This is used to provide context about the tool call and the action being
-                Not used if a tool has a `description` in its
+                This is used to provide context about the tool call and the action being
+                requested. Not used if a tool has a `description` in its
+                `InterruptOnConfig`.
         """
         super().__init__()
         resolved_configs: dict[str, InterruptOnConfig] = {}
@@ -270,7 +274,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
             raise ValueError(msg)
 
     def after_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
-        """Trigger interrupt flows for relevant tool calls after an AIMessage
+        """Trigger interrupt flows for relevant tool calls after an `AIMessage`."""
         messages = state["messages"]
         if not messages:
             return None
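A sketch of the `InterruptOnConfig` format described above. The mapping keyword (`interrupt_on`), the tool names, and the decision literals (`"approve"`, `"reject"`) are assumptions; only `allowed_decisions`, the optional `description`, and `description_prefix` appear in this diff.

```python
from langchain.agents.middleware.human_in_the_loop import (
    HumanInTheLoopMiddleware,
    InterruptOnConfig,
)

hitl = HumanInTheLoopMiddleware(
    # Assumed keyword for the tool-name -> config mapping.
    interrupt_on={
        "check_weather": False,  # auto-approved, no interrupt
        "send_email": True,  # all decisions allowed: approve, edit, reject
        "delete_record": InterruptOnConfig(
            allowed_decisions=["approve", "reject"],  # literals assumed
            description="Deleting a record is irreversible - please review.",
        ),
    },
    description_prefix="Tool execution requires approval",
)
```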
langchain/agents/middleware/model_call_limit.py
CHANGED
@@ -127,17 +127,16 @@ class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
 
         Args:
             thread_limit: Maximum number of model calls allowed per thread.
-                None means no limit.
+                None means no limit.
             run_limit: Maximum number of model calls allowed per run.
-                None means no limit.
+                None means no limit.
             exit_behavior: What to do when limits are exceeded.
                 - "end": Jump to the end of the agent execution and
                 inject an artificial AI message indicating that the limit was exceeded.
-                - "error": Raise a ModelCallLimitExceededError
-                Defaults to "end".
+                - "error": Raise a `ModelCallLimitExceededError`
 
         Raises:
-            ValueError: If both limits are None or if exit_behavior is invalid.
+            ValueError: If both limits are `None` or if `exit_behavior` is invalid.
         """
         super().__init__()
 
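For reference, the parameters documented above map onto the constructor roughly as follows (a sketch with illustrative limits; the import path follows the module named in the file list).

```python
from langchain.agents import create_agent
from langchain.agents.middleware.model_call_limit import ModelCallLimitMiddleware

agent = create_agent(
    model="openai:gpt-4",
    tools=[],
    middleware=[
        # "end" jumps to the end of execution and injects an artificial AI message;
        # "error" raises ModelCallLimitExceededError instead.
        ModelCallLimitMiddleware(thread_limit=20, run_limit=5, exit_behavior="end"),
    ],
)
```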
langchain/agents/middleware/pii.py
CHANGED
@@ -421,7 +421,7 @@ class PIIMiddleware(AgentMiddleware):
     - `credit_card`: Credit card numbers (validated with Luhn algorithm)
     - `ip`: IP addresses (validated with stdlib)
     - `mac_address`: MAC addresses
-    - `url`: URLs (both http
+    - `url`: URLs (both `http`/`https` and bare URLs)
 
     Strategies:
     - `block`: Raise an exception when PII is detected
@@ -431,12 +431,12 @@ class PIIMiddleware(AgentMiddleware):
 
     Strategy Selection Guide:
 
-
-
-
-
-
-
+    | Strategy | Preserves Identity? | Best For |
+    | -------- | ------------------- | --------------------------------------- |
+    | `block` | N/A | Avoid PII completely |
+    | `redact` | No | General compliance, log sanitization |
+    | `mask` | No | Human readability, customer service UIs |
+    | `hash` | Yes (pseudonymous) | Analytics, debugging |
 
     Example:
     ```python
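A sketch tying the strategy table to the constructor. Only the built-in type names and strategy names come from this diff; the constructor shape (a PII type as the first argument plus a `strategy` keyword) is an assumption.

```python
from langchain.agents import create_agent
from langchain.agents.middleware.pii import PIIMiddleware

agent = create_agent(
    model="openai:gpt-4",
    tools=[],
    middleware=[
        # Assumed signature: redact credit card numbers (general compliance,
        # log sanitization per the table above).
        PIIMiddleware("credit_card", strategy="redact"),
    ],
)
```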
langchain/agents/middleware/tool_call_limit.py
CHANGED
@@ -204,7 +204,7 @@ class ToolCallLimitMiddleware(AgentMiddleware[ToolCallLimitState, Any]):
                 Defaults to "end".
 
         Raises:
-            ValueError: If both limits are None or if exit_behavior is invalid.
+            ValueError: If both limits are `None` or if `exit_behavior` is invalid.
         """
         super().__init__()
 
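This class appears to mirror `ModelCallLimitMiddleware`; the sketch below assumes analogous `thread_limit`, `run_limit`, and `exit_behavior` keywords, since only the `Raises` wording is visible in this hunk. The `search` tool is hypothetical.

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ToolCallLimitMiddleware


def search(query: str) -> str:
    """Hypothetical tool whose calls should be capped."""
    return f"Results for {query}"


agent = create_agent(
    model="openai:gpt-4",
    tools=[search],
    # Keyword names assumed by analogy with ModelCallLimitMiddleware.
    middleware=[ToolCallLimitMiddleware(thread_limit=50, run_limit=10, exit_behavior="end")],
)
```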
langchain/agents/middleware/tool_emulator.py
CHANGED
@@ -123,7 +123,7 @@ class LLMToolEmulator(AgentMiddleware):
 
         # Extract tool information for emulation
         tool_args = request.tool_call["args"]
-        tool_description = request.tool.description
+        tool_description = request.tool.description if request.tool else "No description available"
 
         # Build prompt for emulator LLM
         prompt = (
@@ -175,7 +175,7 @@ class LLMToolEmulator(AgentMiddleware):
 
         # Extract tool information for emulation
         tool_args = request.tool_call["args"]
-        tool_description = request.tool.description
+        tool_description = request.tool.description if request.tool else "No description available"
 
         # Build prompt for emulator LLM
         prompt = (
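The change above guards against `request.tool` being `None` when the emulator fabricates a response. Using the emulator itself is unchanged; a sketch assuming default construction emulates all tools, with `get_weather` as an illustrative tool.

```python
from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolEmulator


def get_weather(city: str) -> str:
    """Tool whose real implementation is replaced by LLM-emulated output in tests."""
    return f"Weather for {city}"


agent = create_agent(
    model="openai:gpt-4",
    tools=[get_weather],
    # Assumption: default construction emulates tool calls with LLM-generated results.
    middleware=[LLMToolEmulator()],
)
```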