langchain-1.2.3-py3-none-any.whl → langchain-1.2.4-py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
langchain/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """Main entrypoint into LangChain."""
 
- __version__ = "1.2.3"
+ __version__ = "1.2.4"
@@ -51,7 +51,7 @@ from langchain.chat_models import init_chat_model
  if TYPE_CHECKING:
  from collections.abc import Awaitable, Callable, Sequence
 
- from langchain_core.runnables import Runnable
+ from langchain_core.runnables import Runnable, RunnableConfig
  from langgraph.cache.base import BaseCache
  from langgraph.graph.state import CompiledStateGraph
  from langgraph.runtime import Runtime
@@ -288,6 +288,9 @@ def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None
  schema_name: Name for the generated `TypedDict`
  omit_flag: If specified, omit fields with this flag set (`'input'` or
  `'output'`)
+
+ Returns:
+ Merged schema as `TypedDict`
  """
  all_annotations = {}
 
@@ -311,7 +314,7 @@ def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None
  return TypedDict(schema_name, all_annotations) # type: ignore[operator]
 
 
- def _extract_metadata(type_: type) -> list:
+ def _extract_metadata(type_: type) -> list[Any]:
  """Extract metadata from a field type, handling Required/NotRequired and Annotated wrappers."""
  # Handle Required[Annotated[...]] or NotRequired[Annotated[...]]
  if get_origin(type_) in {Required, NotRequired}:
@@ -361,7 +364,9 @@ def _get_can_jump_to(middleware: AgentMiddleware[Any, Any], hook_name: str) -> l
  return []
 
 
- def _supports_provider_strategy(model: str | BaseChatModel, tools: list | None = None) -> bool:
+ def _supports_provider_strategy(
+ model: str | BaseChatModel, tools: list[BaseTool | dict[str, Any]] | None = None
+ ) -> bool:
  """Check if a model supports provider-specific structured output.
 
  Args:
@@ -400,7 +405,7 @@ def _supports_provider_strategy(model: str | BaseChatModel, tools: list | None =
 
  def _handle_structured_output_error(
  exception: Exception,
- response_format: ResponseFormat,
+ response_format: ResponseFormat[Any],
  ) -> tuple[bool, str]:
  """Handle structured output error. Returns `(should_retry, retry_tool_message)`."""
  if not isinstance(response_format, ToolStrategy):
@@ -414,18 +419,15 @@ def _handle_structured_output_error(
  return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
  if isinstance(handle_errors, str):
  return True, handle_errors
- if isinstance(handle_errors, type) and issubclass(handle_errors, Exception):
- if isinstance(exception, handle_errors):
+ if isinstance(handle_errors, type):
+ if issubclass(handle_errors, Exception) and isinstance(exception, handle_errors):
  return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
  return False, ""
  if isinstance(handle_errors, tuple):
  if any(isinstance(exception, exc_type) for exc_type in handle_errors):
  return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
  return False, ""
- if callable(handle_errors):
- # type narrowing not working appropriately w/ callable check, can fix later
- return True, handle_errors(exception) # type: ignore[return-value,call-arg]
- return False, ""
+ return True, handle_errors(exception)
 
 
  def _chain_tool_call_wrappers(
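The `_handle_structured_output_error` refactor above folds the callable branch into the final return. For readers tracing the dispatch order, here is a minimal standalone sketch of that logic, assuming `handle_errors` may be a bool, a string, an exception type, a tuple of exception types, or a callable; the function name and template text are illustrative, not the released implementation:

```python
from collections.abc import Callable

ERROR_TEMPLATE = "Error: {error}. Please respond again and fix your mistakes."  # placeholder template


def handle_structured_output_error_sketch(
    exception: Exception,
    handle_errors: bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str],
) -> tuple[bool, str]:
    """Mirror the dispatch order shown in the hunk: bool, str, exception type, tuple, then callable."""
    if handle_errors is True:
        return True, ERROR_TEMPLATE.format(error=str(exception))
    if handle_errors is False:
        return False, ""
    if isinstance(handle_errors, str):
        # A fixed retry message supplied by the caller.
        return True, handle_errors
    if isinstance(handle_errors, type):
        if issubclass(handle_errors, Exception) and isinstance(exception, handle_errors):
            return True, ERROR_TEMPLATE.format(error=str(exception))
        return False, ""
    if isinstance(handle_errors, tuple):
        if any(isinstance(exception, exc_type) for exc_type in handle_errors):
            return True, ERROR_TEMPLATE.format(error=str(exception))
        return False, ""
    # Anything else is treated as a callable that produces the retry message.
    return True, handle_errors(exception)


print(handle_structured_output_error_sketch(ValueError("bad json"), (ValueError, KeyError)))
# -> (True, 'Error: bad json. Please respond again and fix your mistakes.')
```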
@@ -455,10 +457,10 @@ def _chain_tool_call_wrappers(
 
  def composed(
  request: ToolCallRequest,
- execute: Callable[[ToolCallRequest], ToolMessage | Command],
- ) -> ToolMessage | Command:
+ execute: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
+ ) -> ToolMessage | Command[Any]:
  # Create a callable that invokes inner with the original execute
- def call_inner(req: ToolCallRequest) -> ToolMessage | Command:
+ def call_inner(req: ToolCallRequest) -> ToolMessage | Command[Any]:
  return inner(req, execute)
 
  # Outer can call call_inner multiple times
@@ -477,14 +479,14 @@ def _chain_tool_call_wrappers(
  def _chain_async_tool_call_wrappers(
  wrappers: Sequence[
  Callable[
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
- Awaitable[ToolMessage | Command],
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
+ Awaitable[ToolMessage | Command[Any]],
  ]
  ],
  ) -> (
  Callable[
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
- Awaitable[ToolMessage | Command],
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
+ Awaitable[ToolMessage | Command[Any]],
  ]
  | None
  ):
@@ -504,25 +506,25 @@ def _chain_async_tool_call_wrappers(
 
  def compose_two(
  outer: Callable[
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
- Awaitable[ToolMessage | Command],
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
+ Awaitable[ToolMessage | Command[Any]],
  ],
  inner: Callable[
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
- Awaitable[ToolMessage | Command],
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
+ Awaitable[ToolMessage | Command[Any]],
  ],
  ) -> Callable[
- [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]]],
- Awaitable[ToolMessage | Command],
+ [ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
+ Awaitable[ToolMessage | Command[Any]],
  ]:
  """Compose two async wrappers where outer wraps inner."""
 
  async def composed(
  request: ToolCallRequest,
- execute: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command]],
- ) -> ToolMessage | Command:
+ execute: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
+ ) -> ToolMessage | Command[Any]:
  # Create an async callable that invokes inner with the original execute
- async def call_inner(req: ToolCallRequest) -> ToolMessage | Command:
+ async def call_inner(req: ToolCallRequest) -> ToolMessage | Command[Any]:
  return await inner(req, execute)
 
  # Outer can call call_inner multiple times
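Both chaining helpers above rely on the same composition trick: `compose_two` turns an `(outer, inner)` pair into a single wrapper whose continuation runs the inner wrapper with the original `execute`. A self-contained sketch of that pattern using generic callables; the fold direction (first wrapper outermost) is an assumption for illustration, not something this diff shows:

```python
from collections.abc import Callable
from functools import reduce

# A "wrapper" takes a request plus an `execute` continuation and returns a result.
Wrapper = Callable[[object, Callable[[object], object]], object]


def chain_wrappers_sketch(wrappers: list[Wrapper]) -> Wrapper | None:
    """Fold wrappers into one, with wrappers[0] ending up as the outermost layer."""
    if not wrappers:
        return None

    def compose_two(outer: Wrapper, inner: Wrapper) -> Wrapper:
        def composed(request: object, execute: Callable[[object], object]) -> object:
            # `call_inner` is the continuation the outer wrapper sees; invoking it
            # runs the inner wrapper with the original execute function.
            def call_inner(req: object) -> object:
                return inner(req, execute)

            return outer(request, call_inner)

        return composed

    return reduce(compose_two, wrappers)


outer_tag: Wrapper = lambda req, execute: {"outer": execute(req)}
inner_tag: Wrapper = lambda req, execute: {"inner": execute(req)}
pipeline = chain_wrappers_sketch([outer_tag, inner_tag])
print(pipeline("request", lambda req: req))  # {'outer': {'inner': 'request'}}
```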
@@ -540,11 +542,11 @@ def _chain_async_tool_call_wrappers(
 
  def create_agent(
  model: str | BaseChatModel,
- tools: Sequence[BaseTool | Callable | dict[str, Any]] | None = None,
+ tools: Sequence[BaseTool | Callable[..., Any] | dict[str, Any]] | None = None,
  *,
  system_prompt: str | SystemMessage | None = None,
  middleware: Sequence[AgentMiddleware[StateT_co, ContextT]] = (),
- response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
+ response_format: ResponseFormat[ResponseT] | type[ResponseT] | dict[str, Any] | None = None,
  state_schema: type[AgentState[ResponseT]] | None = None,
  context_schema: type[ContextT] | None = None,
  checkpointer: Checkpointer | None = None,
@@ -553,7 +555,7 @@ def create_agent(
  interrupt_after: list[str] | None = None,
  debug: bool = False,
  name: str | None = None,
- cache: BaseCache | None = None,
+ cache: BaseCache[Any] | None = None,
  ) -> CompiledStateGraph[
  AgentState[ResponseT], ContextT, _InputAgentState, _OutputAgentState[ResponseT]
  ]:
@@ -653,6 +655,9 @@ def create_agent(
  Returns:
  A compiled `StateGraph` that can be used for chat interactions.
 
+ Raises:
+ AssertionError: If duplicate middleware instances are provided.
+
  The agent node calls the language model with the messages list (after applying
  the system prompt). If the resulting [`AIMessage`][langchain.messages.AIMessage]
  contains `tool_calls`, the graph will then call the tools. The tools node executes
@@ -701,7 +706,7 @@ def create_agent(
  # Raw schemas are wrapped in AutoStrategy to preserve auto-detection intent.
  # AutoStrategy is converted to ToolStrategy upfront to calculate tools during agent creation,
  # but may be replaced with ProviderStrategy later based on model capabilities.
- initial_response_format: ToolStrategy | ProviderStrategy | AutoStrategy | None
+ initial_response_format: ToolStrategy[Any] | ProviderStrategy[Any] | AutoStrategy[Any] | None
  if response_format is None:
  initial_response_format = None
  elif isinstance(response_format, (ToolStrategy, ProviderStrategy)):
@@ -716,13 +721,13 @@ def create_agent(
 
  # For AutoStrategy, convert to ToolStrategy to setup tools upfront
  # (may be replaced with ProviderStrategy later based on model)
- tool_strategy_for_setup: ToolStrategy | None = None
+ tool_strategy_for_setup: ToolStrategy[Any] | None = None
  if isinstance(initial_response_format, AutoStrategy):
  tool_strategy_for_setup = ToolStrategy(schema=initial_response_format.schema)
  elif isinstance(initial_response_format, ToolStrategy):
  tool_strategy_for_setup = initial_response_format
 
- structured_output_tools: dict[str, OutputToolBinding] = {}
+ structured_output_tools: dict[str, OutputToolBinding[Any]] = {}
  if tool_strategy_for_setup:
  for response_schema in tool_strategy_for_setup.schema_specs:
  structured_tool_info = OutputToolBinding.from_schema_spec(response_schema)
@@ -869,7 +874,7 @@ def create_agent(
  )
 
  def _handle_model_output(
- output: AIMessage, effective_response_format: ResponseFormat | None
+ output: AIMessage, effective_response_format: ResponseFormat[Any] | None
  ) -> dict[str, Any]:
  """Handle model output including structured responses.
 
@@ -972,7 +977,9 @@ def create_agent(
 
  return {"messages": [output]}
 
- def _get_bound_model(request: ModelRequest) -> tuple[Runnable, ResponseFormat | None]:
+ def _get_bound_model(
+ request: ModelRequest,
+ ) -> tuple[Runnable[Any, Any], ResponseFormat[Any] | None]:
  """Get the model with appropriate tool bindings.
 
  Performs auto-detection of strategy if needed based on model capabilities.
@@ -984,6 +991,10 @@ def create_agent(
  Tuple of `(bound_model, effective_response_format)` where
  `effective_response_format` is the actual strategy used (may differ from
  initial if auto-detected).
+
+ Raises:
+ ValueError: If middleware returned unknown client-side tool names.
+ ValueError: If `ToolStrategy` specifies tools not declared upfront.
  """
  # Validate ONLY client-side tools that need to exist in tool_node
  # Build map of available client-side tools from the ToolNode
@@ -1018,7 +1029,7 @@ def create_agent(
  raise ValueError(msg)
 
  # Determine effective response format (auto-detect if needed)
- effective_response_format: ResponseFormat | None
+ effective_response_format: ResponseFormat[Any] | None
  if isinstance(request.response_format, AutoStrategy):
  # User provided raw schema via AutoStrategy - auto-detect best strategy based on model
  if _supports_provider_strategy(request.model, tools=request.tools):
@@ -1112,7 +1123,7 @@ def create_agent(
  structured_response=structured_response,
  )
 
- def model_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
+ def model_node(state: AgentState[Any], runtime: Runtime[ContextT]) -> dict[str, Any]:
  """Sync model request handler with sequential middleware processing."""
  request = ModelRequest(
  model=model,
@@ -1167,7 +1178,7 @@ def create_agent(
  structured_response=structured_response,
  )
 
- async def amodel_node(state: AgentState, runtime: Runtime[ContextT]) -> dict[str, Any]:
+ async def amodel_node(state: AgentState[Any], runtime: Runtime[ContextT]) -> dict[str, Any]:
  """Async model request handler with sequential middleware processing."""
  request = ModelRequest(
  model=model,
@@ -1471,6 +1482,10 @@ def create_agent(
  can_jump_to=_get_can_jump_to(middleware_w_after_agent[0], "after_agent"),
  )
 
+ config: RunnableConfig = {"recursion_limit": 10_000}
+ if name:
+ config["metadata"] = {"lc_agent_name": name}
+
  return graph.compile(
  checkpointer=checkpointer,
  store=store,
@@ -1479,7 +1494,7 @@ def create_agent(
  debug=debug,
  name=name,
  cache=cache,
- ).with_config({"recursion_limit": 10_000})
+ ).with_config(config)
 
 
  def _resolve_jump(
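The last two hunks above replace the hard-coded `with_config({"recursion_limit": 10_000})` call with a `RunnableConfig` that also records the agent's name, so a named agent now carries `lc_agent_name` in its default run metadata. A small usage sketch; the model string and tool below are placeholders, not part of this diff:

```python
from langchain.agents import create_agent


def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."


# A named agent; per the diff, its compiled graph's default config now includes
# {"recursion_limit": 10_000, "metadata": {"lc_agent_name": "weather-agent"}}.
agent = create_agent(
    model="openai:gpt-4o-mini",  # placeholder model identifier
    tools=[get_weather],
    name="weather-agent",
)

result = agent.invoke({"messages": [{"role": "user", "content": "Weather in Paris?"}]})
```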
@@ -1516,7 +1531,7 @@ def _fetch_last_ai_and_tool_messages(
  def _make_model_to_tools_edge(
  *,
  model_destination: str,
- structured_output_tools: dict[str, OutputToolBinding],
+ structured_output_tools: dict[str, OutputToolBinding[Any]],
  end_destination: str,
  ) -> Callable[[dict[str, Any]], str | list[Send] | None]:
  def model_to_tools(
@@ -1600,7 +1615,7 @@ def _make_tools_to_model_edge(
  *,
  tool_node: ToolNode,
  model_destination: str,
- structured_output_tools: dict[str, OutputToolBinding],
+ structured_output_tools: dict[str, OutputToolBinding[Any]],
  end_destination: str,
  ) -> Callable[[dict[str, Any]], str | None]:
  def tools_to_model(state: dict[str, Any]) -> str | None:
langchain/agents/middleware/__init__.py CHANGED
@@ -1,32 +1,29 @@
  """Entrypoint to using [middleware](https://docs.langchain.com/oss/python/langchain/middleware) plugins with [Agents](https://docs.langchain.com/oss/python/langchain/agents).""" # noqa: E501
 
- from .context_editing import (
- ClearToolUsesEdit,
- ContextEditingMiddleware,
- )
- from .file_search import FilesystemFileSearchMiddleware
- from .human_in_the_loop import (
+ from langchain.agents.middleware.context_editing import ClearToolUsesEdit, ContextEditingMiddleware
+ from langchain.agents.middleware.file_search import FilesystemFileSearchMiddleware
+ from langchain.agents.middleware.human_in_the_loop import (
  HumanInTheLoopMiddleware,
  InterruptOnConfig,
  )
- from .model_call_limit import ModelCallLimitMiddleware
- from .model_fallback import ModelFallbackMiddleware
- from .model_retry import ModelRetryMiddleware
- from .pii import PIIDetectionError, PIIMiddleware
- from .shell_tool import (
+ from langchain.agents.middleware.model_call_limit import ModelCallLimitMiddleware
+ from langchain.agents.middleware.model_fallback import ModelFallbackMiddleware
+ from langchain.agents.middleware.model_retry import ModelRetryMiddleware
+ from langchain.agents.middleware.pii import PIIDetectionError, PIIMiddleware
+ from langchain.agents.middleware.shell_tool import (
  CodexSandboxExecutionPolicy,
  DockerExecutionPolicy,
  HostExecutionPolicy,
  RedactionRule,
  ShellToolMiddleware,
  )
- from .summarization import SummarizationMiddleware
- from .todo import TodoListMiddleware
- from .tool_call_limit import ToolCallLimitMiddleware
- from .tool_emulator import LLMToolEmulator
- from .tool_retry import ToolRetryMiddleware
- from .tool_selection import LLMToolSelectorMiddleware
- from .types import (
+ from langchain.agents.middleware.summarization import SummarizationMiddleware
+ from langchain.agents.middleware.todo import TodoListMiddleware
+ from langchain.agents.middleware.tool_call_limit import ToolCallLimitMiddleware
+ from langchain.agents.middleware.tool_emulator import LLMToolEmulator
+ from langchain.agents.middleware.tool_retry import ToolRetryMiddleware
+ from langchain.agents.middleware.tool_selection import LLMToolSelectorMiddleware
+ from langchain.agents.middleware.types import (
  AgentMiddleware,
  AgentState,
  ModelRequest,
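The hunk above only swaps the package's relative imports for absolute ones; the public import surface of `langchain.agents.middleware` should be unchanged. A quick sketch of what that means for downstream code:

```python
# Both spellings resolve to the same class object after this change.
from langchain.agents.middleware import SummarizationMiddleware
from langchain.agents.middleware.summarization import SummarizationMiddleware as Direct

assert SummarizationMiddleware is Direct
```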
langchain/agents/middleware/shell_tool.py CHANGED
@@ -15,8 +15,10 @@ from pathlib import Path
 
  try: # pragma: no cover - optional dependency on POSIX platforms
  import resource
+
+ _HAS_RESOURCE = True
  except ImportError: # pragma: no cover - non-POSIX systems
- resource = None # type: ignore[assignment]
+ _HAS_RESOURCE = False
 
 
  SHELL_TEMP_PREFIX = "langchain-shell-"
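The hunk above swaps the `resource = None` sentinel for a module-level `_HAS_RESOURCE` flag, which avoids the `# type: ignore[assignment]` and repeated `is None` checks. The general optional-import pattern, shown as a standalone sketch outside the langchain codebase:

```python
import sys

try:  # `resource` is POSIX-only; the import fails on Windows.
    import resource

    _HAS_RESOURCE = True
except ImportError:
    _HAS_RESOURCE = False


def set_cpu_limit(seconds: int) -> bool:
    """Apply an RLIMIT_CPU limit to the current process; return False where unsupported."""
    if not _HAS_RESOURCE:
        return False
    resource.setrlimit(resource.RLIMIT_CPU, (seconds, seconds))
    return True


if __name__ == "__main__":
    print(f"platform={sys.platform}, limited={set_cpu_limit(5)}")
```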
@@ -119,7 +121,7 @@ class HostExecutionPolicy(BaseExecutionPolicy):
  self._limits_requested = any(
  value is not None for value in (self.cpu_time_seconds, self.memory_bytes)
  )
- if self._limits_requested and resource is None:
+ if self._limits_requested and not _HAS_RESOURCE:
  msg = (
  "HostExecutionPolicy cpu/memory limits require the Python 'resource' module. "
  "Either remove the limits or run on a POSIX platform."
@@ -163,11 +165,9 @@ class HostExecutionPolicy(BaseExecutionPolicy):
  def _apply_post_spawn_limits(self, process: subprocess.Popen[str]) -> None:
  if not self._limits_requested or not self._can_use_prlimit():
  return
- if resource is None: # pragma: no cover - defensive
+ if not _HAS_RESOURCE: # pragma: no cover - defensive
  return
  pid = process.pid
- if pid is None:
- return
  try:
  prlimit = typing.cast("typing.Any", resource).prlimit
  if self.cpu_time_seconds is not None:
@@ -184,11 +184,7 @@ class HostExecutionPolicy(BaseExecutionPolicy):
 
  @staticmethod
  def _can_use_prlimit() -> bool:
- return (
- resource is not None
- and hasattr(resource, "prlimit")
- and sys.platform.startswith("linux")
- )
+ return _HAS_RESOURCE and hasattr(resource, "prlimit") and sys.platform.startswith("linux")
 
 
  @dataclass
@@ -251,9 +247,9 @@ class CodexSandboxExecutionPolicy(BaseExecutionPolicy):
  return self.platform
  if sys.platform.startswith("linux"):
  return "linux"
- if sys.platform == "darwin":
+ if sys.platform == "darwin": # type: ignore[unreachable, unused-ignore]
  return "macos"
- msg = (
+ msg = ( # type: ignore[unreachable, unused-ignore]
  "Codex sandbox policy could not determine a supported platform; "
  "set 'platform' explicitly."
  )
langchain/agents/middleware/pii.py CHANGED
@@ -48,7 +48,14 @@ Detector = Callable[[str], list[PIIMatch]]
 
 
  def detect_email(content: str) -> list[PIIMatch]:
- """Detect email addresses in content."""
+ """Detect email addresses in content.
+
+ Args:
+ content: The text content to scan for email addresses.
+
+ Returns:
+ A list of detected email matches.
+ """
  pattern = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
  return [
  PIIMatch(
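The detector functions documented in this and the following hunks are plain `str -> list[PIIMatch]` callables, so they can be exercised directly. A small sketch; only the `"type"` key of `PIIMatch` is confirmed by this diff, printing the whole match is just for inspection:

```python
from langchain.agents.middleware.pii import detect_email

matches = detect_email("Contact us at support@example.com or sales@example.org.")
for match in matches:
    # Per the diff, each PIIMatch carries at least a "type" key.
    print(match["type"], match)
```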
@@ -62,7 +69,14 @@ def detect_email(content: str) -> list[PIIMatch]:
 
 
  def detect_credit_card(content: str) -> list[PIIMatch]:
- """Detect credit card numbers in content using Luhn validation."""
+ """Detect credit card numbers in content using Luhn validation.
+
+ Args:
+ content: The text content to scan for credit card numbers.
+
+ Returns:
+ A list of detected credit card matches.
+ """
  pattern = r"\b\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}\b"
  matches = []
 
@@ -82,7 +96,14 @@ def detect_credit_card(content: str) -> list[PIIMatch]:
 
 
  def detect_ip(content: str) -> list[PIIMatch]:
- """Detect IPv4 or IPv6 addresses in content."""
+ """Detect IPv4 or IPv6 addresses in content.
+
+ Args:
+ content: The text content to scan for IP addresses.
+
+ Returns:
+ A list of detected IP address matches.
+ """
  matches: list[PIIMatch] = []
  ipv4_pattern = r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b"
 
@@ -105,7 +126,14 @@ def detect_ip(content: str) -> list[PIIMatch]:
 
 
  def detect_mac_address(content: str) -> list[PIIMatch]:
- """Detect MAC addresses in content."""
+ """Detect MAC addresses in content.
+
+ Args:
+ content: The text content to scan for MAC addresses.
+
+ Returns:
+ A list of detected MAC address matches.
+ """
  pattern = r"\b([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2}\b"
  return [
  PIIMatch(
@@ -119,7 +147,14 @@ def detect_mac_address(content: str) -> list[PIIMatch]:
 
 
  def detect_url(content: str) -> list[PIIMatch]:
- """Detect URLs in content using regex and stdlib validation."""
+ """Detect URLs in content using regex and stdlib validation.
+
+ Args:
+ content: The text content to scan for URLs.
+
+ Returns:
+ A list of detected URL matches.
+ """
  matches: list[PIIMatch] = []
 
  # Pattern 1: URLs with scheme (http:// or https://)
@@ -273,7 +308,20 @@ def apply_strategy(
  matches: list[PIIMatch],
  strategy: RedactionStrategy,
  ) -> str:
- """Apply the configured strategy to matches within content."""
+ """Apply the configured strategy to matches within content.
+
+ Args:
+ content: The content to apply strategy to.
+ matches: List of detected PII matches.
+ strategy: The redaction strategy to apply.
+
+ Returns:
+ The content with the strategy applied.
+
+ Raises:
+ PIIDetectionError: If the strategy is `'block'` and matches are found.
+ ValueError: If the strategy is unknown.
+ """
  if not matches:
  return content
  if strategy == "redact":
@@ -284,12 +332,24 @@
  return _apply_hash_strategy(content, matches)
  if strategy == "block":
  raise PIIDetectionError(matches[0]["type"], matches)
- msg = f"Unknown redaction strategy: {strategy}"
+ msg = f"Unknown redaction strategy: {strategy}" # type: ignore[unreachable]
  raise ValueError(msg)
 
 
  def resolve_detector(pii_type: str, detector: Detector | str | None) -> Detector:
- """Return a callable detector for the given configuration."""
+ """Return a callable detector for the given configuration.
+
+ Args:
+ pii_type: The PII type name.
+ detector: Optional custom detector or regex pattern. If `None`, a built-in detector
+ for the given PII type will be used.
+
+ Returns:
+ The resolved detector.
+
+ Raises:
+ ValueError: If an unknown PII type is specified without a custom detector or regex.
+ """
  if detector is None:
  if pii_type not in BUILTIN_DETECTORS:
  msg = (
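Per the docstring added above, `resolve_detector` accepts a callable, a regex pattern string, or `None` (falling back to a built-in detector for a known type). A sketch of both paths; the custom `employee_id` type, its pattern, and the assumption that `"email"` is one of the built-in type names are illustrative only:

```python
from langchain.agents.middleware.pii import resolve_detector

# Built-in type: rely on the packaged detector (assuming "email" is a built-in name).
email_detector = resolve_detector("email", None)
print(email_detector("reach me at jane@example.com"))

# Custom type: a regex pattern string is resolved into a detector callable.
employee_id_detector = resolve_detector("employee_id", r"EMP-\d{6}")
print(employee_id_detector("Badge EMP-004211 was scanned at 09:14."))
```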
@@ -325,7 +385,11 @@ class RedactionRule:
  detector: Detector | str | None = None
 
  def resolve(self) -> ResolvedRedactionRule:
- """Resolve runtime detector and return an immutable rule."""
+ """Resolve runtime detector and return an immutable rule.
+
+ Returns:
+ The resolved redaction rule.
+ """
  resolved_detector = resolve_detector(self.pii_type, self.detector)
  return ResolvedRedactionRule(
  pii_type=self.pii_type,
@@ -343,7 +407,14 @@ class ResolvedRedactionRule:
  detector: Detector
 
  def apply(self, content: str) -> tuple[str, list[PIIMatch]]:
- """Apply this rule to content, returning new content and matches."""
+ """Apply this rule to content, returning new content and matches.
+
+ Args:
+ content: The text content to scan and redact.
+
+ Returns:
+ A tuple of (updated content, list of detected matches).
+ """
  matches = self.detector(content)
  if not matches:
  return content, []
langchain/agents/middleware/context_editing.py CHANGED
@@ -152,8 +152,8 @@ class ClearToolUsesEdit(ContextEdit):
 
  return
 
+ @staticmethod
  def _build_cleared_tool_input_message(
- self,
  message: AIMessage,
  tool_call_id: str,
  ) -> AIMessage:
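`ClearToolUsesEdit` and `ContextEditingMiddleware`, both touched in this file's hunks, are typically combined and handed to `create_agent`. A construction sketch; the `edits=` keyword and the default `ClearToolUsesEdit()` arguments are assumptions not confirmed by this diff:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ClearToolUsesEdit, ContextEditingMiddleware

# Trim old tool outputs once the conversation grows large; defaults assumed here.
context_editing = ContextEditingMiddleware(edits=[ClearToolUsesEdit()])

agent = create_agent(
    model="openai:gpt-4o-mini",  # placeholder model identifier
    tools=[],
    middleware=[context_editing],
)
```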
@@ -220,7 +220,16 @@ class ContextEditingMiddleware(AgentMiddleware):
  request: ModelRequest,
  handler: Callable[[ModelRequest], ModelResponse],
  ) -> ModelCallResult:
- """Apply context edits before invoking the model via handler."""
+ """Apply context edits before invoking the model via handler.
+
+ Args:
+ request: Model request to execute (includes state and runtime).
+ handler: Async callback that executes the model request and returns
+ `ModelResponse`.
+
+ Returns:
+ The result of invoking the handler with potentially edited messages.
+ """
  if not request.messages:
  return handler(request)
 
@@ -248,7 +257,16 @@
  request: ModelRequest,
  handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
  ) -> ModelCallResult:
- """Apply context edits before invoking the model via handler (async version)."""
+ """Apply context edits before invoking the model via handler.
+
+ Args:
+ request: Model request to execute (includes state and runtime).
+ handler: Async callback that executes the model request and returns
+ `ModelResponse`.
+
+ Returns:
+ The result of invoking the handler with potentially edited messages.
+ """
  if not request.messages:
  return await handler(request)
 
langchain/agents/middleware/file_search.py CHANGED
@@ -352,8 +352,8 @@ class FilesystemFileSearchMiddleware(AgentMiddleware):
 
  return results
 
+ @staticmethod
  def _format_grep_results(
- self,
  results: dict[str, list[tuple[int, str]]],
  output_mode: str,
  ) -> str: