langchain 1.0.0a4__py3-none-any.whl → 1.0.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
 from typing import Any
 
-__version__ = "1.0.0a3"
+__version__ = "1.0.0a4"
 
 
 def __getattr__(name: str) -> Any:  # noqa: ANN401
@@ -1,13 +1,12 @@
 """Lazy import utilities."""
 
 from importlib import import_module
-from typing import Union
 
 
 def import_attr(
     attr_name: str,
-    module_name: Union[str, None],
-    package: Union[str, None],
+    module_name: str | None,
+    package: str | None,
 ) -> object:
     """Import an attribute from a module located in a package.
 
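
A recurring theme in this diff is replacing `typing.Union` with PEP 604 `X | Y` syntax. In function signatures this is purely an annotation change, but the spelling only evaluates at runtime on Python 3.10+ (or on older interpreters with postponed annotations), which presumably matches the package's supported Python range. A minimal illustrative sketch, not code from the package:

    # Illustrative only: the same optional-parameter pattern as the updated import_attr.
    from importlib import import_module

    def load_attr(attr_name: str, module_name: str | None, package: str | None) -> object:
        """Hypothetical helper mirroring the new-style annotations."""
        if module_name is None:
            return import_module(package or attr_name)
        return getattr(import_module(module_name, package), attr_name)

    print(load_attr("path", "os", None))  # <module 'posixpath' ...> or similar
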
@@ -12,7 +12,7 @@ particularly for summarization chains and other document processing workflows.
 from __future__ import annotations
 
 import inspect
-from typing import TYPE_CHECKING, Union
+from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable
@@ -24,11 +24,7 @@ if TYPE_CHECKING:
 
 
 def resolve_prompt(
-    prompt: Union[
-        str,
-        None,
-        Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
-    ],
+    prompt: str | None | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
     state: StateT,
     runtime: Runtime[ContextT],
     default_user_content: str,
@@ -61,6 +57,7 @@ def resolve_prompt(
         def custom_prompt(state, runtime):
             return [{"role": "system", "content": "Custom"}]
 
+
         messages = resolve_prompt(custom_prompt, state, runtime, "content", "default")
         messages = resolve_prompt("Custom system", state, runtime, "content", "default")
         messages = resolve_prompt(None, state, runtime, "content", "Default")
@@ -88,12 +85,10 @@ def resolve_prompt(
 
 
 async def aresolve_prompt(
-    prompt: Union[
-        str,
-        None,
-        Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]],
-        Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
-    ],
+    prompt: str
+    | None
+    | Callable[[StateT, Runtime[ContextT]], list[MessageLikeRepresentation]]
+    | Callable[[StateT, Runtime[ContextT]], Awaitable[list[MessageLikeRepresentation]]],
     state: StateT,
     runtime: Runtime[ContextT],
     default_user_content: str,
@@ -128,15 +123,13 @@ async def aresolve_prompt(
         async def async_prompt(state, runtime):
             return [{"role": "system", "content": "Async"}]
 
+
         def sync_prompt(state, runtime):
             return [{"role": "system", "content": "Sync"}]
 
-        messages = await aresolve_prompt(
-            async_prompt, state, runtime, "content", "default"
-        )
-        messages = await aresolve_prompt(
-            sync_prompt, state, runtime, "content", "default"
-        )
+
+        messages = await aresolve_prompt(async_prompt, state, runtime, "content", "default")
+        messages = await aresolve_prompt(sync_prompt, state, runtime, "content", "default")
         messages = await aresolve_prompt("Custom", state, runtime, "content", "default")
         ```
 
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeAlias, TypeVar, Union
+from typing import TYPE_CHECKING, Any, ClassVar, Protocol, TypeAlias, TypeVar
 
 from langgraph.graph._node import StateNode
 from pydantic import BaseModel
@@ -44,7 +44,7 @@ class DataclassLike(Protocol):
     __dataclass_fields__: ClassVar[dict[str, Field[Any]]]
 
 
-StateLike: TypeAlias = Union[TypedDictLikeV1, TypedDictLikeV2, DataclassLike, BaseModel]
+StateLike: TypeAlias = TypedDictLikeV1 | TypedDictLikeV2 | DataclassLike | BaseModel
 """Type alias for state-like types.
 
 It can either be a ``TypedDict``, ``dataclass``, or Pydantic ``BaseModel``.
@@ -58,7 +58,7 @@ It can either be a ``TypedDict``, ``dataclass``, or Pydantic ``BaseModel``.
 StateT = TypeVar("StateT", bound=StateLike)
 """Type variable used to represent the state in a graph."""
 
-ContextT = TypeVar("ContextT", bound=Union[StateLike, None])
+ContextT = TypeVar("ContextT", bound=StateLike | None)
 """Type variable for context types."""
 
 
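
Unlike the signature annotations earlier in the diff, the `StateLike` alias and the `ContextT` bound are evaluated eagerly at import time, so the PEP 604 unions here require Python 3.10+ regardless of `from __future__ import annotations`. A self-contained sketch of the same pattern with placeholder classes (not the package's protocols):

    # Sketch only; placeholder classes stand in for the protocols defined in the diff.
    from typing import TypeAlias, TypeVar

    class _FakeStateA: ...
    class _FakeStateB: ...

    # These expressions run at module import, so PEP 604 unions here need Python 3.10+.
    StateLikeExample: TypeAlias = _FakeStateA | _FakeStateB
    ContextTExample = TypeVar("ContextTExample", bound=StateLikeExample | None)
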
@@ -3,11 +3,11 @@
 from __future__ import annotations
 
 from collections.abc import Awaitable, Callable
-from typing import TypeVar, Union
+from typing import TypeVar
 
 from typing_extensions import ParamSpec
 
 P = ParamSpec("P")
 R = TypeVar("R")
 
-SyncOrAsync = Callable[P, Union[R, Awaitable[R]]]
+SyncOrAsync = Callable[P, R | Awaitable[R]]
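
`SyncOrAsync` describes a callable whose result may or may not need awaiting, which is the same situation `aresolve_prompt` handles for sync and async prompt functions. A sketch of one way such a result can be normalized; this is illustrative and not the package's own resolution logic:

    # Sketch: consuming a SyncOrAsync-style callable by awaiting only when needed.
    import asyncio
    import inspect
    from collections.abc import Awaitable, Callable
    from typing import TypeVar

    R = TypeVar("R")

    async def call_sync_or_async(fn: Callable[[], R | Awaitable[R]]) -> R:
        result = fn()
        if inspect.isawaitable(result):
            return await result
        return result

    async def _demo() -> None:
        async def async_greeting() -> str:
            return "hello (async)"

        print(await call_sync_or_async(lambda: "hello (sync)"))
        print(await call_sync_or_async(async_greeting))

    asyncio.run(_demo())
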
@@ -1,6 +1,6 @@
 """Interrupt types to use with agent inbox like setups."""
 
-from typing import Literal, Union
+from typing import Literal
 
 from typing_extensions import TypedDict
 
@@ -53,15 +53,15 @@ class HumanInterrupt(TypedDict):
         request = HumanInterrupt(
             action_request=ActionRequest(
                 action="run_command",  # The action being requested
-                args={"command": "ls", "args": ["-l"]}  # Arguments for the action
+                args={"command": "ls", "args": ["-l"]},  # Arguments for the action
             ),
             config=HumanInterruptConfig(
-                allow_ignore=True,  # Allow skipping this step
-                allow_respond=True,  # Allow text feedback
-                allow_edit=False,  # Don't allow editing
-                allow_accept=True  # Allow direct acceptance
+                allow_ignore=True,  # Allow skipping this step
+                allow_respond=True,  # Allow text feedback
+                allow_edit=False,  # Don't allow editing
+                allow_accept=True,  # Allow direct acceptance
             ),
-            description="Please review the command before execution"
+            description="Please review the command before execution",
         )
         # Send the interrupt request and get the response
         response = interrupt([request])[0]
@@ -74,19 +74,24 @@ class HumanInterrupt(TypedDict):
 
 
 class HumanResponse(TypedDict):
-    """The response provided by a human to an interrupt, which is returned when graph execution resumes.
+    """Human response.
+
+    The response provided by a human to an interrupt,
+    which is returned when graph execution resumes.
 
     Attributes:
         type: The type of response:
+
             - "accept": Approves the current state without changes
             - "ignore": Skips/ignores the current step
             - "response": Provides text feedback or instructions
             - "edit": Modifies the current state/content
         args: The response payload:
+
             - None: For ignore/accept actions
             - str: For text responses
             - ActionRequest: For edit actions with updated content
     """
 
     type: Literal["accept", "ignore", "response", "edit"]
-    args: Union[None, str, ActionRequest]
+    args: None | str | ActionRequest
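
The reworked docstring enumerates four response types. A small sketch of how a caller might branch on a response shaped like `HumanResponse`; the `Response` TypedDict and the handler behavior below are made up for illustration:

    # Sketch: dispatching on a HumanResponse-shaped dict. Handler strings are hypothetical.
    from typing import Literal, TypedDict

    class Response(TypedDict):
        type: Literal["accept", "ignore", "response", "edit"]
        args: None | str | dict

    def handle(response: Response) -> str:
        if response["type"] == "accept":
            return "proceed with the proposed action unchanged"
        if response["type"] == "ignore":
            return "skip this step"
        if response["type"] == "response":
            return f"incorporate feedback: {response['args']}"
        return f"apply edited action request: {response['args']}"

    print(handle({"type": "response", "args": "use -la instead"}))
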
@@ -65,7 +65,10 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         # Right now, we do not support multiple tool calls with interrupts
         if len(interrupt_tool_calls) > 1:
             tool_names = [t["name"] for t in interrupt_tool_calls]
-            msg = f"Called the following tools which require interrupts: {tool_names}\n\nYou may only call ONE tool that requires an interrupt at a time"
+            msg = (
+                f"Called the following tools which require interrupts: {tool_names}\n\n"
+                "You may only call ONE tool that requires an interrupt at a time"
+            )
             return {
                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
                 "jump_to": "model",
@@ -74,7 +77,11 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
         # Right now, we do not support interrupting a tool call if other tool calls exist
         if auto_approved_tool_calls:
             tool_names = [t["name"] for t in interrupt_tool_calls]
-            msg = f"Called the following tools which require interrupts: {tool_names}. You also called other tools that do not require interrupts. If you call a tool that requires and interrupt, you may ONLY call that tool."
+            msg = (
+                f"Called the following tools which require interrupts: {tool_names}. "
+                "You also called other tools that do not require interrupts. "
+                "If you call a tool that requires and interrupt, you may ONLY call that tool."
+            )
             return {
                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
                 "jump_to": "model",
@@ -6,9 +6,12 @@ from langchain.agents.middleware.types import AgentMiddleware, AgentState, Model
 
 
 class AnthropicPromptCachingMiddleware(AgentMiddleware):
-    """Prompt Caching Middleware - Optimizes API usage by caching conversation prefixes for Anthropic models.
+    """Prompt Caching Middleware.
 
-    Learn more about anthropic prompt caching [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching).
+    Optimizes API usage by caching conversation prefixes for Anthropic models.
+
+    Learn more about Anthropic prompt caching
+    `here <https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching>`__.
     """
 
     def __init__(
@@ -22,7 +25,8 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
         Args:
             type: The type of cache to use, only "ephemeral" is supported.
            ttl: The time to live for the cache, only "5m" and "1h" are supported.
-            min_messages_to_cache: The minimum number of messages until the cache is used, default is 0.
+            min_messages_to_cache: The minimum number of messages until the cache is used,
+                default is 0.
         """
         self.type = type
         self.ttl = ttl
@@ -34,15 +38,16 @@ class AnthropicPromptCachingMiddleware(AgentMiddleware):
             from langchain_anthropic import ChatAnthropic
         except ImportError:
             msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models."
+                "AnthropicPromptCachingMiddleware caching middleware only supports "
+                "Anthropic models."
                 "Please install langchain-anthropic."
             )
             raise ValueError(msg)
 
         if not isinstance(request.model, ChatAnthropic):
             msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models, "
-                f"not instances of {type(request.model)}"
+                "AnthropicPromptCachingMiddleware caching middleware only supports "
+                f"Anthropic models, not instances of {type(request.model)}"
             )
             raise ValueError(msg)
 
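
For orientation, a hedged sketch of how this middleware might be wired into an agent. The `create_agent` keyword names, the import path, and the model string below are assumptions inferred from this diff rather than verified API:

    # Sketch of wiring the middleware into an agent; import path and create_agent
    # parameters (model, tools, middleware) are assumptions based on this diff.
    from langchain.agents import create_agent
    from langchain.agents.middleware.prompt_caching import AnthropicPromptCachingMiddleware

    caching = AnthropicPromptCachingMiddleware(ttl="5m", min_messages_to_cache=2)
    agent = create_agent(
        model="anthropic:claude-sonnet-4-20250514",  # must resolve to a ChatAnthropic instance
        tools=[],
        middleware=[caching],
    )
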
@@ -48,7 +48,7 @@ Respond ONLY with the extracted context. Do not include any additional informati
 <messages>
 Messages to summarize:
 {messages}
-</messages>"""
+</messages>"""  # noqa: E501
 
 SUMMARY_PREFIX = "## Previous conversation summary:"
 
@@ -58,7 +58,8 @@ StateT = TypeVar("StateT", bound=AgentState)
 
 class AgentMiddleware(Generic[StateT]):
     """Base middleware class for an agent.
-    Subclass this and implement any of the defined methods to customize agent behavior between steps in the main agent loop.
+    Subclass this and implement any of the defined methods to customize agent behavior
+    between steps in the main agent loop.
     """
 
     state_schema: type[StateT] = cast("type[StateT]", AgentState)
@@ -2,10 +2,10 @@
 
 import itertools
 from collections.abc import Callable, Sequence
-from typing import Any, Union
+from typing import Any
 
 from langchain_core.language_models.chat_models import BaseChatModel
-from langchain_core.messages import AIMessage, AnyMessage, SystemMessage, ToolMessage
+from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langgraph.constants import END, START
@@ -59,7 +59,7 @@ def _filter_state_for_schema(state: dict[str, Any], schema: type) -> dict[str, A
     return {k: v for k, v in state.items() if k in schema_fields}
 
 
-def _supports_native_structured_output(model: Union[str, BaseChatModel]) -> bool:
+def _supports_native_structured_output(model: str | BaseChatModel) -> bool:
     """Check if a model supports native structured output."""
     model_name: str | None = None
     if isinstance(model, str):
@@ -211,24 +211,6 @@ def create_agent( # noqa: PLR0915
         context_schema=context_schema,
     )
 
-    def _prepare_model_request(state: dict[str, Any]) -> tuple[ModelRequest, list[AnyMessage]]:
-        """Prepare model request and messages."""
-        request = state.get("model_request") or ModelRequest(
-            model=model,
-            tools=default_tools,
-            system_prompt=system_prompt,
-            response_format=response_format,
-            messages=state["messages"],
-            tool_choice=None,
-        )
-
-        # prepare messages
-        messages = request.messages
-        if request.system_prompt:
-            messages = [SystemMessage(request.system_prompt), *messages]
-
-        return request, messages
-
     def _handle_model_output(state: dict[str, Any], output: AIMessage) -> dict[str, Any]:
         """Handle model output including structured responses."""
         # Handle structured output with native strategy
@@ -342,8 +324,14 @@ def create_agent( # noqa: PLR0915
 
     def model_request(state: dict[str, Any]) -> dict[str, Any]:
         """Sync model request handler with sequential middleware processing."""
-        # Start with the base model request
-        request, messages = _prepare_model_request(state)
+        request = ModelRequest(
+            model=model,
+            tools=default_tools,
+            system_prompt=system_prompt,
+            response_format=response_format,
+            messages=state["messages"],
+            tool_choice=None,
+        )
 
         # Apply modify_model_request middleware in sequence
         for m in middleware_w_modify_model_request:
@@ -351,15 +339,26 @@ def create_agent( # noqa: PLR0915
             filtered_state = _filter_state_for_schema(state, m.state_schema)
             request = m.modify_model_request(request, filtered_state)
 
-        # Get the bound model with the final request
+        # Get the final model and messages
         model_ = _get_bound_model(request)
+        messages = request.messages
+        if request.system_prompt:
+            messages = [SystemMessage(request.system_prompt), *messages]
+
         output = model_.invoke(messages)
         return _handle_model_output(state, output)
 
     async def amodel_request(state: dict[str, Any]) -> dict[str, Any]:
         """Async model request handler with sequential middleware processing."""
         # Start with the base model request
-        request, messages = _prepare_model_request(state)
+        request = ModelRequest(
+            model=model,
+            tools=default_tools,
+            system_prompt=system_prompt,
+            response_format=response_format,
+            messages=state["messages"],
+            tool_choice=None,
+        )
 
         # Apply modify_model_request middleware in sequence
         for m in middleware_w_modify_model_request:
@@ -367,8 +366,12 @@ def create_agent( # noqa: PLR0915
             filtered_state = _filter_state_for_schema(state, m.state_schema)
             request = m.modify_model_request(request, filtered_state)
 
-        # Get the bound model with the final request
+        # Get the final model and messages
         model_ = _get_bound_model(request)
+        messages = request.messages
+        if request.system_prompt:
+            messages = [SystemMessage(request.system_prompt), *messages]
+
         output = await model_.ainvoke(messages)
         return _handle_model_output(state, output)
 
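
The last several hunks inline the former `_prepare_model_request` helper: each handler now constructs a fresh `ModelRequest`, runs every `modify_model_request` middleware over it, and only then prepends the system prompt and binds the model, so middleware edits to the prompt and messages reach the final call. A standalone sketch of that ordering with simplified stand-in types (not langchain's classes):

    # Standalone sketch of the request-pipeline ordering this refactor establishes.
    # FakeModelRequest and the middleware here are simplified stand-ins.
    from dataclasses import dataclass, field

    @dataclass
    class FakeModelRequest:
        system_prompt: str | None
        messages: list[str] = field(default_factory=list)

    def uppercase_prompt_middleware(request: FakeModelRequest) -> FakeModelRequest:
        # Middleware may rewrite the request before the model is called.
        if request.system_prompt:
            request.system_prompt = request.system_prompt.upper()
        return request

    def run_model(state_messages: list[str]) -> list[str]:
        # 1. Build the base request fresh for every invocation.
        request = FakeModelRequest(system_prompt="be brief", messages=list(state_messages))
        # 2. Apply middleware in sequence.
        for middleware in (uppercase_prompt_middleware,):
            request = middleware(request)
        # 3. Only now prepend the (possibly modified) system prompt.
        messages = request.messages
        if request.system_prompt:
            messages = [f"[system] {request.system_prompt}", *messages]
        return messages

    print(run_model(["hello"]))  # ['[system] BE BRIEF', 'hello']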