langchain 1.0.0a3__py3-none-any.whl → 1.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain/__init__.py CHANGED
@@ -2,7 +2,7 @@
 
  from typing import Any
 
- __version__ = "1.0.0a1"
+ __version__ = "1.0.0a3"
 
 
  def __getattr__(name: str) -> Any:  # noqa: ANN401

langchain/agents/middleware/__init__.py ADDED
@@ -0,0 +1,15 @@
+ """Middleware plugins for agents."""
+
+ from .human_in_the_loop import HumanInTheLoopMiddleware
+ from .prompt_caching import AnthropicPromptCachingMiddleware
+ from .summarization import SummarizationMiddleware
+ from .types import AgentMiddleware, AgentState, ModelRequest
+
+ __all__ = [
+     "AgentMiddleware",
+     "AgentState",
+     "AnthropicPromptCachingMiddleware",
+     "HumanInTheLoopMiddleware",
+     "ModelRequest",
+     "SummarizationMiddleware",
+ ]

langchain/agents/middleware/_utils.py ADDED
@@ -0,0 +1,11 @@
+ """Utility functions for middleware."""
+
+ from typing import Any
+
+
+ def _generate_correction_tool_messages(content: str, tool_calls: list) -> list[dict[str, Any]]:
+     """Generate tool messages for model behavior correction."""
+     return [
+         {"role": "tool", "content": content, "tool_call_id": tool_call["id"]}
+         for tool_call in tool_calls
+     ]
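
For orientation, a small illustrative sketch of what this helper returns; the tool-call dicts below are invented for the example:

```python
from langchain.agents.middleware._utils import _generate_correction_tool_messages

# Hypothetical tool calls as they would appear on an AIMessage.
tool_calls = [
    {"name": "search", "args": {"q": "weather"}, "id": "call_1"},
    {"name": "send_email", "args": {"to": "a@example.com"}, "id": "call_2"},
]

# One corrective tool message is produced per tool call, all sharing the same content.
messages = _generate_correction_tool_messages("Please call one tool at a time.", tool_calls)
assert messages[0] == {
    "role": "tool",
    "content": "Please call one tool at a time.",
    "tool_call_id": "call_1",
}
```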

langchain/agents/middleware/human_in_the_loop.py ADDED
@@ -0,0 +1,128 @@
+ """Human in the loop middleware."""
+
+ from typing import Any
+
+ from langgraph.prebuilt.interrupt import (
+     ActionRequest,
+     HumanInterrupt,
+     HumanInterruptConfig,
+     HumanResponse,
+ )
+ from langgraph.types import interrupt
+
+ from langchain.agents.middleware._utils import _generate_correction_tool_messages
+ from langchain.agents.middleware.types import AgentMiddleware, AgentState
+
+ ToolInterruptConfig = dict[str, HumanInterruptConfig]
+
+
+ class HumanInTheLoopMiddleware(AgentMiddleware):
+     """Human in the loop middleware."""
+
+     def __init__(
+         self,
+         tool_configs: ToolInterruptConfig,
+         message_prefix: str = "Tool execution requires approval",
+     ) -> None:
+         """Initialize the human in the loop middleware.
+
+         Args:
+             tool_configs: The tool interrupt configs to use for the middleware.
+             message_prefix: The message prefix to use when constructing interrupt content.
+         """
+         super().__init__()
+         self.tool_configs = tool_configs
+         self.message_prefix = message_prefix
+
+     def after_model(self, state: AgentState) -> dict[str, Any] | None:
+         """Trigger HITL flows for relevant tool calls after an AIMessage."""
+         messages = state["messages"]
+         if not messages:
+             return None
+
+         last_message = messages[-1]
+
+         if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
+             return None
+
+         # Separate tool calls that need interrupts from those that don't
+         interrupt_tool_calls = []
+         auto_approved_tool_calls = []
+
+         for tool_call in last_message.tool_calls:
+             tool_name = tool_call["name"]
+             if tool_name in self.tool_configs:
+                 interrupt_tool_calls.append(tool_call)
+             else:
+                 auto_approved_tool_calls.append(tool_call)
+
+         # If no interrupts needed, return early
+         if not interrupt_tool_calls:
+             return None
+
+         approved_tool_calls = auto_approved_tool_calls.copy()
+
+         # Right now, we do not support multiple tool calls with interrupts
+         if len(interrupt_tool_calls) > 1:
+             tool_names = [t["name"] for t in interrupt_tool_calls]
+             msg = f"Called the following tools which require interrupts: {tool_names}\n\nYou may only call ONE tool that requires an interrupt at a time"
+             return {
+                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
+                 "jump_to": "model",
+             }
+
+         # Right now, we do not support interrupting a tool call if other tool calls exist
+         if auto_approved_tool_calls:
+             tool_names = [t["name"] for t in interrupt_tool_calls]
+             msg = f"Called the following tools which require interrupts: {tool_names}. You also called other tools that do not require interrupts. If you call a tool that requires an interrupt, you may ONLY call that tool."
+             return {
+                 "messages": _generate_correction_tool_messages(msg, last_message.tool_calls),
+                 "jump_to": "model",
+             }
+
+         # Only one tool call will need interrupts
+         tool_call = interrupt_tool_calls[0]
+         tool_name = tool_call["name"]
+         tool_args = tool_call["args"]
+         description = f"{self.message_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
+         tool_config = self.tool_configs[tool_name]
+
+         request: HumanInterrupt = {
+             "action_request": ActionRequest(
+                 action=tool_name,
+                 args=tool_args,
+             ),
+             "config": tool_config,
+             "description": description,
+         }
+
+         responses: list[HumanResponse] = interrupt([request])
+         response = responses[0]
+
+         if response["type"] == "accept":
+             approved_tool_calls.append(tool_call)
+         elif response["type"] == "edit":
+             edited: ActionRequest = response["args"]  # type: ignore[assignment]
+             new_tool_call = {
+                 "type": "tool_call",
+                 "name": tool_call["name"],
+                 "args": edited["args"],
+                 "id": tool_call["id"],
+             }
+             approved_tool_calls.append(new_tool_call)
+         elif response["type"] == "ignore":
+             return {"jump_to": "__end__"}
+         elif response["type"] == "response":
+             tool_message = {
+                 "role": "tool",
+                 "tool_call_id": tool_call["id"],
+                 "content": response["args"],
+             }
+             return {"messages": [tool_message], "jump_to": "model"}
+         else:
+             msg = f"Unknown response type: {response['type']}"
+             raise ValueError(msg)
+
+         last_message.tool_calls = approved_tool_calls
+
+         return {"messages": [last_message]}
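
A minimal usage sketch for this middleware, built on the `create_agent` factory added later in this diff; the `send_email` tool, model id, and interrupt config values are illustrative assumptions:

```python
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver

from langchain.agents.middleware import HumanInTheLoopMiddleware
from langchain.agents.middleware_agent import create_agent


@tool
def send_email(to: str, body: str) -> str:
    """Send an email (stub for illustration)."""
    return f"Sent to {to}"


# Pause before executing send_email; HumanInterruptConfig (from langgraph)
# declares which human responses are allowed for the paused tool call.
hitl = HumanInTheLoopMiddleware(
    tool_configs={
        "send_email": {
            "allow_accept": True,
            "allow_edit": True,
            "allow_respond": True,
            "allow_ignore": True,
        },
    },
)

graph = create_agent(
    model="anthropic:claude-3-5-sonnet-latest",
    tools=[send_email],
    middleware=[hitl],
)
# interrupt() suspends the run, so compile with a checkpointer to resume it later.
agent = graph.compile(checkpointer=MemorySaver())
```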

langchain/agents/middleware/prompt_caching.py ADDED
@@ -0,0 +1,57 @@
+ """Anthropic prompt caching middleware."""
+
+ from typing import Literal
+
+ from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest
+
+
+ class AnthropicPromptCachingMiddleware(AgentMiddleware):
+     """Prompt caching middleware - optimizes API usage by caching conversation prefixes for Anthropic models.
+
+     Learn more about Anthropic prompt caching [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching).
+     """
+
+     def __init__(
+         self,
+         type: Literal["ephemeral"] = "ephemeral",
+         ttl: Literal["5m", "1h"] = "5m",
+         min_messages_to_cache: int = 0,
+     ) -> None:
+         """Initialize the middleware with cache control settings.
+
+         Args:
+             type: The type of cache to use; only "ephemeral" is supported.
+             ttl: The time to live for the cache; only "5m" and "1h" are supported.
+             min_messages_to_cache: The minimum number of messages required before caching is applied; defaults to 0.
+         """
+         self.type = type
+         self.ttl = ttl
+         self.min_messages_to_cache = min_messages_to_cache
+
+     def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:  # noqa: ARG002
+         """Modify the model request to add cache control blocks."""
+         try:
+             from langchain_anthropic import ChatAnthropic
+         except ImportError:
+             msg = (
+                 "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models. "
+                 "Please install langchain-anthropic."
+             )
+             raise ValueError(msg)
+
+         if not isinstance(request.model, ChatAnthropic):
+             msg = (
+                 "AnthropicPromptCachingMiddleware caching middleware only supports Anthropic models, "
+                 f"not instances of {type(request.model)}"
+             )
+             raise ValueError(msg)
+
+         messages_count = (
+             len(request.messages) + 1 if request.system_prompt else len(request.messages)
+         )
+         if messages_count < self.min_messages_to_cache:
+             return request
+
+         request.model_settings["cache_control"] = {"type": self.type, "ttl": self.ttl}
+
+         return request
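
A short usage sketch, assuming `langchain-anthropic` is installed; the model id and thresholds are illustrative:

```python
from langchain_anthropic import ChatAnthropic

from langchain.agents.middleware import AnthropicPromptCachingMiddleware
from langchain.agents.middleware_agent import create_agent

# Reuse the cached prompt prefix for an hour once at least five messages have accumulated.
caching = AnthropicPromptCachingMiddleware(ttl="1h", min_messages_to_cache=5)

agent = create_agent(
    model=ChatAnthropic(model="claude-3-5-sonnet-latest"),
    tools=[],
    system_prompt="You are a helpful assistant.",
    middleware=[caching],
).compile()
```

Note that `modify_model_request` raises if the request's model is not a `ChatAnthropic` instance, so this middleware should only be combined with Anthropic models.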

langchain/agents/middleware/summarization.py ADDED
@@ -0,0 +1,248 @@
+ """Summarization middleware."""
+
+ import uuid
+ from collections.abc import Callable, Iterable
+ from typing import Any, cast
+
+ from langchain_core.messages import (
+     AIMessage,
+     AnyMessage,
+     MessageLikeRepresentation,
+     RemoveMessage,
+     ToolMessage,
+ )
+ from langchain_core.messages.human import HumanMessage
+ from langchain_core.messages.utils import count_tokens_approximately, trim_messages
+ from langgraph.graph.message import (
+     REMOVE_ALL_MESSAGES,
+ )
+
+ from langchain.agents.middleware.types import AgentMiddleware, AgentState
+ from langchain.chat_models import BaseChatModel, init_chat_model
+
+ TokenCounter = Callable[[Iterable[MessageLikeRepresentation]], int]
+
+ DEFAULT_SUMMARY_PROMPT = """<role>
+ Context Extraction Assistant
+ </role>
+
+ <primary_objective>
+ Your sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.
+ </primary_objective>
+
+ <objective_information>
+ You're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.
+ This context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.
+ </objective_information>
+
+ <instructions>
+ The conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.
+ You want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.
+ </instructions>
+
+ The user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:
+
+ With all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.
+ Respond ONLY with the extracted context. Do not include any additional information, or text before or after the extracted context.
+
+ <messages>
+ Messages to summarize:
+ {messages}
+ </messages>"""
+
+ SUMMARY_PREFIX = "## Previous conversation summary:"
+
+ _DEFAULT_MESSAGES_TO_KEEP = 20
+ _DEFAULT_TRIM_TOKEN_LIMIT = 4000
+ _DEFAULT_FALLBACK_MESSAGE_COUNT = 15
+ _SEARCH_RANGE_FOR_TOOL_PAIRS = 5
+
+
+ class SummarizationMiddleware(AgentMiddleware):
+     """Middleware that summarizes conversation history when token limits are approached.
+
+     This middleware monitors message token counts and automatically summarizes older
+     messages when a threshold is reached, preserving recent messages and maintaining
+     context continuity by ensuring AI/Tool message pairs remain together.
+     """
+
+     def __init__(
+         self,
+         model: str | BaseChatModel,
+         max_tokens_before_summary: int | None = None,
+         messages_to_keep: int = _DEFAULT_MESSAGES_TO_KEEP,
+         token_counter: TokenCounter = count_tokens_approximately,
+         summary_prompt: str = DEFAULT_SUMMARY_PROMPT,
+         summary_prefix: str = SUMMARY_PREFIX,
+     ) -> None:
+         """Initialize the summarization middleware.
+
+         Args:
+             model: The language model to use for generating summaries.
+             max_tokens_before_summary: Token threshold to trigger summarization.
+                 If None, summarization is disabled.
+             messages_to_keep: Number of recent messages to preserve after summarization.
+             token_counter: Function to count tokens in messages.
+             summary_prompt: Prompt template for generating summaries.
+             summary_prefix: Prefix added to system message when including summary.
+         """
+         super().__init__()
+
+         if isinstance(model, str):
+             model = init_chat_model(model)
+
+         self.model = model
+         self.max_tokens_before_summary = max_tokens_before_summary
+         self.messages_to_keep = messages_to_keep
+         self.token_counter = token_counter
+         self.summary_prompt = summary_prompt
+         self.summary_prefix = summary_prefix
+
+     def before_model(self, state: AgentState) -> dict[str, Any] | None:
+         """Process messages before model invocation, potentially triggering summarization."""
+         messages = state["messages"]
+         self._ensure_message_ids(messages)
+
+         total_tokens = self.token_counter(messages)
+         if (
+             self.max_tokens_before_summary is not None
+             and total_tokens < self.max_tokens_before_summary
+         ):
+             return None
+
+         cutoff_index = self._find_safe_cutoff(messages)
+
+         if cutoff_index <= 0:
+             return None
+
+         messages_to_summarize, preserved_messages = self._partition_messages(messages, cutoff_index)
+
+         summary = self._create_summary(messages_to_summarize)
+         new_messages = self._build_new_messages(summary)
+
+         return {
+             "messages": [
+                 RemoveMessage(id=REMOVE_ALL_MESSAGES),
+                 *new_messages,
+                 *preserved_messages,
+             ]
+         }
+
+     def _build_new_messages(self, summary: str) -> list[HumanMessage]:
+         return [
+             HumanMessage(content=f"Here is a summary of the conversation to date:\n\n{summary}")
+         ]
+
+     def _ensure_message_ids(self, messages: list[AnyMessage]) -> None:
+         """Ensure all messages have unique IDs for the add_messages reducer."""
+         for msg in messages:
+             if msg.id is None:
+                 msg.id = str(uuid.uuid4())
+
+     def _partition_messages(
+         self,
+         conversation_messages: list[AnyMessage],
+         cutoff_index: int,
+     ) -> tuple[list[AnyMessage], list[AnyMessage]]:
+         """Partition messages into those to summarize and those to preserve."""
+         messages_to_summarize = conversation_messages[:cutoff_index]
+         preserved_messages = conversation_messages[cutoff_index:]
+
+         return messages_to_summarize, preserved_messages
+
+     def _find_safe_cutoff(self, messages: list[AnyMessage]) -> int:
+         """Find safe cutoff point that preserves AI/Tool message pairs.
+
+         Returns the index where messages can be safely cut without separating
+         related AI and Tool messages. Returns 0 if no safe cutoff is found.
+         """
+         if len(messages) <= self.messages_to_keep:
+             return 0
+
+         target_cutoff = len(messages) - self.messages_to_keep
+
+         for i in range(target_cutoff, -1, -1):
+             if self._is_safe_cutoff_point(messages, i):
+                 return i
+
+         return 0
+
+     def _is_safe_cutoff_point(self, messages: list[AnyMessage], cutoff_index: int) -> bool:
+         """Check if cutting at index would separate AI/Tool message pairs."""
+         if cutoff_index >= len(messages):
+             return True
+
+         search_start = max(0, cutoff_index - _SEARCH_RANGE_FOR_TOOL_PAIRS)
+         search_end = min(len(messages), cutoff_index + _SEARCH_RANGE_FOR_TOOL_PAIRS)
+
+         for i in range(search_start, search_end):
+             if not self._has_tool_calls(messages[i]):
+                 continue
+
+             tool_call_ids = self._extract_tool_call_ids(cast("AIMessage", messages[i]))
+             if self._cutoff_separates_tool_pair(messages, i, cutoff_index, tool_call_ids):
+                 return False
+
+         return True
+
+     def _has_tool_calls(self, message: AnyMessage) -> bool:
+         """Check if message is an AI message with tool calls."""
+         return (
+             isinstance(message, AIMessage) and hasattr(message, "tool_calls") and message.tool_calls  # type: ignore[return-value]
+         )
+
+     def _extract_tool_call_ids(self, ai_message: AIMessage) -> set[str]:
+         """Extract tool call IDs from an AI message."""
+         tool_call_ids = set()
+         for tc in ai_message.tool_calls:
+             call_id = tc.get("id") if isinstance(tc, dict) else getattr(tc, "id", None)
+             if call_id is not None:
+                 tool_call_ids.add(call_id)
+         return tool_call_ids
+
+     def _cutoff_separates_tool_pair(
+         self,
+         messages: list[AnyMessage],
+         ai_message_index: int,
+         cutoff_index: int,
+         tool_call_ids: set[str],
+     ) -> bool:
+         """Check if cutoff separates an AI message from its corresponding tool messages."""
+         for j in range(ai_message_index + 1, len(messages)):
+             message = messages[j]
+             if isinstance(message, ToolMessage) and message.tool_call_id in tool_call_ids:
+                 ai_before_cutoff = ai_message_index < cutoff_index
+                 tool_before_cutoff = j < cutoff_index
+                 if ai_before_cutoff != tool_before_cutoff:
+                     return True
+         return False
+
+     def _create_summary(self, messages_to_summarize: list[AnyMessage]) -> str:
+         """Generate summary for the given messages."""
+         if not messages_to_summarize:
+             return "No previous conversation history."
+
+         trimmed_messages = self._trim_messages_for_summary(messages_to_summarize)
+         if not trimmed_messages:
+             return "Previous conversation was too long to summarize."
+
+         try:
+             response = self.model.invoke(self.summary_prompt.format(messages=trimmed_messages))
+             return cast("str", response.content).strip()
+         except Exception as e:  # noqa: BLE001
+             return f"Error generating summary: {e!s}"
+
+     def _trim_messages_for_summary(self, messages: list[AnyMessage]) -> list[AnyMessage]:
+         """Trim messages to fit within summary generation limits."""
+         try:
+             return trim_messages(
+                 messages,
+                 max_tokens=_DEFAULT_TRIM_TOKEN_LIMIT,
+                 token_counter=self.token_counter,
+                 start_on="human",
+                 strategy="last",
+                 allow_partial=True,
+                 include_system=True,
+             )
+         except Exception:  # noqa: BLE001
+             return messages[-_DEFAULT_FALLBACK_MESSAGE_COUNT:]
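
A hedged usage sketch; the model ids and token threshold are illustrative:

```python
from langchain.agents.middleware import SummarizationMiddleware
from langchain.agents.middleware_agent import create_agent

# Summarize once the running history passes ~4000 tokens, keeping the 20 newest
# messages verbatim; older messages are replaced by a single summary message.
summarizer = SummarizationMiddleware(
    model="openai:gpt-4o-mini",  # any init_chat_model() string or BaseChatModel works
    max_tokens_before_summary=4000,
    messages_to_keep=20,
)

agent = create_agent(
    model="openai:gpt-4o-mini",
    tools=[],
    middleware=[summarizer],
).compile()
```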

langchain/agents/middleware/types.py ADDED
@@ -0,0 +1,78 @@
+ """Types for middleware and agents."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Annotated, Any, Generic, Literal, cast
+
+ # needed as top level import for pydantic schema generation on AgentState
+ from langchain_core.messages import AnyMessage  # noqa: TC002
+ from langgraph.channels.ephemeral_value import EphemeralValue
+ from langgraph.graph.message import Messages, add_messages
+ from typing_extensions import NotRequired, Required, TypedDict, TypeVar
+
+ if TYPE_CHECKING:
+     from langchain_core.language_models.chat_models import BaseChatModel
+     from langchain_core.tools import BaseTool
+
+     from langchain.agents.structured_output import ResponseFormat
+
+ JumpTo = Literal["tools", "model", "__end__"]
+ """Destination to jump to when a middleware node returns."""
+
+ ResponseT = TypeVar("ResponseT")
+
+
+ @dataclass
+ class ModelRequest:
+     """Model request information for the agent."""
+
+     model: BaseChatModel
+     system_prompt: str | None
+     messages: list[AnyMessage]  # excluding system prompt
+     tool_choice: Any | None
+     tools: list[BaseTool]
+     response_format: ResponseFormat | None
+     model_settings: dict[str, Any] = field(default_factory=dict)
+
+
+ class AgentState(TypedDict, Generic[ResponseT]):
+     """State schema for the agent."""
+
+     messages: Required[Annotated[list[AnyMessage], add_messages]]
+     model_request: NotRequired[Annotated[ModelRequest | None, EphemeralValue]]
+     jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue]]
+     response: NotRequired[ResponseT]
+
+
+ class PublicAgentState(TypedDict, Generic[ResponseT]):
+     """Input / output schema for the agent."""
+
+     messages: Required[Messages]
+     response: NotRequired[ResponseT]
+
+
+ StateT = TypeVar("StateT", bound=AgentState)
+
+
+ class AgentMiddleware(Generic[StateT]):
+     """Base middleware class for an agent.
+
+     Subclass this and implement any of the defined methods to customize agent behavior between steps in the main agent loop.
+     """
+
+     state_schema: type[StateT] = cast("type[StateT]", AgentState)
+     """The schema for state passed to the middleware nodes."""
+
+     tools: list[BaseTool]
+     """Additional tools registered by the middleware."""
+
+     def before_model(self, state: StateT) -> dict[str, Any] | None:
+         """Logic to run before the model is called."""
+
+     def modify_model_request(self, request: ModelRequest, state: StateT) -> ModelRequest:  # noqa: ARG002
+         """Logic to modify request kwargs before the model is called."""
+         return request
+
+     def after_model(self, state: StateT) -> dict[str, Any] | None:
+         """Logic to run after the model is called."""
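
These three hooks are the whole middleware contract; a toy subclass might look like the sketch below. The logging behavior and temperature override are invented for illustration:

```python
from typing import Any

from langchain.agents.middleware.types import AgentMiddleware, AgentState, ModelRequest


class RequestLoggingMiddleware(AgentMiddleware):
    """Toy middleware: log message counts and pin sampling temperature."""

    def before_model(self, state: AgentState) -> dict[str, Any] | None:
        print(f"calling model with {len(state['messages'])} messages")
        return None  # returning None leaves the state untouched

    def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:
        # model_settings are forwarded as kwargs when the model is bound.
        request.model_settings["temperature"] = 0.0
        return request

    def after_model(self, state: AgentState) -> dict[str, Any] | None:
        last = state["messages"][-1]
        print(f"model returned a {type(last).__name__}")
        return None
```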

langchain/agents/middleware_agent.py ADDED
@@ -0,0 +1,554 @@
+ """Middleware agent implementation."""
+
+ import itertools
+ from collections.abc import Callable, Sequence
+ from typing import Any, Union
+
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import AIMessage, AnyMessage, SystemMessage, ToolMessage
+ from langchain_core.runnables import Runnable
+ from langchain_core.tools import BaseTool
+ from langgraph.constants import END, START
+ from langgraph.graph.state import StateGraph
+ from langgraph.typing import ContextT
+ from typing_extensions import TypedDict, TypeVar
+
+ from langchain.agents.middleware.types import (
+     AgentMiddleware,
+     AgentState,
+     JumpTo,
+     ModelRequest,
+     PublicAgentState,
+ )
+
+ # Import structured output classes from the old implementation
+ from langchain.agents.structured_output import (
+     MultipleStructuredOutputsError,
+     OutputToolBinding,
+     ProviderStrategy,
+     ProviderStrategyBinding,
+     ResponseFormat,
+     StructuredOutputValidationError,
+     ToolStrategy,
+ )
+ from langchain.agents.tool_node import ToolNode
+ from langchain.chat_models import init_chat_model
+
+ STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
+
+
+ def _merge_state_schemas(schemas: list[type]) -> type:
+     """Merge multiple TypedDict schemas into a single schema with all fields."""
+     if not schemas:
+         return AgentState
+
+     all_annotations = {}
+
+     for schema in schemas:
+         all_annotations.update(schema.__annotations__)
+
+     return TypedDict("MergedState", all_annotations)  # type: ignore[operator]
+
+
+ def _filter_state_for_schema(state: dict[str, Any], schema: type) -> dict[str, Any]:
+     """Filter state to only include fields defined in the given schema."""
+     if not hasattr(schema, "__annotations__"):
+         return state
+
+     schema_fields = set(schema.__annotations__.keys())
+     return {k: v for k, v in state.items() if k in schema_fields}
+
+
+ def _supports_native_structured_output(model: Union[str, BaseChatModel]) -> bool:
+     """Check if a model supports native structured output."""
+     model_name: str | None = None
+     if isinstance(model, str):
+         model_name = model
+     elif isinstance(model, BaseChatModel):
+         model_name = getattr(model, "model_name", None)
+
+     return (
+         "grok" in model_name.lower()
+         or any(part in model_name for part in ["gpt-5", "gpt-4.1", "gpt-oss", "o3-pro", "o3-mini"])
+         if model_name
+         else False
+     )
+
+
+ def _handle_structured_output_error(
+     exception: Exception,
+     response_format: ResponseFormat,
+ ) -> tuple[bool, str]:
+     """Handle structured output error. Returns (should_retry, retry_tool_message)."""
+     if not isinstance(response_format, ToolStrategy):
+         return False, ""
+
+     handle_errors = response_format.handle_errors
+
+     if handle_errors is False:
+         return False, ""
+     if handle_errors is True:
+         return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
+     if isinstance(handle_errors, str):
+         return True, handle_errors
+     if isinstance(handle_errors, type) and issubclass(handle_errors, Exception):
+         if isinstance(exception, handle_errors):
+             return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
+         return False, ""
+     if isinstance(handle_errors, tuple):
+         if any(isinstance(exception, exc_type) for exc_type in handle_errors):
+             return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
+         return False, ""
+     if callable(handle_errors):
+         # type narrowing not working appropriately w/ callable check, can fix later
+         return True, handle_errors(exception)  # type: ignore[return-value,call-arg]
+     return False, ""
+
+
+ ResponseT = TypeVar("ResponseT")
+
+
+ def create_agent(  # noqa: PLR0915
+     *,
+     model: str | BaseChatModel,
+     tools: Sequence[BaseTool | Callable | dict[str, Any]] | ToolNode | None = None,
+     system_prompt: str | None = None,
+     middleware: Sequence[AgentMiddleware] = (),
+     response_format: ResponseFormat[ResponseT] | type[ResponseT] | None = None,
+     context_schema: type[ContextT] | None = None,
+ ) -> StateGraph[
+     AgentState[ResponseT], ContextT, PublicAgentState[ResponseT], PublicAgentState[ResponseT]
+ ]:
+     """Create a middleware agent graph."""
+     # init chat model
+     if isinstance(model, str):
+         model = init_chat_model(model)
+
+     # Handle tools being None or empty
+     if tools is None:
+         tools = []
+
+     # Setup structured output
+     structured_output_tools: dict[str, OutputToolBinding] = {}
+     native_output_binding: ProviderStrategyBinding | None = None
+
+     if response_format is not None:
+         if not isinstance(response_format, (ToolStrategy, ProviderStrategy)):
+             # Auto-detect strategy based on model capabilities
+             if _supports_native_structured_output(model):
+                 response_format = ProviderStrategy(schema=response_format)
+             else:
+                 response_format = ToolStrategy(schema=response_format)
+
+         if isinstance(response_format, ToolStrategy):
+             # Setup tools strategy for structured output
+             for response_schema in response_format.schema_specs:
+                 structured_tool_info = OutputToolBinding.from_schema_spec(response_schema)
+                 structured_output_tools[structured_tool_info.tool.name] = structured_tool_info
+         elif isinstance(response_format, ProviderStrategy):
+             # Setup native strategy
+             native_output_binding = ProviderStrategyBinding.from_schema_spec(
+                 response_format.schema_spec
+             )
+     middleware_tools = [t for m in middleware for t in getattr(m, "tools", [])]
+
+     # Setup tools
+     tool_node: ToolNode | None = None
+     if isinstance(tools, list):
+         # Extract builtin provider tools (dict format)
+         builtin_tools = [t for t in tools if isinstance(t, dict)]
+         regular_tools = [t for t in tools if not isinstance(t, dict)]
+
+         # Add structured output tools to regular tools
+         structured_tools = [info.tool for info in structured_output_tools.values()]
+         all_tools = middleware_tools + regular_tools + structured_tools
+
+         # Only create ToolNode if we have tools
+         tool_node = ToolNode(tools=all_tools) if all_tools else None
+         default_tools = regular_tools + builtin_tools + structured_tools + middleware_tools
+     elif isinstance(tools, ToolNode):
+         # tools is a ToolNode
+         tool_node = tools
+         if tool_node:
+             default_tools = list(tool_node.tools_by_name.values()) + middleware_tools
+             # Update tool node to know about tools provided by middleware
+             all_tools = list(tool_node.tools_by_name.values()) + middleware_tools
+             tool_node = ToolNode(all_tools)
+             # Add structured output tools
+             for info in structured_output_tools.values():
+                 default_tools.append(info.tool)
+     else:
+         default_tools = (
+             list(structured_output_tools.values()) if structured_output_tools else []
+         ) + middleware_tools
+
+     # validate middleware
+     assert len({m.__class__.__name__ for m in middleware}) == len(middleware), (  # noqa: S101
+         "Please remove duplicate middleware instances."
+     )
+     middleware_w_before = [
+         m for m in middleware if m.__class__.before_model is not AgentMiddleware.before_model
+     ]
+     middleware_w_modify_model_request = [
+         m
+         for m in middleware
+         if m.__class__.modify_model_request is not AgentMiddleware.modify_model_request
+     ]
+     middleware_w_after = [
+         m for m in middleware if m.__class__.after_model is not AgentMiddleware.after_model
+     ]
+
+     # Collect all middleware state schemas and create merged schema
+     merged_state_schema: type[AgentState] = _merge_state_schemas(
+         [m.state_schema for m in middleware]
+     )
+
+     # create graph, add nodes
+     graph = StateGraph(
+         merged_state_schema,
+         input_schema=PublicAgentState,
+         output_schema=PublicAgentState,
+         context_schema=context_schema,
+     )
+
+     def _prepare_model_request(state: dict[str, Any]) -> tuple[ModelRequest, list[AnyMessage]]:
+         """Prepare model request and messages."""
+         request = state.get("model_request") or ModelRequest(
+             model=model,
+             tools=default_tools,
+             system_prompt=system_prompt,
+             response_format=response_format,
+             messages=state["messages"],
+             tool_choice=None,
+         )
+
+         # prepare messages
+         messages = request.messages
+         if request.system_prompt:
+             messages = [SystemMessage(request.system_prompt), *messages]
+
+         return request, messages
+
+     def _handle_model_output(state: dict[str, Any], output: AIMessage) -> dict[str, Any]:
+         """Handle model output including structured responses."""
+         # Handle structured output with native strategy
+         if isinstance(response_format, ProviderStrategy):
+             if not output.tool_calls and native_output_binding:
+                 structured_response = native_output_binding.parse(output)
+                 return {"messages": [output], "response": structured_response}
+             if state.get("response") is not None:
+                 return {"messages": [output], "response": None}
+             return {"messages": [output]}
+
+         # Handle structured output with tools strategy
+         if (
+             isinstance(response_format, ToolStrategy)
+             and isinstance(output, AIMessage)
+             and output.tool_calls
+         ):
+             structured_tool_calls = [
+                 tc for tc in output.tool_calls if tc["name"] in structured_output_tools
+             ]
+
+             if structured_tool_calls:
+                 exception: Exception | None = None
+                 if len(structured_tool_calls) > 1:
+                     # Handle multiple structured outputs error
+                     tool_names = [tc["name"] for tc in structured_tool_calls]
+                     exception = MultipleStructuredOutputsError(tool_names)
+                     should_retry, error_message = _handle_structured_output_error(
+                         exception, response_format
+                     )
+                     if not should_retry:
+                         raise exception
+
+                     # Add error messages and retry
+                     tool_messages = [
+                         ToolMessage(
+                             content=error_message,
+                             tool_call_id=tc["id"],
+                             name=tc["name"],
+                         )
+                         for tc in structured_tool_calls
+                     ]
+                     return {"messages": [output, *tool_messages]}
+
+                 # Handle single structured output
+                 tool_call = structured_tool_calls[0]
+                 try:
+                     structured_tool_binding = structured_output_tools[tool_call["name"]]
+                     structured_response = structured_tool_binding.parse(tool_call["args"])
+
+                     tool_message_content = (
+                         response_format.tool_message_content
+                         if response_format.tool_message_content
+                         else f"Returning structured response: {structured_response}"
+                     )
+
+                     return {
+                         "messages": [
+                             output,
+                             ToolMessage(
+                                 content=tool_message_content,
+                                 tool_call_id=tool_call["id"],
+                                 name=tool_call["name"],
+                             ),
+                         ],
+                         "response": structured_response,
+                     }
+                 except Exception as exc:  # noqa: BLE001
+                     exception = StructuredOutputValidationError(tool_call["name"], exc)
+                     should_retry, error_message = _handle_structured_output_error(
+                         exception, response_format
+                     )
+                     if not should_retry:
+                         raise exception
+
+                     return {
+                         "messages": [
+                             output,
+                             ToolMessage(
+                                 content=error_message,
+                                 tool_call_id=tool_call["id"],
+                                 name=tool_call["name"],
+                             ),
+                         ],
+                     }
+
+         # Standard response handling
+         if state.get("response") is not None:
+             return {"messages": [output], "response": None}
+         return {"messages": [output]}
+
+     def _get_bound_model(request: ModelRequest) -> Runnable:
+         """Get the model with appropriate tool bindings."""
+         if isinstance(response_format, ProviderStrategy):
+             # Use native structured output
+             kwargs = response_format.to_model_kwargs()
+             return request.model.bind_tools(
+                 request.tools, strict=True, **kwargs, **request.model_settings
+             )
+         if isinstance(response_format, ToolStrategy):
+             tool_choice = "any" if structured_output_tools else request.tool_choice
+             return request.model.bind_tools(
+                 request.tools, tool_choice=tool_choice, **request.model_settings
+             )
+         # Standard model binding
+         if request.tools:
+             return request.model.bind_tools(
+                 request.tools, tool_choice=request.tool_choice, **request.model_settings
+             )
+         return request.model.bind(**request.model_settings)
+
+     def model_request(state: dict[str, Any]) -> dict[str, Any]:
+         """Sync model request handler with sequential middleware processing."""
+         # Start with the base model request
+         request, messages = _prepare_model_request(state)
+
+         # Apply modify_model_request middleware in sequence
+         for m in middleware_w_modify_model_request:
+             # Filter state to only include fields defined in this middleware's schema
+             filtered_state = _filter_state_for_schema(state, m.state_schema)
+             request = m.modify_model_request(request, filtered_state)
+
+         # Get the bound model with the final request
+         model_ = _get_bound_model(request)
+         output = model_.invoke(messages)
+         return _handle_model_output(state, output)
+
+     async def amodel_request(state: dict[str, Any]) -> dict[str, Any]:
+         """Async model request handler with sequential middleware processing."""
+         # Start with the base model request
+         request, messages = _prepare_model_request(state)
+
+         # Apply modify_model_request middleware in sequence
+         for m in middleware_w_modify_model_request:
+             # Filter state to only include fields defined in this middleware's schema
+             filtered_state = _filter_state_for_schema(state, m.state_schema)
+             request = m.modify_model_request(request, filtered_state)
+
+         # Get the bound model with the final request
+         model_ = _get_bound_model(request)
+         output = await model_.ainvoke(messages)
+         return _handle_model_output(state, output)
+
+     # Use sync or async based on model capabilities
+     from langgraph._internal._runnable import RunnableCallable
+
+     graph.add_node("model_request", RunnableCallable(model_request, amodel_request))
+
+     # Only add tools node if we have tools
+     if tool_node is not None:
+         graph.add_node("tools", tool_node)
+
+     # Add middleware nodes
+     for m in middleware:
+         if m.__class__.before_model is not AgentMiddleware.before_model:
+             graph.add_node(
+                 f"{m.__class__.__name__}.before_model",
+                 m.before_model,
+                 input_schema=m.state_schema,
+             )
+
+         if m.__class__.after_model is not AgentMiddleware.after_model:
+             graph.add_node(
+                 f"{m.__class__.__name__}.after_model",
+                 m.after_model,
+                 input_schema=m.state_schema,
+             )
+
+     # add start edge
+     first_node = (
+         f"{middleware_w_before[0].__class__.__name__}.before_model"
+         if middleware_w_before
+         else "model_request"
+     )
+     last_node = (
+         f"{middleware_w_after[0].__class__.__name__}.after_model"
+         if middleware_w_after
+         else "model_request"
+     )
+     graph.add_edge(START, first_node)
+
+     # add conditional edges only if tools exist
+     if tool_node is not None:
+         graph.add_conditional_edges(
+             "tools",
+             _make_tools_to_model_edge(tool_node, first_node),
+             [first_node, END],
+         )
+         graph.add_conditional_edges(
+             last_node,
+             _make_model_to_tools_edge(first_node, structured_output_tools),
+             [first_node, "tools", END],
+         )
+     elif last_node == "model_request":
+         # If no tools, just go to END from model
+         graph.add_edge(last_node, END)
+     else:
+         # If after_model, then need to check for jump_to
+         _add_middleware_edge(
+             graph,
+             f"{middleware_w_after[0].__class__.__name__}.after_model",
+             END,
+             first_node,
+             tools_available=tool_node is not None,
+         )
+
+     # Add middleware edges (same as before)
+     if middleware_w_before:
+         for m1, m2 in itertools.pairwise(middleware_w_before):
+             _add_middleware_edge(
+                 graph,
+                 f"{m1.__class__.__name__}.before_model",
+                 f"{m2.__class__.__name__}.before_model",
+                 first_node,
+                 tools_available=tool_node is not None,
+             )
+         # Go directly to model_request after the last before_model
+         _add_middleware_edge(
+             graph,
+             f"{middleware_w_before[-1].__class__.__name__}.before_model",
+             "model_request",
+             first_node,
+             tools_available=tool_node is not None,
+         )
+
+     if middleware_w_after:
+         graph.add_edge("model_request", f"{middleware_w_after[-1].__class__.__name__}.after_model")
+         for idx in range(len(middleware_w_after) - 1, 0, -1):
+             m1 = middleware_w_after[idx]
+             m2 = middleware_w_after[idx - 1]
+             _add_middleware_edge(
+                 graph,
+                 f"{m1.__class__.__name__}.after_model",
+                 f"{m2.__class__.__name__}.after_model",
+                 first_node,
+                 tools_available=tool_node is not None,
+             )
+
+     return graph
+
+
+ def _resolve_jump(jump_to: JumpTo | None, first_node: str) -> str | None:
+     if jump_to == "model":
+         return first_node
+     if jump_to:
+         return jump_to
+     return None
+
+
+ def _make_model_to_tools_edge(
+     first_node: str, structured_output_tools: dict[str, OutputToolBinding]
+ ) -> Callable[[AgentState], str | None]:
+     def model_to_tools(state: AgentState) -> str | None:
+         if jump_to := state.get("jump_to"):
+             return _resolve_jump(jump_to, first_node)
+
+         message = state["messages"][-1]
+
+         # Check if this is a ToolMessage from structured output - if so, end
+         if isinstance(message, ToolMessage) and message.name in structured_output_tools:
+             return END
+
+         # Check for tool calls
+         if isinstance(message, AIMessage) and message.tool_calls:
+             # If all tool calls are for structured output, don't go to tools
+             non_structured_calls = [
+                 tc for tc in message.tool_calls if tc["name"] not in structured_output_tools
+             ]
+             if non_structured_calls:
+                 return "tools"
+
+         return END
+
+     return model_to_tools
+
+
+ def _make_tools_to_model_edge(
+     tool_node: ToolNode, next_node: str
+ ) -> Callable[[AgentState], str | None]:
+     def tools_to_model(state: AgentState) -> str | None:
+         ai_message = [m for m in state["messages"] if isinstance(m, AIMessage)][-1]
+         if all(
+             tool_node.tools_by_name[c["name"]].return_direct
+             for c in ai_message.tool_calls
+             if c["name"] in tool_node.tools_by_name
+         ):
+             return END
+
+         return next_node
+
+     return tools_to_model
+
+
+ def _add_middleware_edge(
+     graph: StateGraph[AgentState, ContextT, PublicAgentState, PublicAgentState],
+     name: str,
+     default_destination: str,
+     model_destination: str,
+     tools_available: bool,  # noqa: FBT001
+ ) -> None:
+     """Add an edge to the graph for a middleware node.
+
+     Args:
+         graph: The graph to add the edge to.
+         name: The name of the middleware node.
+         default_destination: The default destination for the edge.
+         model_destination: The destination for the edge to the model.
+         tools_available: Whether tools are available for the edge to potentially route to.
+     """
+
+     def jump_edge(state: AgentState) -> str:
+         return _resolve_jump(state.get("jump_to"), model_destination) or default_destination
+
+     destinations = [default_destination]
+     if default_destination != END:
+         destinations.append(END)
+     if tools_available:
+         destinations.append("tools")
+     if name != model_destination:
+         destinations.append(model_destination)
+
+     graph.add_conditional_edges(name, jump_edge, destinations)
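
Putting the pieces together, a hedged end-to-end sketch (model ids illustrative). Per the edge wiring above, `before_model` hooks run first-to-last across the middleware sequence, while `after_model` hooks run last-to-first:

```python
from langchain.agents.middleware import (
    AnthropicPromptCachingMiddleware,
    SummarizationMiddleware,
)
from langchain.agents.middleware_agent import create_agent

graph = create_agent(
    model="anthropic:claude-3-5-sonnet-latest",
    tools=[],
    system_prompt="You are a helpful assistant.",
    middleware=[
        SummarizationMiddleware(
            model="anthropic:claude-3-5-haiku-latest",
            max_tokens_before_summary=8000,
        ),
        AnthropicPromptCachingMiddleware(),
    ],
)
agent = graph.compile()
result = agent.invoke({"messages": [{"role": "user", "content": "hi"}]})
```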

langchain/agents/react_agent.py CHANGED
@@ -1,3 +1,5 @@
+ """React agent implementation."""
+
  from __future__ import annotations
 
  import inspect
@@ -43,6 +45,7 @@ from langgraph.typing import ContextT, StateT
  from pydantic import BaseModel
  from typing_extensions import NotRequired, TypedDict, TypeVar
 
+ from langchain.agents.middleware_agent import create_agent as create_middleware_agent
  from langchain.agents.structured_output import (
      MultipleStructuredOutputsError,
      OutputToolBinding,
@@ -64,6 +67,7 @@ if TYPE_CHECKING:
      from langchain.agents._internal._typing import (
          SyncOrAsync,
      )
+     from langchain.agents.types import AgentMiddleware
 
  StructuredResponseT = TypeVar("StructuredResponseT", default=None)
 
@@ -906,6 +910,7 @@ def create_agent(  # noqa: D417
      ],
      tools: Union[Sequence[Union[BaseTool, Callable, dict[str, Any]]], ToolNode],
      *,
+     middleware: Sequence[AgentMiddleware] = (),
      prompt: Prompt | None = None,
      response_format: Union[
          ToolStrategy[StructuredResponseT],
@@ -1112,6 +1117,29 @@ def create_agent(  # noqa: D417
              print(chunk)
          ```
      """
+     if middleware:
+         assert isinstance(model, str | BaseChatModel)  # noqa: S101
+         assert isinstance(prompt, str | None)  # noqa: S101
+         assert not isinstance(response_format, tuple)  # noqa: S101
+         assert pre_model_hook is None  # noqa: S101
+         assert post_model_hook is None  # noqa: S101
+         assert state_schema is None  # noqa: S101
+         return create_middleware_agent(  # type: ignore[return-value]
+             model=model,
+             tools=tools,
+             system_prompt=prompt,
+             middleware=middleware,
+             response_format=response_format,
+             context_schema=context_schema,
+         ).compile(
+             checkpointer=checkpointer,
+             store=store,
+             name=name,
+             interrupt_after=interrupt_after,
+             interrupt_before=interrupt_before,
+             debug=debug,
+         )
+
      # Handle deprecated config_schema parameter
      if (config_schema := deprecated_kwargs.pop("config_schema", MISSING)) is not MISSING:
          warn(
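
With this change, the public entry point accepts middleware directly and returns a compiled graph. A sketch under the assumptions the asserts encode (string-or-model `model`, string `prompt`, no pre/post model hooks, no custom `state_schema`); the model id is illustrative:

```python
from langchain.agents.middleware import SummarizationMiddleware
from langchain.agents.react_agent import create_agent

# Non-empty `middleware` routes through create_middleware_agent(...).compile(...)
# instead of the legacy react-agent construction path.
agent = create_agent(
    "openai:gpt-4o-mini",
    tools=[],
    middleware=[SummarizationMiddleware(model="openai:gpt-4o-mini")],
    prompt="You are a helpful assistant.",
)
```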

langchain/chat_models/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """Chat models."""
+
  from langchain_core.language_models import BaseChatModel
 
  from langchain.chat_models.base import init_chat_model

langchain/chat_models/base.py CHANGED
@@ -1,3 +1,5 @@
+ """Factory functions for chat models."""
+
  from __future__ import annotations
 
  import warnings

langchain/documents/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """Document."""
+
  from langchain_core.documents import Document
 
  __all__ = [

langchain/embeddings/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """Embeddings."""
+
  from langchain_core.embeddings import Embeddings
 
  from langchain.embeddings.base import init_embeddings

langchain/embeddings/base.py CHANGED
@@ -1,3 +1,5 @@
+ """Factory functions for embeddings."""
+
  import functools
  from importlib import util
  from typing import Any, Union

langchain/storage/encoder_backed.py CHANGED
@@ -1,3 +1,5 @@
+ """Encoder-backed store implementation."""
+
  from collections.abc import AsyncIterator, Callable, Iterator, Sequence
  from typing import (
      Any,

langchain/storage/exceptions.py CHANGED
@@ -1,3 +1,5 @@
+ """Store exceptions."""
+
  from langchain_core.stores import InvalidKeyException
 
  __all__ = ["InvalidKeyException"]

langchain/tools/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ """Tools."""
+
  from langchain_core.tools import (
      BaseTool,
      InjectedToolArg,

langchain-{1.0.0a3 → 1.0.0a4}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain
- Version: 1.0.0a3
+ Version: 1.0.0a4
  Summary: Building applications with LLMs through composability
  License: MIT
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
@@ -9,7 +9,7 @@ Project-URL: repository, https://github.com/langchain-ai/langchain
  Requires-Python: >=3.10
  Requires-Dist: langchain-core<2.0.0,>=0.3.75
  Requires-Dist: langchain-text-splitters<1.0.0,>=0.3.11
- Requires-Dist: langgraph>=0.6.0
+ Requires-Dist: langgraph>=0.6.7
  Requires-Dist: pydantic>=2.7.4
  Provides-Extra: anthropic
  Requires-Dist: langchain-anthropic; extra == "anthropic"

langchain-1.0.0a4.dist-info/RECORD ADDED
@@ -0,0 +1,40 @@
+ langchain-1.0.0a4.dist-info/METADATA,sha256=lYsbj5Aa0dYAvavgAd4az8Ujq0aUgV74N-NSVqfN02g,6731
+ langchain-1.0.0a4.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+ langchain-1.0.0a4.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+ langchain-1.0.0a4.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+ langchain/__init__.py,sha256=Gy73PvB6_XAlhmNFzilPvNZpCUHW5dcjwRYdfQG6JeQ,604
+ langchain/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain/_internal/_documents.py,sha256=z9wAPukoASOMw4WTFFBKCCZYsvsKbo-Cq6CeHjdq9eE,1045
+ langchain/_internal/_lazy_import.py,sha256=N52tzaTdeae9XqFhLWIsfE0Am-2P4y-6v7m6n9tv0Y0,1295
+ langchain/_internal/_prompts.py,sha256=BW0PZlMoaW4jFG77-_FPHCd8NSLLg2FgF1ihG9l200M,6027
+ langchain/_internal/_typing.py,sha256=pVioGzy4zB2SxB_SyegZ7liAN1yqNHSvMviUZ47dAik,1674
+ langchain/_internal/_utils.py,sha256=lG8X9muiRAWtQjRPudq-1x-wHbk0J3spu_rYZckVdYs,251
+ langchain/agents/__init__.py,sha256=NG2S3dic9L3i4sAD9mpgaTv6Dl4L3u45xxK6jn-I4W8,281
+ langchain/agents/_internal/__init__.py,sha256=5nNBeaeQIvv9IOQjY4_aNW8pffWzMXQgi0b6Nx-WghM,37
+ langchain/agents/_internal/_typing.py,sha256=Jm3ogPZjGGgM-kz46RwxpsbBYHMnY8_bY74Vw8PLdAU,283
+ langchain/agents/interrupt.py,sha256=8A6xV3Vjm8DEO7W9mlKJKRmpl89Q0cXfDzRsz8VjJeE,3309
+ langchain/agents/middleware/__init__.py,sha256=qS6U6996Ypsiu4ZbV20pRoWmzZS3zD86ZOJo3794W9M,446
+ langchain/agents/middleware/_utils.py,sha256=XMEwKA5WjALrHcU3k89KzMxOytn5Nt_j2IwB7hIk7_I,359
+ langchain/agents/middleware/human_in_the_loop.py,sha256=1qOczElAxWY9OCxGNqSNG4FC7olhv_mzw3onSf2_UrY,4816
+ langchain/agents/middleware/prompt_caching.py,sha256=tK4PprYfMbHRxC84sl7rnw-EhFdiZ7dJkRMl9Hy8xTU,2215
+ langchain/agents/middleware/summarization.py,sha256=glk9yqJF5FfqvU3PabQVtM5yL-htP8J0myOlRl-nl-g,10241
+ langchain/agents/middleware/types.py,sha256=6DUKRME_9S6pzYph0hdTLsD2O7UdNiuPFgt0rO7x1kI,2670
+ langchain/agents/middleware_agent.py,sha256=a8TXDceRtL7ml-LsI9pldfUmOQuCrAZh25GdJrTsozQ,21472
+ langchain/agents/react_agent.py,sha256=rqRqq-SBUBbT5PABKawhWT1IHwv-K4yW8E-clK1BuA8,49479
+ langchain/agents/structured_output.py,sha256=8D7U-HXRm1-WnMX3SLUz4wpNNwbYtkq5WSpjgOWrBpk,13289
+ langchain/agents/tool_node.py,sha256=IXNX8CNxeGWln7lUfObnlQDvz3XBmz2KND_THxen1AQ,46637
+ langchain/chat_models/__init__.py,sha256=PTq9qskQEbqXYAcUUxUXDsugOcwISgFhv4w40JgkbgU,181
+ langchain/chat_models/base.py,sha256=zIKF-fBXGjbkT0gQm4r-3kZ6RyCOb6AwC_fKosUVEuI,34929
+ langchain/documents/__init__.py,sha256=DjuBCy1TQbem4Vz8SsCcGAbZeFwW5KgGPvDrA8e9oGA,94
+ langchain/embeddings/__init__.py,sha256=sJZEfZ4ovEFU5JJnoVNCJIjtSCLT1w9r9uFw1hieRZ8,269
+ langchain/embeddings/base.py,sha256=EiN1hh1PWrLmlPm3a5GU7bKfFKsgeBz43MDRxydyAlg,7550
+ langchain/embeddings/cache.py,sha256=idyAGk9QELAsnEihcA_oFrei2H0T63wRD3BPYS4_dfY,14415
+ langchain/globals.py,sha256=sVLV-9ydiPDuDAZ_f1fVqKlUDY7UgcueBw8VPlxaqRQ,575
+ langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ langchain/storage/__init__.py,sha256=cvxc63N2nluqyVc7d9MeAj5mmO2iYl3GhcxMCpmqjUk,533
+ langchain/storage/encoder_backed.py,sha256=Xe60Uj0whfiIkFi5_Dw3hWyyDCALRb3e3Ev0duuEwuE,4284
+ langchain/storage/exceptions.py,sha256=Fl_8tON3KmByBKwXtno5WSj0-c2RiZxnhw3gv5aS2T8,114
+ langchain/storage/in_memory.py,sha256=ozrmu0EtaJJVSAzK_u7nzxWpr9OOscWkANHSg-qIVYQ,369
+ langchain/text_splitter.py,sha256=yxWs4secpnkfK6VZDiNJNdlYOrRZ18RQZj1S3xNQ73A,1554
+ langchain/tools/__init__.py,sha256=NYQzLxW2iI5Twu3voefVC-dJEI4Wgh7jC311CQEpvZs,252
+ langchain-1.0.0a4.dist-info/RECORD,,

langchain-1.0.0a3.dist-info/RECORD DELETED
@@ -1,33 +0,0 @@
- langchain-1.0.0a3.dist-info/METADATA,sha256=7FrDorI_AGxr9mNHcmqcz21yjtiDVSa5iPVXQpB5Bzg,6731
- langchain-1.0.0a3.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
- langchain-1.0.0a3.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
- langchain-1.0.0a3.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
- langchain/__init__.py,sha256=ISYSSsweDINkqsfffWO_bLHCgohQqoMvZAs_5WYxA1U,604
- langchain/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain/_internal/_documents.py,sha256=z9wAPukoASOMw4WTFFBKCCZYsvsKbo-Cq6CeHjdq9eE,1045
- langchain/_internal/_lazy_import.py,sha256=N52tzaTdeae9XqFhLWIsfE0Am-2P4y-6v7m6n9tv0Y0,1295
- langchain/_internal/_prompts.py,sha256=BW0PZlMoaW4jFG77-_FPHCd8NSLLg2FgF1ihG9l200M,6027
- langchain/_internal/_typing.py,sha256=pVioGzy4zB2SxB_SyegZ7liAN1yqNHSvMviUZ47dAik,1674
- langchain/_internal/_utils.py,sha256=lG8X9muiRAWtQjRPudq-1x-wHbk0J3spu_rYZckVdYs,251
- langchain/agents/__init__.py,sha256=NG2S3dic9L3i4sAD9mpgaTv6Dl4L3u45xxK6jn-I4W8,281
- langchain/agents/_internal/__init__.py,sha256=5nNBeaeQIvv9IOQjY4_aNW8pffWzMXQgi0b6Nx-WghM,37
- langchain/agents/_internal/_typing.py,sha256=Jm3ogPZjGGgM-kz46RwxpsbBYHMnY8_bY74Vw8PLdAU,283
- langchain/agents/interrupt.py,sha256=8A6xV3Vjm8DEO7W9mlKJKRmpl89Q0cXfDzRsz8VjJeE,3309
- langchain/agents/react_agent.py,sha256=z22IrQNDti0vMDD5WaaMiv5ZOZKTgdxB-4E9n01arQM,48374
- langchain/agents/structured_output.py,sha256=8D7U-HXRm1-WnMX3SLUz4wpNNwbYtkq5WSpjgOWrBpk,13289
- langchain/agents/tool_node.py,sha256=IXNX8CNxeGWln7lUfObnlQDvz3XBmz2KND_THxen1AQ,46637
- langchain/chat_models/__init__.py,sha256=LlKLVCO4FTYyQawC6du1YJxQbeBvYdtYEntPowy28aA,161
- langchain/chat_models/base.py,sha256=bUI34jo_bhPqNNhYfzZ4GqLb_fFHXwPmzczf4IkCbhw,34887
- langchain/documents/__init__.py,sha256=4z9Hy3P7gVRyepIceVROcCXmFSAlHV1UUdwrhE4TXlM,77
- langchain/embeddings/__init__.py,sha256=-a1o8_FQAFYNA9R0uqElDYiYNNUz_at9lpkZulYN9cQ,250
- langchain/embeddings/base.py,sha256=IkDCQ0eIZi6ipWH6yyGcLiJMI0i4GfyYHBs5POeOFgM,7509
- langchain/embeddings/cache.py,sha256=idyAGk9QELAsnEihcA_oFrei2H0T63wRD3BPYS4_dfY,14415
- langchain/globals.py,sha256=sVLV-9ydiPDuDAZ_f1fVqKlUDY7UgcueBw8VPlxaqRQ,575
- langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langchain/storage/__init__.py,sha256=cvxc63N2nluqyVc7d9MeAj5mmO2iYl3GhcxMCpmqjUk,533
- langchain/storage/encoder_backed.py,sha256=JYyOk3mpBQDi5Xi5bIQQAzG-NrcpPAeAsgVDzOcaqnk,4240
- langchain/storage/exceptions.py,sha256=P5FiMbxsTA0bLbc96i_DgWmQGOUEc1snGBtxn7sOjZk,89
- langchain/storage/in_memory.py,sha256=ozrmu0EtaJJVSAzK_u7nzxWpr9OOscWkANHSg-qIVYQ,369
- langchain/text_splitter.py,sha256=yxWs4secpnkfK6VZDiNJNdlYOrRZ18RQZj1S3xNQ73A,1554
- langchain/tools/__init__.py,sha256=Fhohmkn63aAN66tqfuTVN9p5Kb1kauXVOkrvpl0x-RQ,238
- langchain-1.0.0a3.dist-info/RECORD,,