langchain 1.0.0a14__py3-none-any.whl → 1.0.0a15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain was flagged by the registry scanner.
langchain/__init__.py CHANGED
@@ -1,3 +1,3 @@
 """Main entrypoint into LangChain."""
 
-__version__ = "1.0.0a13"
+__version__ = "1.0.0a14"
langchain/agents/factory.py CHANGED
@@ -537,13 +537,12 @@ def create_agent( # noqa: PLR0915
             (e.g., `"openai:gpt-4"`), a chat model instance (e.g., `ChatOpenAI()`).
         tools: A list of tools, dicts, or callables. If `None` or an empty list,
             the agent will consist of a model node without a tool calling loop.
-        system_prompt: An optional system prompt for the LLM. If provided as a string,
-            it will be converted to a SystemMessage and added to the beginning
-            of the message list.
+        system_prompt: An optional system prompt for the LLM. Prompts are converted to a
+            `SystemMessage` and added to the beginning of the message list.
         middleware: A sequence of middleware instances to apply to the agent.
             Middleware can intercept and modify agent behavior at various stages.
         response_format: An optional configuration for structured responses.
-            Can be a ToolStrategy, ProviderStrategy, or a Pydantic model class.
+            Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
             If provided, the agent will handle structured output during the
             conversation flow. Raw schemas will be wrapped in an appropriate strategy
             based on model capabilities.
@@ -560,14 +559,14 @@ def create_agent( # noqa: PLR0915
             This is useful if you want to return directly or run additional processing
             on an output.
         debug: A flag indicating whether to enable debug mode.
-        name: An optional name for the CompiledStateGraph.
+        name: An optional name for the `CompiledStateGraph`.
             This name will be automatically used when adding the agent graph to
             another graph as a subgraph node - particularly useful for building
             multi-agent systems.
-        cache: An optional BaseCache instance to enable caching of graph execution.
+        cache: An optional `BaseCache` instance to enable caching of graph execution.
 
     Returns:
-        A compiled StateGraph that can be used for chat interactions.
+        A compiled `StateGraph` that can be used for chat interactions.
 
     The agent node calls the language model with the messages list (after applying
     the system prompt). If the resulting AIMessage contains `tool_calls`, the graph will
@@ -1032,11 +1031,7 @@ def create_agent( # noqa: PLR0915
         if response.structured_response is not None:
            state_updates["structured_response"] = response.structured_response
 
-        return {
-            "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
-            "run_model_call_count": state.get("run_model_call_count", 0) + 1,
-            **state_updates,
-        }
+        return state_updates
 
     async def _execute_model_async(request: ModelRequest) -> ModelResponse:
         """Execute model asynchronously and return response.
@@ -1087,11 +1082,7 @@ def create_agent( # noqa: PLR0915
         if response.structured_response is not None:
            state_updates["structured_response"] = response.structured_response
 
-        return {
-            "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
-            "run_model_call_count": state.get("run_model_call_count", 0) + 1,
-            **state_updates,
-        }
+        return state_updates
 
     # Use sync or async based on model capabilities
     graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))
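
The docstring changes above cover `create_agent`'s documented parameters; the two `return` edits strip the call-count bookkeeping out of the model node (it moves into `ModelCallLimitMiddleware`, below). For orientation, a minimal sketch of the documented API; the tool body, model string, and prompt are illustrative, not taken from the diff:

```python
from langchain.agents import create_agent


def get_weather(city: str) -> str:
    """Hypothetical tool: return a canned weather report."""
    return f"It is sunny in {city}."


# Mirrors the documented parameters: a model identifier (or chat model
# instance), tools as callables, and a system_prompt that is converted
# to a SystemMessage at the front of the message list.
agent = create_agent(
    model="openai:gpt-4o",
    tools=[get_weather],
    system_prompt="You are a concise weather assistant.",
)

result = agent.invoke({"messages": [{"role": "user", "content": "Weather in Paris?"}]})
```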
langchain/agents/middleware/__init__.py CHANGED
@@ -11,9 +11,8 @@ from .human_in_the_loop import (
 from .model_call_limit import ModelCallLimitMiddleware
 from .model_fallback import ModelFallbackMiddleware
 from .pii import PIIDetectionError, PIIMiddleware
-from .planning import PlanningMiddleware
-from .prompt_caching import AnthropicPromptCachingMiddleware
 from .summarization import SummarizationMiddleware
+from .todo import TodoListMiddleware
 from .tool_call_limit import ToolCallLimitMiddleware
 from .tool_emulator import LLMToolEmulator
 from .tool_selection import LLMToolSelectorMiddleware
@@ -21,6 +20,7 @@ from .types import (
     AgentMiddleware,
     AgentState,
     ModelRequest,
+    ModelResponse,
     after_agent,
     after_model,
     before_agent,
@@ -28,13 +28,12 @@ from .types import (
     dynamic_prompt,
     hook_config,
     wrap_model_call,
+    wrap_tool_call,
 )
 
 __all__ = [
     "AgentMiddleware",
     "AgentState",
-    # should move to langchain-anthropic if we decide to keep it
-    "AnthropicPromptCachingMiddleware",
     "ClearToolUsesEdit",
     "ContextEditingMiddleware",
     "HumanInTheLoopMiddleware",
@@ -44,10 +43,11 @@ __all__ = [
     "ModelCallLimitMiddleware",
     "ModelFallbackMiddleware",
     "ModelRequest",
+    "ModelResponse",
     "PIIDetectionError",
     "PIIMiddleware",
-    "PlanningMiddleware",
     "SummarizationMiddleware",
+    "TodoListMiddleware",
     "ToolCallLimitMiddleware",
     "after_agent",
     "after_model",
@@ -56,4 +56,5 @@ __all__ = [
     "dynamic_prompt",
     "hook_config",
     "wrap_model_call",
+    "wrap_tool_call",
 ]
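
In practical terms: `PlanningMiddleware` and `AnthropicPromptCachingMiddleware` are no longer importable from this module in a15, while `TodoListMiddleware`, `ModelResponse`, and `wrap_tool_call` are new. A sketch of the import change for code that used the planning middleware:

```python
# 1.0.0a14 (no longer works in a15):
# from langchain.agents.middleware import PlanningMiddleware

# 1.0.0a15: the same middleware under its new name, plus the new exports.
from langchain.agents.middleware import (
    ModelResponse,
    TodoListMiddleware,
    wrap_tool_call,
)
```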
langchain/agents/middleware/context_editing.py CHANGED
@@ -8,7 +8,7 @@ with any LangChain chat model.
 
 from __future__ import annotations
 
-from collections.abc import Callable, Iterable, Sequence
+from collections.abc import Awaitable, Callable, Iterable, Sequence
 from dataclasses import dataclass
 from typing import Literal
 
@@ -239,6 +239,34 @@ class ContextEditingMiddleware(AgentMiddleware):
 
         return handler(request)
 
+    async def awrap_model_call(
+        self,
+        request: ModelRequest,
+        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+    ) -> ModelCallResult:
+        """Apply context edits before invoking the model via handler (async version)."""
+        if not request.messages:
+            return await handler(request)
+
+        if self.token_count_method == "approximate":  # noqa: S105
+
+            def count_tokens(messages: Sequence[BaseMessage]) -> int:
+                return count_tokens_approximately(messages)
+        else:
+            system_msg = (
+                [SystemMessage(content=request.system_prompt)] if request.system_prompt else []
+            )
+
+            def count_tokens(messages: Sequence[BaseMessage]) -> int:
+                return request.model.get_num_tokens_from_messages(
+                    system_msg + list(messages), request.tools
+                )
+
+        for edit in self.edits:
+            edit.apply(request.messages, count_tokens=count_tokens)
+
+        return await handler(request)
+
 
 __all__ = [
     "ClearToolUsesEdit",
langchain/agents/middleware/human_in_the_loop.py CHANGED
@@ -11,23 +11,23 @@ from langchain.agents.middleware.types import AgentMiddleware, AgentState
 
 
 class Action(TypedDict):
-    """Represents an action with a name and arguments."""
+    """Represents an action with a name and args."""
 
     name: str
     """The type or name of action being requested (e.g., "add_numbers")."""
 
-    arguments: dict[str, Any]
-    """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+    args: dict[str, Any]
+    """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
 
 
 class ActionRequest(TypedDict):
-    """Represents an action request with a name, arguments, and description."""
+    """Represents an action request with a name, args, and description."""
 
     name: str
     """The name of the action being requested."""
 
-    arguments: dict[str, Any]
-    """Key-value pairs of arguments needed for the action (e.g., {"a": 1, "b": 2})."""
+    args: dict[str, Any]
+    """Key-value pairs of args needed for the action (e.g., {"a": 1, "b": 2})."""
 
     description: NotRequired[str]
     """The description of the action to be reviewed."""
@@ -45,8 +45,8 @@ class ReviewConfig(TypedDict):
     allowed_decisions: list[DecisionType]
     """The decisions that are allowed for this request."""
 
-    arguments_schema: NotRequired[dict[str, Any]]
-    """JSON schema for the arguments associated with the action, if edits are allowed."""
+    args_schema: NotRequired[dict[str, Any]]
+    """JSON schema for the args associated with the action, if edits are allowed."""
 
 
 class HITLRequest(TypedDict):
@@ -150,8 +150,8 @@ class InterruptOnConfig(TypedDict):
         )
         ```
     """
-    arguments_schema: NotRequired[dict[str, Any]]
-    """JSON schema for the arguments associated with the action, if edits are allowed."""
+    args_schema: NotRequired[dict[str, Any]]
+    """JSON schema for the args associated with the action, if edits are allowed."""
 
 
 class HumanInTheLoopMiddleware(AgentMiddleware):
@@ -214,12 +214,12 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
             # Create ActionRequest with description
             action_request = ActionRequest(
                 name=tool_name,
-                arguments=tool_args,
+                args=tool_args,
                 description=description,
             )
 
             # Create ReviewConfig
-            # eventually can get tool information and populate arguments_schema from there
+            # eventually can get tool information and populate args_schema from there
             review_config = ReviewConfig(
                 action_name=tool_name,
                 allowed_decisions=config["allowed_decisions"],
@@ -244,7 +244,7 @@ class HumanInTheLoopMiddleware(AgentMiddleware):
                 ToolCall(
                     type="tool_call",
                     name=edited_action["name"],
-                    args=edited_action["arguments"],
+                    args=edited_action["args"],
                     id=tool_call["id"],
                 ),
                 None,
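
The user-visible effect of this rename is that human-review payloads now carry tool parameters under `args` rather than `arguments`. A sketch of the new `ActionRequest` shape; the tool name and values are hypothetical:

```python
from langchain.agents.middleware.human_in_the_loop import ActionRequest

# "args" replaces the old "arguments" key throughout the HITL types.
request = ActionRequest(
    name="send_email",                              # hypothetical tool
    args={"to": "a@example.com", "body": "Hello"},  # was: arguments=...
    description="Review outbound email before sending.",
)
```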
langchain/agents/middleware/model_call_limit.py CHANGED
@@ -2,16 +2,33 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Annotated, Any, Literal
 
 from langchain_core.messages import AIMessage
+from langgraph.channels.untracked_value import UntrackedValue
+from typing_extensions import NotRequired
 
-from langchain.agents.middleware.types import AgentMiddleware, AgentState, hook_config
+from langchain.agents.middleware.types import (
+    AgentMiddleware,
+    AgentState,
+    PrivateStateAttr,
+    hook_config,
+)
 
 if TYPE_CHECKING:
     from langgraph.runtime import Runtime
 
 
+class ModelCallLimitState(AgentState):
+    """State schema for ModelCallLimitMiddleware.
+
+    Extends AgentState with model call tracking fields.
+    """
+
+    thread_model_call_count: NotRequired[Annotated[int, PrivateStateAttr]]
+    run_model_call_count: NotRequired[Annotated[int, UntrackedValue, PrivateStateAttr]]
+
+
 def _build_limit_exceeded_message(
     thread_count: int,
     run_count: int,
@@ -69,7 +86,7 @@ class ModelCallLimitExceededError(Exception):
         super().__init__(msg)
 
 
-class ModelCallLimitMiddleware(AgentMiddleware):
+class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
     """Middleware that tracks model call counts and enforces limits.
 
     This middleware monitors the number of model calls made during agent execution
@@ -97,6 +114,8 @@ class ModelCallLimitMiddleware(AgentMiddleware):
         ```
     """
 
+    state_schema = ModelCallLimitState
+
     def __init__(
         self,
         *,
@@ -135,7 +154,7 @@ class ModelCallLimitMiddleware(AgentMiddleware):
         self.exit_behavior = exit_behavior
 
     @hook_config(can_jump_to=["end"])
-    def before_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+    def before_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
         """Check model call limits before making a model call.
 
         Args:
@@ -175,3 +194,18 @@ class ModelCallLimitMiddleware(AgentMiddleware):
             return {"jump_to": "end", "messages": [limit_ai_message]}
 
         return None
+
+    def after_model(self, state: ModelCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+        """Increment model call counts after a model call.
+
+        Args:
+            state: The current agent state.
+            runtime: The langgraph runtime.
+
+        Returns:
+            State updates with incremented call counts.
+        """
+        return {
+            "thread_model_call_count": state.get("thread_model_call_count", 0) + 1,
+            "run_model_call_count": state.get("run_model_call_count", 0) + 1,
+        }
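
Counting is now self-contained: `before_model` enforces the limits and the new `after_model` increments them, using the middleware-private state above instead of the shared `AgentState`. A usage sketch, assuming the `thread_limit`/`run_limit` constructor keywords implied by the attribute names in this diff:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ModelCallLimitMiddleware

# thread_model_call_count persists across runs on a thread;
# run_model_call_count is an UntrackedValue, so it resets per run.
agent = create_agent(
    model="openai:gpt-4o",  # illustrative
    middleware=[ModelCallLimitMiddleware(thread_limit=10, run_limit=5)],
)
```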
langchain/agents/middleware/model_fallback.py CHANGED
@@ -13,7 +13,7 @@ from langchain.agents.middleware.types import (
 from langchain.chat_models import init_chat_model
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
+    from collections.abc import Awaitable, Callable
 
     from langchain_core.language_models.chat_models import BaseChatModel
 
@@ -102,3 +102,38 @@ class ModelFallbackMiddleware(AgentMiddleware):
                 continue
 
         raise last_exception
+
+    async def awrap_model_call(
+        self,
+        request: ModelRequest,
+        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+    ) -> ModelCallResult:
+        """Try fallback models in sequence on errors (async version).
+
+        Args:
+            request: Initial model request.
+            handler: Async callback to execute the model.
+
+        Returns:
+            AIMessage from successful model call.
+
+        Raises:
+            Exception: If all models fail, re-raises last exception.
+        """
+        # Try primary model first
+        last_exception: Exception
+        try:
+            return await handler(request)
+        except Exception as e:  # noqa: BLE001
+            last_exception = e
+
+        # Try fallback models
+        for fallback_model in self.models:
+            request.model = fallback_model
+            try:
+                return await handler(request)
+            except Exception as e:  # noqa: BLE001
+                last_exception = e
+                continue
+
+        raise last_exception
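
A sketch of the async path this adds, assuming fallback models are passed positionally to the constructor (the diff only shows they end up in `self.models`); the model identifiers are illustrative:

```python
import asyncio

from langchain.agents import create_agent
from langchain.agents.middleware import ModelFallbackMiddleware

# On error, awrap_model_call retries the same request against each
# fallback in order, re-raising the last exception if all fail.
fallbacks = ModelFallbackMiddleware(
    "openai:gpt-4o-mini",
    "anthropic:claude-3-5-sonnet-latest",
)

agent = create_agent(model="openai:gpt-4o", middleware=[fallbacks])
asyncio.run(agent.ainvoke({"messages": [{"role": "user", "content": "hi"}]}))
```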
langchain/agents/middleware/planning.py → langchain/agents/middleware/todo.py RENAMED
@@ -6,7 +6,7 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Annotated, Literal
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
+    from collections.abc import Awaitable, Callable
 
     from langchain_core.messages import ToolMessage
     from langchain_core.tools import tool
@@ -126,7 +126,7 @@ def write_todos(todos: list[Todo], tool_call_id: Annotated[str, InjectedToolCall
     )
 
 
-class PlanningMiddleware(AgentMiddleware):
+class TodoListMiddleware(AgentMiddleware):
     """Middleware that provides todo list management capabilities to agents.
 
     This middleware adds a `write_todos` tool that allows agents to create and manage
@@ -139,10 +139,10 @@ class PlanningMiddleware(AgentMiddleware):
 
     Example:
         ```python
-        from langchain.agents.middleware.planning import PlanningMiddleware
+        from langchain.agents.middleware.todo import TodoListMiddleware
         from langchain.agents import create_agent
 
-        agent = create_agent("openai:gpt-4o", middleware=[PlanningMiddleware()])
+        agent = create_agent("openai:gpt-4o", middleware=[TodoListMiddleware()])
 
         # Agent now has access to write_todos tool and todo state tracking
         result = await agent.invoke({"messages": [HumanMessage("Help me refactor my codebase")]})
@@ -165,7 +165,7 @@ class PlanningMiddleware(AgentMiddleware):
         system_prompt: str = WRITE_TODOS_SYSTEM_PROMPT,
         tool_description: str = WRITE_TODOS_TOOL_DESCRIPTION,
     ) -> None:
-        """Initialize the PlanningMiddleware with optional custom prompts.
+        """Initialize the TodoListMiddleware with optional custom prompts.
 
         Args:
             system_prompt: Custom system prompt to guide the agent on using the todo tool.
@@ -204,3 +204,16 @@ class PlanningMiddleware(AgentMiddleware):
             else self.system_prompt
         )
         return handler(request)
+
+    async def awrap_model_call(
+        self,
+        request: ModelRequest,
+        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+    ) -> ModelCallResult:
+        """Update the system prompt to include the todo system prompt (async version)."""
+        request.system_prompt = (
+            request.system_prompt + "\n\n" + self.system_prompt
+            if request.system_prompt
+            else self.system_prompt
+        )
+        return await handler(request)
langchain/agents/middleware/tool_call_limit.py CHANGED
@@ -2,16 +2,37 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Annotated, Any, Literal
 
 from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
+from langgraph.channels.untracked_value import UntrackedValue
+from typing_extensions import NotRequired
 
-from langchain.agents.middleware.types import AgentMiddleware, AgentState, hook_config
+from langchain.agents.middleware.types import (
+    AgentMiddleware,
+    AgentState,
+    PrivateStateAttr,
+    hook_config,
+)
 
 if TYPE_CHECKING:
     from langgraph.runtime import Runtime
 
 
+class ToolCallLimitState(AgentState):
+    """State schema for ToolCallLimitMiddleware.
+
+    Extends AgentState with tool call tracking fields.
+
+    The count fields are dictionaries mapping tool names to execution counts.
+    This allows multiple middleware instances to track different tools independently.
+    The special key "__all__" is used for tracking all tool calls globally.
+    """
+
+    thread_tool_call_count: NotRequired[Annotated[dict[str, int], PrivateStateAttr]]
+    run_tool_call_count: NotRequired[Annotated[dict[str, int], UntrackedValue, PrivateStateAttr]]
+
+
 def _count_tool_calls_in_messages(messages: list[AnyMessage], tool_name: str | None = None) -> int:
     """Count tool calls in a list of messages.
 
@@ -124,18 +145,18 @@ class ToolCallLimitExceededError(Exception):
         super().__init__(msg)
 
 
-class ToolCallLimitMiddleware(AgentMiddleware):
+class ToolCallLimitMiddleware(AgentMiddleware[ToolCallLimitState, Any]):
     """Middleware that tracks tool call counts and enforces limits.
 
     This middleware monitors the number of tool calls made during agent execution
     and can terminate the agent when specified limits are reached. It supports
     both thread-level and run-level call counting with configurable exit behaviors.
 
-    Thread-level: The middleware counts all tool calls in the entire message history
-    and persists this count across multiple runs (invocations) of the agent.
+    Thread-level: The middleware tracks the total number of tool calls and persists
+    call count across multiple runs (invocations) of the agent.
 
-    Run-level: The middleware counts tool calls made after the last HumanMessage,
-    representing the current run (invocation) of the agent.
+    Run-level: The middleware tracks the number of tool calls made during a single
+    run (invocation) of the agent.
 
     Example:
         ```python
@@ -157,6 +178,8 @@ class ToolCallLimitMiddleware(AgentMiddleware):
         ```
     """
 
+    state_schema = ToolCallLimitState
+
     def __init__(
         self,
         *,
@@ -211,11 +234,11 @@ class ToolCallLimitMiddleware(AgentMiddleware):
         return base_name
 
     @hook_config(can_jump_to=["end"])
-    def before_model(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+    def before_model(self, state: ToolCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
         """Check tool call limits before making a model call.
 
         Args:
-            state: The current agent state containing messages.
+            state: The current agent state containing tool call counts.
             runtime: The langgraph runtime.
 
         Returns:
@@ -226,14 +249,14 @@ class ToolCallLimitMiddleware(AgentMiddleware):
             ToolCallLimitExceededError: If limits are exceeded and exit_behavior
                 is "error".
         """
-        messages = state.get("messages", [])
+        # Get the count key for this middleware instance
+        count_key = self.tool_name if self.tool_name else "__all__"
 
-        # Count tool calls in entire thread
-        thread_count = _count_tool_calls_in_messages(messages, self.tool_name)
+        thread_counts = state.get("thread_tool_call_count", {})
+        run_counts = state.get("run_tool_call_count", {})
 
-        # Count tool calls in current run (after last HumanMessage)
-        run_messages = _get_run_messages(messages)
-        run_count = _count_tool_calls_in_messages(run_messages, self.tool_name)
+        thread_count = thread_counts.get(count_key, 0)
+        run_count = run_counts.get(count_key, 0)
 
         # Check if any limits are exceeded
         thread_limit_exceeded = self.thread_limit is not None and thread_count >= self.thread_limit
@@ -258,3 +281,53 @@ class ToolCallLimitMiddleware(AgentMiddleware):
             return {"jump_to": "end", "messages": [limit_ai_message]}
 
         return None
+
+    def after_model(self, state: ToolCallLimitState, runtime: Runtime) -> dict[str, Any] | None:  # noqa: ARG002
+        """Increment tool call counts after a model call (when tool calls are made).
+
+        Args:
+            state: The current agent state.
+            runtime: The langgraph runtime.
+
+        Returns:
+            State updates with incremented tool call counts if tool calls were made.
+        """
+        # Get the last AIMessage to check for tool calls
+        messages = state.get("messages", [])
+        if not messages:
+            return None
+
+        # Find the last AIMessage
+        last_ai_message = None
+        for message in reversed(messages):
+            if isinstance(message, AIMessage):
+                last_ai_message = message
+                break
+
+        if not last_ai_message or not last_ai_message.tool_calls:
+            return None
+
+        # Count relevant tool calls (filter by tool_name if specified)
+        tool_call_count = 0
+        for tool_call in last_ai_message.tool_calls:
+            if self.tool_name is None or tool_call["name"] == self.tool_name:
+                tool_call_count += 1
+
+        if tool_call_count == 0:
+            return None
+
+        # Get the count key for this middleware instance
+        count_key = self.tool_name if self.tool_name else "__all__"
+
+        # Get current counts
+        thread_counts = state.get("thread_tool_call_count", {}).copy()
+        run_counts = state.get("run_tool_call_count", {}).copy()
+
+        # Increment counts for this key
+        thread_counts[count_key] = thread_counts.get(count_key, 0) + tool_call_count
+        run_counts[count_key] = run_counts.get(count_key, 0) + tool_call_count
+
+        return {
+            "thread_tool_call_count": thread_counts,
+            "run_tool_call_count": run_counts,
+        }
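
Because counts are keyed per tool name (with `"__all__"` as the global bucket), separate instances can limit different tools independently. A sketch, assuming the constructor keywords matching the attributes used above (`tool_name`, `thread_limit`, `run_limit`):

```python
from langchain.agents import create_agent
from langchain.agents.middleware import ToolCallLimitMiddleware

agent = create_agent(
    model="openai:gpt-4o",  # illustrative
    middleware=[
        ToolCallLimitMiddleware(thread_limit=20),                  # counts under "__all__"
        ToolCallLimitMiddleware(tool_name="search", run_limit=3),  # counts under "search"
    ],
)
```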
langchain/agents/middleware/types.py CHANGED
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from collections.abc import Awaitable, Callable
-from dataclasses import dataclass, field
+from dataclasses import dataclass, field, replace
 from inspect import iscoroutinefunction
 from typing import (
     TYPE_CHECKING,
@@ -26,11 +26,10 @@ from typing import TypeAlias
 
 from langchain_core.messages import AIMessage, AnyMessage, BaseMessage, ToolMessage  # noqa: TC002
 from langgraph.channels.ephemeral_value import EphemeralValue
-from langgraph.channels.untracked_value import UntrackedValue
 from langgraph.graph.message import add_messages
 from langgraph.types import Command  # noqa: TC002
 from langgraph.typing import ContextT
-from typing_extensions import NotRequired, Required, TypedDict, TypeVar
+from typing_extensions import NotRequired, Required, TypedDict, TypeVar, Unpack
 
 if TYPE_CHECKING:
     from langchain_core.language_models.chat_models import BaseChatModel
@@ -62,6 +61,18 @@ JumpTo = Literal["tools", "model", "end"]
 ResponseT = TypeVar("ResponseT")
 
 
+class _ModelRequestOverrides(TypedDict, total=False):
+    """Possible overrides for ModelRequest.override() method."""
+
+    model: BaseChatModel
+    system_prompt: str | None
+    messages: list[AnyMessage]
+    tool_choice: Any | None
+    tools: list[BaseTool | dict]
+    response_format: ResponseFormat | None
+    model_settings: dict[str, Any]
+
+
 @dataclass
 class ModelRequest:
     """Model request information for the agent."""
@@ -76,6 +87,36 @@ class ModelRequest:
     runtime: Runtime[ContextT]  # type: ignore[valid-type]
     model_settings: dict[str, Any] = field(default_factory=dict)
 
+    def override(self, **overrides: Unpack[_ModelRequestOverrides]) -> ModelRequest:
+        """Replace the request with a new request with the given overrides.
+
+        Returns a new `ModelRequest` instance with the specified attributes replaced.
+        This follows an immutable pattern, leaving the original request unchanged.
+
+        Args:
+            **overrides: Keyword arguments for attributes to override. Supported keys:
+                - model: BaseChatModel instance
+                - system_prompt: Optional system prompt string
+                - messages: List of messages
+                - tool_choice: Tool choice configuration
+                - tools: List of available tools
+                - response_format: Response format specification
+                - model_settings: Additional model settings
+
+        Returns:
+            New ModelRequest instance with specified overrides applied.
+
+        Examples:
+            ```python
+            # Create a new request with different model
+            new_request = request.override(model=different_model)
+
+            # Override multiple attributes
+            new_request = request.override(system_prompt="New instructions", tool_choice="auto")
+            ```
+        """
+        return replace(self, **overrides)
+
 
 @dataclass
 class ModelResponse:
@@ -129,8 +170,6 @@ class AgentState(TypedDict, Generic[ResponseT]):
     messages: Required[Annotated[list[AnyMessage], add_messages]]
     jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue, PrivateStateAttr]]
     structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
-    thread_model_call_count: NotRequired[Annotated[int, PrivateStateAttr]]
-    run_model_call_count: NotRequired[Annotated[int, UntrackedValue, PrivateStateAttr]]
 
 
 class PublicAgentState(TypedDict, Generic[ResponseT]):
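
`override()` pairs naturally with `wrap_model_call`: a middleware can swap request attributes without mutating the request it was handed. A sketch using the decorator exported from `langchain.agents.middleware`; the prompt text is illustrative:

```python
from langchain.agents.middleware import ModelRequest, ModelResponse, wrap_model_call


@wrap_model_call
def pirate_prompt(request: ModelRequest, handler) -> ModelResponse:
    """Swap the system prompt immutably; the original request is untouched."""
    return handler(request.override(system_prompt="Answer like a pirate."))
```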
langchain/tools/__init__.py CHANGED
@@ -8,7 +8,7 @@ from langchain_core.tools import (
     tool,
 )
 
-from langchain.tools.tool_node import InjectedState, InjectedStore, ToolInvocationError
+from langchain.tools.tool_node import InjectedState, InjectedStore
 
 __all__ = [
     "BaseTool",
@@ -17,6 +17,5 @@ __all__ = [
     "InjectedToolArg",
     "InjectedToolCallId",
     "ToolException",
-    "ToolInvocationError",
     "tool",
 ]
langchain/tools/tool_node.py CHANGED
@@ -81,6 +81,7 @@ from langgraph.graph.message import REMOVE_ALL_MESSAGES
 from langgraph.runtime import get_runtime
 from langgraph.types import Command, Send
 from pydantic import BaseModel, ValidationError
+from typing_extensions import Unpack
 
 if TYPE_CHECKING:
     from collections.abc import Sequence
@@ -104,6 +105,12 @@ TOOL_INVOCATION_ERROR_TEMPLATE = (
 )
 
 
+class _ToolCallRequestOverrides(TypedDict, total=False):
+    """Possible overrides for ToolCallRequest.override() method."""
+
+    tool_call: ToolCall
+
+
 @dataclass()
 class ToolCallRequest:
     """Tool execution request passed to tool call interceptors.
@@ -120,6 +127,31 @@ class ToolCallRequest:
     state: Any
     runtime: Any
 
+    def override(self, **overrides: Unpack[_ToolCallRequestOverrides]) -> ToolCallRequest:
+        """Replace the request with a new request with the given overrides.
+
+        Returns a new `ToolCallRequest` instance with the specified attributes replaced.
+        This follows an immutable pattern, leaving the original request unchanged.
+
+        Args:
+            **overrides: Keyword arguments for attributes to override. Supported keys:
+                - tool_call: Tool call dict with name, args, and id
+
+        Returns:
+            New ToolCallRequest instance with specified overrides applied.
+
+        Examples:
+            ```python
+            # Modify tool call arguments without mutating original
+            modified_call = {**request.tool_call, "args": {"value": 10}}
+            new_request = request.override(tool_call=modified_call)
+
+            # Override multiple attributes
+            new_request = request.override(tool_call=modified_call, state=new_state)
+            ```
+        """
+        return replace(self, **overrides)
+
 
 ToolCallWrapper = Callable[
     [ToolCallRequest, Callable[[ToolCallRequest], ToolMessage | Command]],
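
`ToolCallRequest.override()` mirrors the model-side helper and pairs with the newly exported `wrap_tool_call`. A sketch; the clamping logic and argument name are invented for illustration:

```python
from langchain.agents.middleware import wrap_tool_call


@wrap_tool_call
def clamp_value(request, handler):
    """Rewrite tool-call args immutably before the tool executes."""
    args = dict(request.tool_call["args"])
    if "value" in args:  # hypothetical argument name
        args["value"] = min(args["value"], 10)
    return handler(request.override(tool_call={**request.tool_call, "args": args}))
```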
langchain-1.0.0a15.dist-info/METADATA ADDED
@@ -0,0 +1,85 @@
+Metadata-Version: 2.4
+Name: langchain
+Version: 1.0.0a15
+Summary: Building applications with LLMs through composability
+Project-URL: homepage, https://docs.langchain.com/
+Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
+Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
+Project-URL: twitter, https://x.com/LangChainAI
+Project-URL: slack, https://www.langchain.com/join-community
+Project-URL: reddit, https://www.reddit.com/r/LangChain/
+License: MIT
+License-File: LICENSE
+Requires-Python: <4.0.0,>=3.10.0
+Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
+Requires-Dist: langgraph<2.0.0,>=1.0.0a4
+Requires-Dist: pydantic<3.0.0,>=2.7.4
+Provides-Extra: anthropic
+Requires-Dist: langchain-anthropic; extra == 'anthropic'
+Provides-Extra: aws
+Requires-Dist: langchain-aws; extra == 'aws'
+Provides-Extra: community
+Requires-Dist: langchain-community; extra == 'community'
+Provides-Extra: deepseek
+Requires-Dist: langchain-deepseek; extra == 'deepseek'
+Provides-Extra: fireworks
+Requires-Dist: langchain-fireworks; extra == 'fireworks'
+Provides-Extra: google-genai
+Requires-Dist: langchain-google-genai; extra == 'google-genai'
+Provides-Extra: google-vertexai
+Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
+Provides-Extra: groq
+Requires-Dist: langchain-groq; extra == 'groq'
+Provides-Extra: mistralai
+Requires-Dist: langchain-mistralai; extra == 'mistralai'
+Provides-Extra: ollama
+Requires-Dist: langchain-ollama; extra == 'ollama'
+Provides-Extra: openai
+Requires-Dist: langchain-openai; extra == 'openai'
+Provides-Extra: perplexity
+Requires-Dist: langchain-perplexity; extra == 'perplexity'
+Provides-Extra: together
+Requires-Dist: langchain-together; extra == 'together'
+Provides-Extra: xai
+Requires-Dist: langchain-xai; extra == 'xai'
+Description-Content-Type: text/markdown
+
+# 🦜️🔗 LangChain
+
+[![PyPI - Version](https://img.shields.io/pypi/v/langchain?label=%20)](https://pypi.org/project/langchain/#history)
+[![PyPI - License](https://img.shields.io/pypi/l/langchain)](https://opensource.org/licenses/MIT)
+[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
+[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
+
+Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+
+## Quick Install
+
+```bash
+pip install langchain
+```
+
+## 🤔 What is this?
+
+LangChain is the easiest way to start building agents and applications powered by LLMs. With under 10 lines of code, you can connect to OpenAI, Anthropic, Google, and [more](https://docs.langchain.com/oss/python/integrations/providers/overview). LangChain provides a pre-built agent architecture and model integrations to help you get started quickly and seamlessly incorporate LLMs into your agents and applications.
+
+We recommend you use LangChain if you want to quickly build agents and autonomous applications. Use [LangGraph](https://docs.langchain.com/oss/python/langgraph/overview), our low-level agent orchestration framework and runtime, when you have more advanced needs that require a combination of deterministic and agentic workflows, heavy customization, and carefully controlled latency.
+
+LangChain [agents](https://docs.langchain.com/oss/python/langchain/agents) are built on top of LangGraph in order to provide durable execution, streaming, human-in-the-loop, persistence, and more. (You do not need to know LangGraph for basic LangChain agent usage.)
+
+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_classic).
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).
langchain-1.0.0a15.dist-info/RECORD ADDED
@@ -0,0 +1,29 @@
+langchain/__init__.py,sha256=FOFSABkFKCLhvZ83wF1wmMdw-l8UKpDcjmi-BqKuSRQ,64
+langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain/agents/__init__.py,sha256=x85V7MqddVSrraoirGHplPMzEz9Lha-vL9fKjXCS7lA,258
+langchain/agents/factory.py,sha256=9iBzgKnb_9D2DLt_4ZnE62GrAYmDDykeAya9ZwNHJdQ,60839
+langchain/agents/structured_output.py,sha256=msf-ClqDnMfJ-oGHqjwEyth860tMnx58GLTvqJijqg8,13686
+langchain/agents/middleware/__init__.py,sha256=CGBHDIok3roWJDytMYfladxmdkaBa1vpjIy_aD5-euM,1480
+langchain/agents/middleware/context_editing.py,sha256=brg9IQHC8NZKOQmA7afSlj2IScfe2rozdZB9TGthRTQ,8748
+langchain/agents/middleware/human_in_the_loop.py,sha256=yKhubIrh4TrgUC8ctz7artixYq5paDF5gys2w5XzJzg,12601
+langchain/agents/middleware/model_call_limit.py,sha256=tZx5MSMJvb4EE6Zr-hvC1nEHNgn4a4uFfVQxyzUzBe4,7804
+langchain/agents/middleware/model_fallback.py,sha256=io6jHXnbTpDTA_RZg9d-eArpktOetomFMTX--B9y_x0,4177
+langchain/agents/middleware/pii.py,sha256=7hTBxnpcG_hSZd29TCg-4tbiLFO9IJb-wwnujCRMrv4,24780
+langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
+langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
+langchain/agents/middleware/tool_call_limit.py,sha256=0ilGNJRVBtjVN7MyMDgtYXOr1WLrCfNblXNCmvND-84,12317
+langchain/agents/middleware/tool_emulator.py,sha256=5qJFPfTSiVukNclDeUo7_c7-PjGEVWyefbPC-zpYSlI,7115
+langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
+langchain/agents/middleware/types.py,sha256=JGR6KMqfsPrt8Uxfcl9aN4bpMRVkGOFQKGrSdwUWgnA,55292
+langchain/chat_models/__init__.py,sha256=PTq9qskQEbqXYAcUUxUXDsugOcwISgFhv4w40JgkbgU,181
+langchain/chat_models/base.py,sha256=HPlD0QaLOGXRJAY1Qq6ojr1WcteBlgVO--_GoSqpxXE,34560
+langchain/embeddings/__init__.py,sha256=kfLfu342i9bTrA0WC8yA6IJE2bgY4ZynWBi-_cMUg8E,179
+langchain/embeddings/base.py,sha256=o77Z1TrXoUZN1SdYY9nZCNehm7cZzC-TNqc5NIzWtww,7327
+langchain/messages/__init__.py,sha256=X5-dRewJP-jtehdC6oDbs21j9bxGDUbI5WlcNrO_bHk,1309
+langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
+langchain/tools/__init__.py,sha256=fYEuNXytW77uztDt1kQyQWWeZRIL3pA0h1m8F7bLerA,362
+langchain/tools/tool_node.py,sha256=C0DPV53kY_eqGf2bZbddHj3l2_74sSnHkmZJZ06uhpw,59158
+langchain-1.0.0a15.dist-info/METADATA,sha256=CMqeRpnFLjhFavvb-6zFZIL9XrLmXzDoX20OVNZZASk,4543
+langchain-1.0.0a15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain-1.0.0a15.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-1.0.0a15.dist-info/RECORD,,
langchain/agents/middleware/prompt_caching.py DELETED
@@ -1,89 +0,0 @@
-"""Anthropic prompt caching middleware."""
-
-from collections.abc import Callable
-from typing import Literal
-from warnings import warn
-
-from langchain.agents.middleware.types import (
-    AgentMiddleware,
-    ModelCallResult,
-    ModelRequest,
-    ModelResponse,
-)
-
-
-class AnthropicPromptCachingMiddleware(AgentMiddleware):
-    """Prompt Caching Middleware.
-
-    Optimizes API usage by caching conversation prefixes for Anthropic models.
-
-    Learn more about Anthropic prompt caching
-    [here](https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching).
-    """
-
-    def __init__(
-        self,
-        type: Literal["ephemeral"] = "ephemeral",
-        ttl: Literal["5m", "1h"] = "5m",
-        min_messages_to_cache: int = 0,
-        unsupported_model_behavior: Literal["ignore", "warn", "raise"] = "warn",
-    ) -> None:
-        """Initialize the middleware with cache control settings.
-
-        Args:
-            type: The type of cache to use, only "ephemeral" is supported.
-            ttl: The time to live for the cache, only "5m" and "1h" are supported.
-            min_messages_to_cache: The minimum number of messages until the cache is used,
-                default is 0.
-            unsupported_model_behavior: The behavior to take when an unsupported model is used.
-                "ignore" will ignore the unsupported model and continue without caching.
-                "warn" will warn the user and continue without caching.
-                "raise" will raise an error and stop the agent.
-        """
-        self.type = type
-        self.ttl = ttl
-        self.min_messages_to_cache = min_messages_to_cache
-        self.unsupported_model_behavior = unsupported_model_behavior
-
-    def wrap_model_call(
-        self,
-        request: ModelRequest,
-        handler: Callable[[ModelRequest], ModelResponse],
-    ) -> ModelCallResult:
-        """Modify the model request to add cache control blocks."""
-        try:
-            from langchain_anthropic import ChatAnthropic
-        except ImportError:
-            ChatAnthropic = None  # noqa: N806
-
-        msg: str | None = None
-
-        if ChatAnthropic is None:
-            msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports "
-                "Anthropic models. "
-                "Please install langchain-anthropic."
-            )
-        elif not isinstance(request.model, ChatAnthropic):
-            msg = (
-                "AnthropicPromptCachingMiddleware caching middleware only supports "
-                f"Anthropic models, not instances of {type(request.model)}"
-            )
-
-        if msg is not None:
-            if self.unsupported_model_behavior == "raise":
-                raise ValueError(msg)
-            if self.unsupported_model_behavior == "warn":
-                warn(msg, stacklevel=3)
-            else:
-                return handler(request)
-
-        messages_count = (
-            len(request.messages) + 1 if request.system_prompt else len(request.messages)
-        )
-        if messages_count < self.min_messages_to_cache:
-            return handler(request)
-
-        request.model_settings["cache_control"] = {"type": self.type, "ttl": self.ttl}
-
-        return handler(request)
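
The removed export's inline comment ("should move to langchain-anthropic if we decide to keep it") signals the intended destination, but this diff does not show a replacement landing anywhere. If you depended on this middleware, a guarded import is a reasonable stopgap; the `langchain_anthropic.middleware` path below is an assumption to verify against your installed langchain-anthropic version:

```python
try:
    # Assumed future home, per the removed comment; not confirmed by this diff.
    from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
except ImportError:
    AnthropicPromptCachingMiddleware = None  # unavailable in langchain 1.0.0a15
```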
langchain-1.0.0a14.dist-info/METADATA DELETED
@@ -1,125 +0,0 @@
-Metadata-Version: 2.4
-Name: langchain
-Version: 1.0.0a14
-Summary: Building applications with LLMs through composability
-Project-URL: homepage, https://docs.langchain.com/
-Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
-Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
-Project-URL: twitter, https://x.com/LangChainAI
-Project-URL: slack, https://www.langchain.com/join-community
-Project-URL: reddit, https://www.reddit.com/r/LangChain/
-License: MIT
-License-File: LICENSE
-Requires-Python: <4.0.0,>=3.10.0
-Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
-Requires-Dist: langgraph<2.0.0,>=1.0.0a4
-Requires-Dist: pydantic<3.0.0,>=2.7.4
-Provides-Extra: anthropic
-Requires-Dist: langchain-anthropic; extra == 'anthropic'
-Provides-Extra: aws
-Requires-Dist: langchain-aws; extra == 'aws'
-Provides-Extra: community
-Requires-Dist: langchain-community; extra == 'community'
-Provides-Extra: deepseek
-Requires-Dist: langchain-deepseek; extra == 'deepseek'
-Provides-Extra: fireworks
-Requires-Dist: langchain-fireworks; extra == 'fireworks'
-Provides-Extra: google-genai
-Requires-Dist: langchain-google-genai; extra == 'google-genai'
-Provides-Extra: google-vertexai
-Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
-Provides-Extra: groq
-Requires-Dist: langchain-groq; extra == 'groq'
-Provides-Extra: mistralai
-Requires-Dist: langchain-mistralai; extra == 'mistralai'
-Provides-Extra: ollama
-Requires-Dist: langchain-ollama; extra == 'ollama'
-Provides-Extra: openai
-Requires-Dist: langchain-openai; extra == 'openai'
-Provides-Extra: perplexity
-Requires-Dist: langchain-perplexity; extra == 'perplexity'
-Provides-Extra: together
-Requires-Dist: langchain-together; extra == 'together'
-Provides-Extra: xai
-Requires-Dist: langchain-xai; extra == 'xai'
-Description-Content-Type: text/markdown
-
-# 🦜️🔗 LangChain
-
-⚡ Building applications with LLMs through composability ⚡
-
-[![PyPI - License](https://img.shields.io/pypi/l/langchain?style=flat-square)](https://opensource.org/licenses/MIT)
-[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain)](https://pypistats.org/packages/langchain)
-[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai)
-
-Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
-
-To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
-[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
-
-## Quick Install
-
-`pip install langchain`
-
-## 🤔 What is this?
-
-Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
-
-This library aims to assist in the development of those types of applications. Common examples of these applications include:
-
-**❓ Question answering with RAG**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/rag/)
-- End-to-end Example: [Chat LangChain](https://chat.langchain.com) and [repo](https://github.com/langchain-ai/chat-langchain)
-
-**🧱 Extracting structured output**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/extraction/)
-- End-to-end Example: [SQL Llama2 Template](https://github.com/langchain-ai/langchain-extract/)
-
-**🤖 Chatbots**
-
-- [Documentation](https://python.langchain.com/docs/tutorials/chatbot/)
-- End-to-end Example: [Web LangChain (web researcher chatbot)](https://weblangchain.vercel.app) and [repo](https://github.com/langchain-ai/weblangchain)
-
-## 📖 Documentation
-
-Please see [our full documentation](https://python.langchain.com) on:
-
-- Getting started (installation, setting up the environment, simple examples)
-- How-To examples (demos, integrations, helper functions)
-- Reference (full API docs)
-- Resources (high-level explanation of core concepts)
-
-## 🚀 What can this help with?
-
-There are five main areas that LangChain is designed to help with.
-These are, in increasing order of complexity:
-
-**🤖 Agents:**
-
-Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents.
-
-**📚 Retrieval Augmented Generation:**
-
-Retrieval Augmented Generation involves specific types of chains that first interact with an external data source to fetch data for use in the generation step. Examples include summarization of long pieces of text and question/answering over specific data sources.
-
-**🧐 Evaluation:**
-
-Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
-
-**📃 Models and Prompts:**
-
-This includes prompt management, prompt optimization, a generic interface for all LLMs, and common utilities for working with chat models and LLMs.
-
-**🔗 Chains:**
-
-Chains go beyond a single LLM call and involve sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.
-
-For more information on these concepts, please see our [full documentation](https://python.langchain.com).
-
-## 💁 Contributing
-
-As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
-
-For detailed information on how to contribute, see the [Contributing Guide](https://python.langchain.com/docs/contributing/).
langchain-1.0.0a14.dist-info/RECORD DELETED
@@ -1,30 +0,0 @@
-langchain/__init__.py,sha256=rED92FbyWFRmks07cFlRTuz5ZtaPKxYq6BcsxW5KhrE,64
-langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain/agents/__init__.py,sha256=x85V7MqddVSrraoirGHplPMzEz9Lha-vL9fKjXCS7lA,258
-langchain/agents/factory.py,sha256=e6xjbw_qTFvfP-AHXZtykXGrX-2GoX6hFaW_97WHiEw,61245
-langchain/agents/structured_output.py,sha256=msf-ClqDnMfJ-oGHqjwEyth860tMnx58GLTvqJijqg8,13686
-langchain/agents/middleware/__init__.py,sha256=FDwjEGYtxPgyFa9iiLAWT5M2W8c-NDYfGz6_y8cEqPI,1568
-langchain/agents/middleware/context_editing.py,sha256=6ca6Qed-F59JD1rAlrIuxlrBbDVIKQmCpfvZaIFbBy8,7691
-langchain/agents/middleware/human_in_the_loop.py,sha256=Bs4_Hgjuy9l0-AMUHvU9wlr_rL2Z1rUwL_VcfFLhhUM,12666
-langchain/agents/middleware/model_call_limit.py,sha256=H3lJL2cLv3u0uF0kJsRagFt1rBmHHgn5SFsfnmcyQdA,6703
-langchain/agents/middleware/model_fallback.py,sha256=pdKRSO9JD6MdMYHFl7IK4yj6LEwQfyzfTJiBE8uJ2pE,3118
-langchain/agents/middleware/pii.py,sha256=7hTBxnpcG_hSZd29TCg-4tbiLFO9IJb-wwnujCRMrv4,24780
-langchain/agents/middleware/planning.py,sha256=59Q6-4aALytjssIZ5a4hZkx5THxIG-RTeUHuDP1LGDA,9319
-langchain/agents/middleware/prompt_caching.py,sha256=cMvIJ_dpSsn4_cqCvZBBKjtw5GpcVkc8Lgf_VEPzM1w,3225
-langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
-langchain/agents/middleware/tool_call_limit.py,sha256=6cWviwPRzaf7UUcp9zlXwk6RJBBoWVaVSBc1NaVT2fI,9729
-langchain/agents/middleware/tool_emulator.py,sha256=5qJFPfTSiVukNclDeUo7_c7-PjGEVWyefbPC-zpYSlI,7115
-langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
-langchain/agents/middleware/types.py,sha256=JqTdwFru-nqs8RlamYPqEM0cnak9WBPEp__dtsve3g4,53868
-langchain/chat_models/__init__.py,sha256=PTq9qskQEbqXYAcUUxUXDsugOcwISgFhv4w40JgkbgU,181
-langchain/chat_models/base.py,sha256=HPlD0QaLOGXRJAY1Qq6ojr1WcteBlgVO--_GoSqpxXE,34560
-langchain/embeddings/__init__.py,sha256=kfLfu342i9bTrA0WC8yA6IJE2bgY4ZynWBi-_cMUg8E,179
-langchain/embeddings/base.py,sha256=o77Z1TrXoUZN1SdYY9nZCNehm7cZzC-TNqc5NIzWtww,7327
-langchain/messages/__init__.py,sha256=X5-dRewJP-jtehdC6oDbs21j9bxGDUbI5WlcNrO_bHk,1309
-langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
-langchain/tools/__init__.py,sha256=C0GW8HPluAgnVmGneHXY-ibwbl3kXixBtZS88PtnXSI,410
-langchain/tools/tool_node.py,sha256=p9NO3R8dgA9QhjCuGb-INebjizjzKj21tIsnoKSBkA8,57917
-langchain-1.0.0a14.dist-info/METADATA,sha256=5-_c3FrZ93AM_AtuB9-PkYjoYDLgfMFtfij0SSHjvJE,6118
-langchain-1.0.0a14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langchain-1.0.0a14.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
-langchain-1.0.0a14.dist-info/RECORD,,