langchain 1.0.0rc1__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,7 +45,6 @@ __all__ = [
45
45
  "ModelRequest",
46
46
  "ModelResponse",
47
47
  "OmitFromSchema",
48
- "PublicAgentState",
49
48
  "after_agent",
50
49
  "after_model",
51
50
  "before_agent",
@@ -172,11 +171,14 @@ class AgentState(TypedDict, Generic[ResponseT]):
172
171
  structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
173
172
 
174
173
 
175
- class PublicAgentState(TypedDict, Generic[ResponseT]):
176
- """Public state schema for the agent.
174
+ class _InputAgentState(TypedDict): # noqa: PYI049
175
+ """Input state schema for the agent."""
177
176
 
178
- Just used for typing purposes.
179
- """
177
+ messages: Required[Annotated[list[AnyMessage | dict], add_messages]]
178
+
179
+
180
+ class _OutputAgentState(TypedDict, Generic[ResponseT]): # noqa: PYI049
181
+ """Output state schema for the agent."""
180
182
 
181
183
  messages: Required[Annotated[list[AnyMessage], add_messages]]
182
184
  structured_response: NotRequired[ResponseT]
@@ -4,14 +4,7 @@ from __future__ import annotations
4
4
 
5
5
  import warnings
6
6
  from importlib import util
7
- from typing import (
8
- TYPE_CHECKING,
9
- Any,
10
- Literal,
11
- TypeAlias,
12
- cast,
13
- overload,
14
- )
7
+ from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast, overload
15
8
 
16
9
  from langchain_core.language_models import BaseChatModel, LanguageModelInput
17
10
  from langchain_core.messages import AIMessage, AnyMessage
@@ -83,7 +76,7 @@ def init_chat_model(
83
76
  for supported model parameters.
84
77
 
85
78
  Args:
86
- model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
79
+ model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5'`.
87
80
 
88
81
  You can also specify model and model provider in a single argument using:
89
82
 
@@ -179,7 +172,7 @@ def init_chat_model(
179
172
  from langchain.chat_models import init_chat_model
180
173
 
181
174
  o3_mini = init_chat_model("openai:o3-mini", temperature=0)
182
- claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
175
+ claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5", temperature=0)
183
176
  gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
184
177
 
185
178
  o3_mini.invoke("what's your name")
@@ -201,7 +194,7 @@ def init_chat_model(
201
194
 
202
195
  configurable_model.invoke(
203
196
  "what's your name",
204
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
197
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
205
198
  )
206
199
  ```
207
200
 
@@ -225,7 +218,7 @@ def init_chat_model(
225
218
  "what's your name",
226
219
  config={
227
220
  "configurable": {
228
- "foo_model": "anthropic:claude-sonnet-4-5-20250929",
221
+ "foo_model": "anthropic:claude-sonnet-4-5",
229
222
  "foo_temperature": 0.6,
230
223
  }
231
224
  },
@@ -271,7 +264,7 @@ def init_chat_model(
271
264
 
272
265
  configurable_model_with_tools.invoke(
273
266
  "Which city is hotter today and which is bigger: LA or NY?",
274
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
267
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
275
268
  )
276
269
  ```
277
270
 
@@ -612,10 +605,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
612
605
  @property
613
606
  def InputType(self) -> TypeAlias:
614
607
  """Get the input type for this `Runnable`."""
615
- from langchain_core.prompt_values import (
616
- ChatPromptValueConcrete,
617
- StringPromptValue,
618
- )
608
+ from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
619
609
 
620
610
  # This is a version of LanguageModelInput which replaces the abstract
621
611
  # base class BaseMessage with a union of its subclasses, which makes
@@ -4,6 +4,12 @@
4
4
  This page contains **reference documentation** for Embeddings. See
5
5
  [the docs](https://docs.langchain.com/oss/python/langchain/retrieval#embedding-models)
6
6
  for conceptual guides, tutorials, and examples on using Embeddings.
7
+
8
+ !!! warning "Modules moved"
9
+ With the release of `langchain 1.0.0`, several embeddings modules were moved to
10
+ `langchain-classic`, such as `CacheBackedEmbeddings` and all community
11
+ embeddings. See [list](https://github.com/langchain-ai/langchain/blob/bdf1cd383ce36dc18381a3bf3fb0a579337a32b5/libs/langchain/langchain/embeddings/__init__.py)
12
+ of moved modules to inform your migration.
7
13
  """
8
14
 
9
15
  from langchain_core.embeddings import Embeddings
@@ -133,20 +133,34 @@ def init_embeddings(
133
133
  installed.
134
134
 
135
135
  Args:
136
- model: Name of the model to use. Can be either:
136
+ model: Name of the model to use.
137
+
138
+ Can be either:
139
+
137
140
  - A model string like `"openai:text-embedding-3-small"`
138
- - Just the model name if provider is specified
139
- provider: Optional explicit provider name. If not specified,
140
- will attempt to parse from the model string. Supported providers
141
- and their required packages:
141
+ - Just the model name if the provider is specified separately or can be
142
+ inferred.
143
+
144
+ See supported providers under the `provider` arg description.
145
+ provider: Optional explicit provider name. If not specified, will attempt to
146
+ parse from the model string in the `model` arg.
147
+
148
+ Supported providers:
142
149
 
143
- {_get_provider_list()}
150
+ - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
151
+ - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
152
+ - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
153
+ - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
154
+ - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
155
+ - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
156
+ - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
157
+ - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
144
158
 
145
159
  **kwargs: Additional model-specific parameters passed to the embedding model.
146
160
  These vary by provider, see the provider-specific documentation for details.
147
161
 
148
162
  Returns:
149
- An Embeddings instance that can generate embeddings for text.
163
+ An `Embeddings` instance that can generate embeddings for text.
150
164
 
151
165
  Raises:
152
166
  ValueError: If the model provider is not supported or cannot be determined
@@ -13,10 +13,10 @@ The module implements design patterns for:
13
13
  - Command-based state updates for advanced control flow
14
14
 
15
15
  Key Components:
16
- ToolNode: Main class for executing tools in LangGraph workflows
17
- InjectedState: Annotation for injecting graph state into tools
18
- InjectedStore: Annotation for injecting persistent store into tools
19
- tools_condition: Utility function for conditional routing based on tool calls
16
+ `ToolNode`: Main class for executing tools in LangGraph workflows
17
+ `InjectedState`: Annotation for injecting graph state into tools
18
+ `InjectedStore`: Annotation for injecting persistent store into tools
19
+ `tools_condition`: Utility function for conditional routing based on tool calls
20
20
 
21
21
  Typical Usage:
22
22
  ```python
@@ -49,7 +49,6 @@ from typing import (
49
49
  Generic,
50
50
  Literal,
51
51
  TypedDict,
52
- TypeVar,
53
52
  Union,
54
53
  cast,
55
54
  get_args,
@@ -75,6 +74,7 @@ from langchain_core.tools import tool as create_tool
75
74
  from langchain_core.tools.base import (
76
75
  TOOL_MESSAGE_BLOCK_TYPES,
77
76
  ToolException,
77
+ _DirectlyInjectedToolArg,
78
78
  get_all_basemodel_annotations,
79
79
  )
80
80
  from langgraph._internal._runnable import RunnableCallable
@@ -83,15 +83,18 @@ from langgraph.graph.message import REMOVE_ALL_MESSAGES
83
83
  from langgraph.store.base import BaseStore # noqa: TC002
84
84
  from langgraph.types import Command, Send, StreamWriter
85
85
  from pydantic import BaseModel, ValidationError
86
- from typing_extensions import Unpack
86
+ from typing_extensions import TypeVar, Unpack
87
87
 
88
88
  if TYPE_CHECKING:
89
89
  from collections.abc import Sequence
90
90
 
91
91
  from langgraph.runtime import Runtime
92
92
 
93
- StateT = TypeVar("StateT")
94
- ContextT = TypeVar("ContextT")
93
+ # right now we use a dict as the default, can change this to AgentState, but depends
94
+ # on if this lives in LangChain or LangGraph... ideally would have some typed
95
+ # messages key
96
+ StateT = TypeVar("StateT", default=dict)
97
+ ContextT = TypeVar("ContextT", default=None)
95
98
 
96
99
  INVALID_TOOL_NAME_ERROR_TEMPLATE = (
97
100
  "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
@@ -122,11 +125,11 @@ class ToolCallRequest:
122
125
  Attributes:
123
126
  tool_call: Tool call dict with name, args, and id from model output.
124
127
  tool: BaseTool instance to be invoked, or None if tool is not
125
- registered with the ToolNode. When tool is None, interceptors can
126
- handle the request without validation. If the interceptor calls execute(),
128
+ registered with the `ToolNode`. When tool is `None`, interceptors can
129
+ handle the request without validation. If the interceptor calls `execute()`,
127
130
  validation will occur and raise an error for unregistered tools.
128
- state: Agent state (dict, list, or BaseModel).
129
- runtime: LangGraph runtime context (optional, None if outside graph).
131
+ state: Agent state (`dict`, `list`, or `BaseModel`).
132
+ runtime: LangGraph runtime context (optional, `None` if outside graph).
130
133
  """
131
134
 
132
135
  tool_call: ToolCall
@@ -177,7 +180,7 @@ The execute callable can be invoked multiple times for retry logic,
177
180
  with potentially modified requests each time. Each call to execute
178
181
  is independent and stateless.
179
182
 
180
- Note:
183
+ !!! note
181
184
  When implementing middleware for `create_agent`, use
182
185
  `AgentMiddleware.wrap_tool_call` which provides properly typed
183
186
  state parameter for better type safety.
@@ -246,7 +249,7 @@ AsyncToolCallWrapper = Callable[
246
249
  class ToolCallWithContext(TypedDict):
247
250
  """ToolCall with additional context for graph state.
248
251
 
249
- This is an internal data structure meant to help the ToolNode accept
252
+ This is an internal data structure meant to help the `ToolNode` accept
250
253
  tool calls with additional context (e.g. state) when dispatched using the
251
254
  Send API.
252
255
 
@@ -267,16 +270,16 @@ class ToolCallWithContext(TypedDict):
267
270
 
268
271
 
269
272
  def msg_content_output(output: Any) -> str | list[dict]:
270
- """Convert tool output to ToolMessage content format.
273
+ """Convert tool output to `ToolMessage` content format.
271
274
 
272
- Handles str, list[dict] (content blocks), and arbitrary objects by attempting
275
+ Handles `str`, `list[dict]` (content blocks), and arbitrary objects by attempting
273
276
  JSON serialization with fallback to str().
274
277
 
275
278
  Args:
276
279
  output: Tool execution output of any type.
277
280
 
278
281
  Returns:
279
- String or list of content blocks suitable for ToolMessage.content.
282
+ String or list of content blocks suitable for `ToolMessage.content`.
280
283
  """
281
284
  if isinstance(output, str) or (
282
285
  isinstance(output, list)
@@ -296,7 +299,7 @@ def msg_content_output(output: Any) -> str | list[dict]:
296
299
  class ToolInvocationError(ToolException):
297
300
  """An error occurred while invoking a tool due to invalid arguments.
298
301
 
299
- This exception is only raised when invoking a tool using the ToolNode!
302
+ This exception is only raised when invoking a tool using the `ToolNode`!
300
303
  """
301
304
 
302
305
  def __init__(
@@ -337,7 +340,7 @@ def _handle_tool_error(
337
340
  """Generate error message content based on exception handling configuration.
338
341
 
339
342
  This function centralizes error message generation logic, supporting different
340
- error handling strategies configured via the ToolNode's handle_tool_errors
343
+ error handling strategies configured via the `ToolNode`'s `handle_tool_errors`
341
344
  parameter.
342
345
 
343
346
  Args:
@@ -349,12 +352,12 @@ def _handle_tool_error(
349
352
  - tuple: Not used in this context (handled by caller)
350
353
 
351
354
  Returns:
352
- A string containing the error message to include in the ToolMessage.
355
+ A string containing the error message to include in the `ToolMessage`.
353
356
 
354
357
  Raises:
355
358
  ValueError: If flag is not one of the supported types.
356
359
 
357
- Note:
360
+ !!! note
358
361
  The tuple case is handled by the caller through exception type checking,
359
362
  not by this function directly.
360
363
  """
@@ -391,9 +394,9 @@ def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception],
391
394
 
392
395
  Raises:
393
396
  ValueError: If the handler's annotation contains non-Exception types or
394
- if Union types contain non-Exception types.
397
+ if Union types contain non-Exception types.
395
398
 
396
- Note:
399
+ !!! note
397
400
  This function supports both single exception types and Union types for
398
401
  handlers that need to handle multiple exception types differently.
399
402
  """
@@ -559,7 +562,7 @@ class _ToolNode(RunnableCallable):
559
562
  wrap_tool_call: ToolCallWrapper | None = None,
560
563
  awrap_tool_call: AsyncToolCallWrapper | None = None,
561
564
  ) -> None:
562
- """Initialize ToolNode with tools and configuration.
565
+ """Initialize `ToolNode` with tools and configuration.
563
566
 
564
567
  Args:
565
568
  tools: Sequence of tools to make available for execution.
@@ -625,7 +628,7 @@ class _ToolNode(RunnableCallable):
625
628
  injected_tool_calls = []
626
629
  input_types = [input_type] * len(tool_calls)
627
630
  for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
628
- injected_call = self._inject_tool_args(call, tool_runtime)
631
+ injected_call = self._inject_tool_args(call, tool_runtime) # type: ignore[arg-type]
629
632
  injected_tool_calls.append(injected_call)
630
633
  with get_executor_for_config(config) as executor:
631
634
  outputs = list(
@@ -660,9 +663,9 @@ class _ToolNode(RunnableCallable):
660
663
  injected_tool_calls = []
661
664
  coros = []
662
665
  for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
663
- injected_call = self._inject_tool_args(call, tool_runtime)
666
+ injected_call = self._inject_tool_args(call, tool_runtime) # type: ignore[arg-type]
664
667
  injected_tool_calls.append(injected_call)
665
- coros.append(self._arun_one(injected_call, input_type, tool_runtime))
668
+ coros.append(self._arun_one(injected_call, input_type, tool_runtime)) # type: ignore[arg-type]
666
669
  outputs = await asyncio.gather(*coros)
667
670
 
668
671
  return self._combine_tool_outputs(outputs, input_type)
@@ -1180,11 +1183,11 @@ class _ToolNode(RunnableCallable):
1180
1183
 
1181
1184
  Raises:
1182
1185
  ValueError: If a tool requires store injection but no store is provided,
1183
- or if state injection requirements cannot be satisfied.
1186
+ or if state injection requirements cannot be satisfied.
1184
1187
 
1185
- Note:
1188
+ !!! note
1186
1189
  This method is called automatically during tool execution. It should not
1187
- be called from outside the ToolNode.
1190
+ be called from outside the `ToolNode`.
1188
1191
  """
1189
1192
  if tool_call["name"] not in self.tools_by_name:
1190
1193
  return tool_call
@@ -1328,9 +1331,9 @@ def tools_condition(
1328
1331
  return tools_condition(state, messages_key="chat_history")
1329
1332
  ```
1330
1333
 
1331
- Note:
1332
- This function is designed to work seamlessly with ToolNode and standard
1333
- LangGraph patterns. It expects the last message to be an AIMessage when
1334
+ !!! note
1335
+ This function is designed to work seamlessly with `ToolNode` and standard
1336
+ LangGraph patterns. It expects the last message to be an `AIMessage` when
1334
1337
  tool calls are present, which is the standard output format for tool-calling
1335
1338
  language models.
1336
1339
  """
@@ -1349,19 +1352,19 @@ def tools_condition(
1349
1352
 
1350
1353
 
1351
1354
  @dataclass
1352
- class ToolRuntime(InjectedToolArg, Generic[ContextT, StateT]):
1355
+ class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1353
1356
  """Runtime context automatically injected into tools.
1354
1357
 
1355
- When a tool function has a parameter named 'tool_runtime' with type hint
1356
- 'ToolRuntime', the tool execution system will automatically inject
1357
- an instance containing:
1358
+ When a tool function has a parameter named `tool_runtime` with type hint
1359
+ `ToolRuntime`, the tool execution system will automatically inject an instance
1360
+ containing:
1358
1361
 
1359
- - state: The current graph state
1360
- - tool_call_id: The ID of the current tool call
1361
- - config: RunnableConfig for the current execution
1362
- - context: Runtime context (from langgraph Runtime)
1363
- - store: BaseStore instance for persistent storage (from langgraph Runtime)
1364
- - stream_writer: StreamWriter for streaming output (from langgraph Runtime)
1362
+ - `state`: The current graph state
1363
+ - `tool_call_id`: The ID of the current tool call
1364
+ - `config`: `RunnableConfig` for the current execution
1365
+ - `context`: Runtime context (from langgraph `Runtime`)
1366
+ - `store`: `BaseStore` instance for persistent storage (from langgraph `Runtime`)
1367
+ - `stream_writer`: `StreamWriter` for streaming output (from langgraph `Runtime`)
1365
1368
 
1366
1369
  No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
1367
1370
  as a parameter.
@@ -1395,7 +1398,7 @@ class ToolRuntime(InjectedToolArg, Generic[ContextT, StateT]):
1395
1398
  return f"Processed {x}"
1396
1399
  ```
1397
1400
 
1398
- Note:
1401
+ !!! note
1399
1402
  This is a marker class used for type checking and detection.
1400
1403
  The actual runtime object will be constructed during tool execution.
1401
1404
  """
@@ -1469,7 +1472,7 @@ class InjectedState(InjectedToolArg):
1469
1472
  ]
1470
1473
  ```
1471
1474
 
1472
- Note:
1475
+ !!! note
1473
1476
  - `InjectedState` arguments are automatically excluded from tool schemas
1474
1477
  presented to language models
1475
1478
  - `ToolNode` handles the injection process during execution
@@ -1549,7 +1552,7 @@ class InjectedStore(InjectedToolArg):
1549
1552
  result2 = graph.invoke({"messages": [HumanMessage("What's my favorite color?")]})
1550
1553
  ```
1551
1554
 
1552
- Note:
1555
+ !!! note
1553
1556
  - `InjectedStore` arguments are automatically excluded from tool schemas
1554
1557
  presented to language models
1555
1558
  - The store instance is automatically injected by `ToolNode` during execution
@@ -1,18 +1,19 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain
3
- Version: 1.0.0rc1
3
+ Version: 1.0.1
4
4
  Summary: Building applications with LLMs through composability
5
- Project-URL: homepage, https://docs.langchain.com/
6
- Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
7
- Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
8
- Project-URL: twitter, https://x.com/LangChainAI
9
- Project-URL: slack, https://www.langchain.com/join-community
10
- Project-URL: reddit, https://www.reddit.com/r/LangChain/
5
+ Project-URL: Homepage, https://docs.langchain.com/
6
+ Project-URL: Documentation, https://reference.langchain.com/python/langchain/langchain/
7
+ Project-URL: Source, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
8
+ Project-URL: Changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
9
+ Project-URL: Twitter, https://x.com/LangChainAI
10
+ Project-URL: Slack, https://www.langchain.com/join-community
11
+ Project-URL: Reddit, https://www.reddit.com/r/LangChain/
11
12
  License: MIT
12
13
  License-File: LICENSE
13
14
  Requires-Python: <4.0.0,>=3.10.0
14
- Requires-Dist: langchain-core<2.0.0,>=1.0.0a7
15
- Requires-Dist: langgraph<2.0.0,>=1.0.0a4
15
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
16
+ Requires-Dist: langgraph<1.1.0,>=1.0.0
16
17
  Requires-Dist: pydantic<3.0.0,>=2.7.4
17
18
  Provides-Extra: anthropic
18
19
  Requires-Dist: langchain-anthropic; extra == 'anthropic'
@@ -30,6 +31,8 @@ Provides-Extra: google-vertexai
30
31
  Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
31
32
  Provides-Extra: groq
32
33
  Requires-Dist: langchain-groq; extra == 'groq'
34
+ Provides-Extra: huggingface
35
+ Requires-Dist: langchain-huggingface; extra == 'huggingface'
33
36
  Provides-Extra: mistralai
34
37
  Requires-Dist: langchain-mistralai; extra == 'mistralai'
35
38
  Provides-Extra: ollama
@@ -1,30 +1,34 @@
1
- langchain/__init__.py,sha256=P-ltxKbHEHDxRxjxiCkHmWeDHiI9-Bgd6sjEJ8c0TcY,64
1
+ langchain/__init__.py,sha256=3EzcPFwVNXhQEus521J89yt2GHHJs6VenVpzltp09Uk,61
2
2
  langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
4
- langchain/agents/factory.py,sha256=dEfeSNBZ2NCp0E64mHgxNseNASfMdHd-7i2JhPbWv_c,62606
4
+ langchain/agents/factory.py,sha256=vUpv5sD91eaLUH_2GK5TrjnblwEl7h9L-3GrDtBueq4,62774
5
5
  langchain/agents/structured_output.py,sha256=BDSF6PWVih41M7IGyjCHWy46jmDxZsfBt_B4llE9OOc,13764
6
- langchain/agents/middleware/__init__.py,sha256=FBoTr4TAyuLJiOKV-mJN3oaLE6D6Q5ubI5sCbnsCCSs,1955
6
+ langchain/agents/middleware/__init__.py,sha256=Vm-Ajh4YoaahAa9b_XEAuiyoupKNIwZVzX-8JN1xKkA,2251
7
+ langchain/agents/middleware/_execution.py,sha256=Xyjh3HxTHbgA-C9FFE4WXUOqKUW8mdOB455XRlA_BOU,14251
8
+ langchain/agents/middleware/_redaction.py,sha256=LJeNOvdZ0gd4273Lqgpbxh7EiuuZ6q5LlqeHK4eyin4,11210
7
9
  langchain/agents/middleware/context_editing.py,sha256=0sUpDc0FvOKMERNnEKnhBqrTjX_rCVWjIX8hH3RTG8U,8749
10
+ langchain/agents/middleware/file_search.py,sha256=RiBNJRfy8R5E8TvjQRVgXf1O0UDtXqEarirFPnihbtI,12757
8
11
  langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
9
12
  langchain/agents/middleware/model_call_limit.py,sha256=yYBcOY5DKNIG6_9f-rkTjIj_BLVUO1tuShgxt00P8W0,7735
10
13
  langchain/agents/middleware/model_fallback.py,sha256=VKDN81jfFB9zJOaJZ94tfwzodk3zRXRwz6CqQ6MkJtw,4097
11
- langchain/agents/middleware/pii.py,sha256=rkGojBFIJGMs1p1cKNh0npafts_0UUJ0_NeZsyJo498,24760
14
+ langchain/agents/middleware/pii.py,sha256=I3nTAnfvrHqre9SoRJvlw0koT8-x3gGZdSvb0uKH5xg,10978
15
+ langchain/agents/middleware/shell_tool.py,sha256=HqPTht0Gx3zAv3JmWS_R1Hsd6yQEkvDjlCXXwQq8Fg8,26627
12
16
  langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
13
17
  langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
14
18
  langchain/agents/middleware/tool_call_limit.py,sha256=AHA-No4oUze2-2d1INnX8d_9otFuDB8uoWayJpt9nPo,12321
15
19
  langchain/agents/middleware/tool_emulator.py,sha256=UmN5UIMsikDnFvcPbNLNDOF4RXvIxqd-AMG46LVI0iA,7211
16
20
  langchain/agents/middleware/tool_retry.py,sha256=M76csBFFZa37djxtfUCqNU-x2erTNtxZqoO__DozWxA,13787
17
21
  langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
18
- langchain/agents/middleware/types.py,sha256=FVuYue9cLB5C1ZNcYLIGNPN6IUaNXY3UsWQ6gC-gZNs,55547
22
+ langchain/agents/middleware/types.py,sha256=4oQAoTSHL1LQh9OiUxz_WJ6JrAvKRiUSXUAHawOcLkU,55670
19
23
  langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
20
- langchain/chat_models/base.py,sha256=k1Qnuh7O_3LwsWtcVFSWsWP00hxiEyninwltTdi1rk8,35655
21
- langchain/embeddings/__init__.py,sha256=crY7GUw7RSA25JgpYl7I4WPloVCVY6eUmJbSSSchWis,444
22
- langchain/embeddings/base.py,sha256=1aNxDLQmS_l7RMcvjnK7Cv7rtgKrKt6Sl7mgXM2_JWI,7329
24
+ langchain/chat_models/base.py,sha256=XowWNeNZbVpGOOGGd2Wmvs2h6h7TzjFcQOx4KnJ2g4M,35544
25
+ langchain/embeddings/__init__.py,sha256=FYmjgpgjQc8wTy_qu2VnwgYiSqF6tUtwH6jtea6VbSs,843
26
+ langchain/embeddings/base.py,sha256=V9YgYiRAJs5U5o8BQUsmfV8XtRQrRWV3rENXHLKiECg,8511
23
27
  langchain/messages/__init__.py,sha256=p7NlF1yf8MkMgJzJ2wggXGkkA_okz1f-g63KoflL6PA,1710
24
28
  langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
25
29
  langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
26
- langchain/tools/tool_node.py,sha256=wsYXehwtaCIWYOSv4ncV23WQ7-N34sEwckcYfaFoLFg,64977
27
- langchain-1.0.0rc1.dist-info/METADATA,sha256=niUo9gSSaOCiH5kFM9cYfd0m1G06cdfifywnk0G_Mqk,4543
28
- langchain-1.0.0rc1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
29
- langchain-1.0.0rc1.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
30
- langchain-1.0.0rc1.dist-info/RECORD,,
30
+ langchain/tools/tool_node.py,sha256=JVrF5W8JER7Y--f1zagMJz8efOaT6gsYfuh98HAZQ8g,65378
31
+ langchain-1.0.1.dist-info/METADATA,sha256=eX-uYQv1NzmNVWzJWwd9SibmfmWUEsWMVUkmgeYCn70,4709
32
+ langchain-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
33
+ langchain-1.0.1.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
34
+ langchain-1.0.1.dist-info/RECORD,,