langchain 1.0.0rc2__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,7 +45,6 @@ __all__ = [
45
45
  "ModelRequest",
46
46
  "ModelResponse",
47
47
  "OmitFromSchema",
48
- "PublicAgentState",
49
48
  "after_agent",
50
49
  "after_model",
51
50
  "before_agent",
@@ -172,11 +171,14 @@ class AgentState(TypedDict, Generic[ResponseT]):
172
171
  structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
173
172
 
174
173
 
175
- class PublicAgentState(TypedDict, Generic[ResponseT]):
176
- """Public state schema for the agent.
174
+ class _InputAgentState(TypedDict): # noqa: PYI049
175
+ """Input state schema for the agent."""
177
176
 
178
- Just used for typing purposes.
179
- """
177
+ messages: Required[Annotated[list[AnyMessage | dict], add_messages]]
178
+
179
+
180
+ class _OutputAgentState(TypedDict, Generic[ResponseT]): # noqa: PYI049
181
+ """Output state schema for the agent."""
180
182
 
181
183
  messages: Required[Annotated[list[AnyMessage], add_messages]]
182
184
  structured_response: NotRequired[ResponseT]
@@ -4,14 +4,7 @@ from __future__ import annotations
4
4
 
5
5
  import warnings
6
6
  from importlib import util
7
- from typing import (
8
- TYPE_CHECKING,
9
- Any,
10
- Literal,
11
- TypeAlias,
12
- cast,
13
- overload,
14
- )
7
+ from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast, overload
15
8
 
16
9
  from langchain_core.language_models import BaseChatModel, LanguageModelInput
17
10
  from langchain_core.messages import AIMessage, AnyMessage
@@ -83,7 +76,7 @@ def init_chat_model(
83
76
  for supported model parameters.
84
77
 
85
78
  Args:
86
- model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
79
+ model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5'`.
87
80
 
88
81
  You can also specify model and model provider in a single argument using:
89
82
 
@@ -179,7 +172,7 @@ def init_chat_model(
179
172
  from langchain.chat_models import init_chat_model
180
173
 
181
174
  o3_mini = init_chat_model("openai:o3-mini", temperature=0)
182
- claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
175
+ claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5", temperature=0)
183
176
  gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
184
177
 
185
178
  o3_mini.invoke("what's your name")
@@ -201,7 +194,7 @@ def init_chat_model(
201
194
 
202
195
  configurable_model.invoke(
203
196
  "what's your name",
204
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
197
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
205
198
  )
206
199
  ```
207
200
 
@@ -225,7 +218,7 @@ def init_chat_model(
225
218
  "what's your name",
226
219
  config={
227
220
  "configurable": {
228
- "foo_model": "anthropic:claude-sonnet-4-5-20250929",
221
+ "foo_model": "anthropic:claude-sonnet-4-5",
229
222
  "foo_temperature": 0.6,
230
223
  }
231
224
  },
@@ -271,7 +264,7 @@ def init_chat_model(
271
264
 
272
265
  configurable_model_with_tools.invoke(
273
266
  "Which city is hotter today and which is bigger: LA or NY?",
274
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
267
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
275
268
  )
276
269
  ```
277
270
 
@@ -612,10 +605,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
612
605
  @property
613
606
  def InputType(self) -> TypeAlias:
614
607
  """Get the input type for this `Runnable`."""
615
- from langchain_core.prompt_values import (
616
- ChatPromptValueConcrete,
617
- StringPromptValue,
618
- )
608
+ from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
619
609
 
620
610
  # This is a version of LanguageModelInput which replaces the abstract
621
611
  # base class BaseMessage with a union of its subclasses, which makes
@@ -4,6 +4,12 @@
4
4
  This page contains **reference documentation** for Embeddings. See
5
5
  [the docs](https://docs.langchain.com/oss/python/langchain/retrieval#embedding-models)
6
6
  for conceptual guides, tutorials, and examples on using Embeddings.
7
+
8
+ !!! warning "Modules moved"
9
+ With the release of `langchain 1.0.0`, several embeddings modules were moved to
10
+ `langchain-classic`, such as `CacheBackedEmbeddings` and all community
11
+ embeddings. See [list](https://github.com/langchain-ai/langchain/blob/bdf1cd383ce36dc18381a3bf3fb0a579337a32b5/libs/langchain/langchain/embeddings/__init__.py)
12
+ of moved modules to inform your migration.
7
13
  """
8
14
 
9
15
  from langchain_core.embeddings import Embeddings
@@ -133,20 +133,34 @@ def init_embeddings(
133
133
  installed.
134
134
 
135
135
  Args:
136
- model: Name of the model to use. Can be either:
136
+ model: Name of the model to use.
137
+
138
+ Can be either:
139
+
137
140
  - A model string like `"openai:text-embedding-3-small"`
138
- - Just the model name if provider is specified
139
- provider: Optional explicit provider name. If not specified,
140
- will attempt to parse from the model string. Supported providers
141
- and their required packages:
141
+ - Just the model name if the provider is specified separately or can be
142
+ inferred.
143
+
144
+ See supported providers under the `provider` arg description.
145
+ provider: Optional explicit provider name. If not specified, will attempt to
146
+ parse from the model string in the `model` arg.
147
+
148
+ Supported providers:
142
149
 
143
- {_get_provider_list()}
150
+ - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
151
+ - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
152
+ - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
153
+ - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
154
+ - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
155
+ - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
156
+ - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
157
+ - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
144
158
 
145
159
  **kwargs: Additional model-specific parameters passed to the embedding model.
146
160
  These vary by provider, see the provider-specific documentation for details.
147
161
 
148
162
  Returns:
149
- An Embeddings instance that can generate embeddings for text.
163
+ An `Embeddings` instance that can generate embeddings for text.
150
164
 
151
165
  Raises:
152
166
  ValueError: If the model provider is not supported or cannot be determined
@@ -13,10 +13,10 @@ The module implements design patterns for:
13
13
  - Command-based state updates for advanced control flow
14
14
 
15
15
  Key Components:
16
- ToolNode: Main class for executing tools in LangGraph workflows
17
- InjectedState: Annotation for injecting graph state into tools
18
- InjectedStore: Annotation for injecting persistent store into tools
19
- tools_condition: Utility function for conditional routing based on tool calls
16
+ `ToolNode`: Main class for executing tools in LangGraph workflows
17
+ `InjectedState`: Annotation for injecting graph state into tools
18
+ `InjectedStore`: Annotation for injecting persistent store into tools
19
+ `tools_condition`: Utility function for conditional routing based on tool calls
20
20
 
21
21
  Typical Usage:
22
22
  ```python
@@ -49,7 +49,6 @@ from typing import (
49
49
  Generic,
50
50
  Literal,
51
51
  TypedDict,
52
- TypeVar,
53
52
  Union,
54
53
  cast,
55
54
  get_args,
@@ -84,15 +83,18 @@ from langgraph.graph.message import REMOVE_ALL_MESSAGES
84
83
  from langgraph.store.base import BaseStore # noqa: TC002
85
84
  from langgraph.types import Command, Send, StreamWriter
86
85
  from pydantic import BaseModel, ValidationError
87
- from typing_extensions import Unpack
86
+ from typing_extensions import TypeVar, Unpack
88
87
 
89
88
  if TYPE_CHECKING:
90
89
  from collections.abc import Sequence
91
90
 
92
91
  from langgraph.runtime import Runtime
93
92
 
94
- StateT = TypeVar("StateT")
95
- ContextT = TypeVar("ContextT")
93
+ # right now we use a dict as the default, can change this to AgentState, but depends
94
+ # on if this lives in LangChain or LangGraph... ideally would have some typed
95
+ # messages key
96
+ StateT = TypeVar("StateT", default=dict)
97
+ ContextT = TypeVar("ContextT", default=None)
96
98
 
97
99
  INVALID_TOOL_NAME_ERROR_TEMPLATE = (
98
100
  "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
@@ -123,11 +125,11 @@ class ToolCallRequest:
123
125
  Attributes:
124
126
  tool_call: Tool call dict with name, args, and id from model output.
125
127
  tool: BaseTool instance to be invoked, or None if tool is not
126
- registered with the ToolNode. When tool is None, interceptors can
127
- handle the request without validation. If the interceptor calls execute(),
128
+ registered with the `ToolNode`. When tool is `None`, interceptors can
129
+ handle the request without validation. If the interceptor calls `execute()`,
128
130
  validation will occur and raise an error for unregistered tools.
129
- state: Agent state (dict, list, or BaseModel).
130
- runtime: LangGraph runtime context (optional, None if outside graph).
131
+ state: Agent state (`dict`, `list`, or `BaseModel`).
132
+ runtime: LangGraph runtime context (optional, `None` if outside graph).
131
133
  """
132
134
 
133
135
  tool_call: ToolCall
@@ -178,7 +180,7 @@ The execute callable can be invoked multiple times for retry logic,
178
180
  with potentially modified requests each time. Each call to execute
179
181
  is independent and stateless.
180
182
 
181
- Note:
183
+ !!! note
182
184
  When implementing middleware for `create_agent`, use
183
185
  `AgentMiddleware.wrap_tool_call` which provides properly typed
184
186
  state parameter for better type safety.
@@ -247,7 +249,7 @@ AsyncToolCallWrapper = Callable[
247
249
  class ToolCallWithContext(TypedDict):
248
250
  """ToolCall with additional context for graph state.
249
251
 
250
- This is an internal data structure meant to help the ToolNode accept
252
+ This is an internal data structure meant to help the `ToolNode` accept
251
253
  tool calls with additional context (e.g. state) when dispatched using the
252
254
  Send API.
253
255
 
@@ -268,16 +270,16 @@ class ToolCallWithContext(TypedDict):
268
270
 
269
271
 
270
272
  def msg_content_output(output: Any) -> str | list[dict]:
271
- """Convert tool output to ToolMessage content format.
273
+ """Convert tool output to `ToolMessage` content format.
272
274
 
273
- Handles str, list[dict] (content blocks), and arbitrary objects by attempting
275
+ Handles `str`, `list[dict]` (content blocks), and arbitrary objects by attempting
274
276
  JSON serialization with fallback to str().
275
277
 
276
278
  Args:
277
279
  output: Tool execution output of any type.
278
280
 
279
281
  Returns:
280
- String or list of content blocks suitable for ToolMessage.content.
282
+ String or list of content blocks suitable for `ToolMessage.content`.
281
283
  """
282
284
  if isinstance(output, str) or (
283
285
  isinstance(output, list)
@@ -297,7 +299,7 @@ def msg_content_output(output: Any) -> str | list[dict]:
297
299
  class ToolInvocationError(ToolException):
298
300
  """An error occurred while invoking a tool due to invalid arguments.
299
301
 
300
- This exception is only raised when invoking a tool using the ToolNode!
302
+ This exception is only raised when invoking a tool using the `ToolNode`!
301
303
  """
302
304
 
303
305
  def __init__(
@@ -338,7 +340,7 @@ def _handle_tool_error(
338
340
  """Generate error message content based on exception handling configuration.
339
341
 
340
342
  This function centralizes error message generation logic, supporting different
341
- error handling strategies configured via the ToolNode's handle_tool_errors
343
+ error handling strategies configured via the `ToolNode`'s `handle_tool_errors`
342
344
  parameter.
343
345
 
344
346
  Args:
@@ -350,12 +352,12 @@ def _handle_tool_error(
350
352
  - tuple: Not used in this context (handled by caller)
351
353
 
352
354
  Returns:
353
- A string containing the error message to include in the ToolMessage.
355
+ A string containing the error message to include in the `ToolMessage`.
354
356
 
355
357
  Raises:
356
358
  ValueError: If flag is not one of the supported types.
357
359
 
358
- Note:
360
+ !!! note
359
361
  The tuple case is handled by the caller through exception type checking,
360
362
  not by this function directly.
361
363
  """
@@ -392,9 +394,9 @@ def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception],
392
394
 
393
395
  Raises:
394
396
  ValueError: If the handler's annotation contains non-Exception types or
395
- if Union types contain non-Exception types.
397
+ if Union types contain non-Exception types.
396
398
 
397
- Note:
399
+ !!! note
398
400
  This function supports both single exception types and Union types for
399
401
  handlers that need to handle multiple exception types differently.
400
402
  """
@@ -560,7 +562,7 @@ class _ToolNode(RunnableCallable):
560
562
  wrap_tool_call: ToolCallWrapper | None = None,
561
563
  awrap_tool_call: AsyncToolCallWrapper | None = None,
562
564
  ) -> None:
563
- """Initialize ToolNode with tools and configuration.
565
+ """Initialize `ToolNode` with tools and configuration.
564
566
 
565
567
  Args:
566
568
  tools: Sequence of tools to make available for execution.
@@ -626,7 +628,7 @@ class _ToolNode(RunnableCallable):
626
628
  injected_tool_calls = []
627
629
  input_types = [input_type] * len(tool_calls)
628
630
  for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
629
- injected_call = self._inject_tool_args(call, tool_runtime)
631
+ injected_call = self._inject_tool_args(call, tool_runtime) # type: ignore[arg-type]
630
632
  injected_tool_calls.append(injected_call)
631
633
  with get_executor_for_config(config) as executor:
632
634
  outputs = list(
@@ -661,9 +663,9 @@ class _ToolNode(RunnableCallable):
661
663
  injected_tool_calls = []
662
664
  coros = []
663
665
  for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
664
- injected_call = self._inject_tool_args(call, tool_runtime)
666
+ injected_call = self._inject_tool_args(call, tool_runtime) # type: ignore[arg-type]
665
667
  injected_tool_calls.append(injected_call)
666
- coros.append(self._arun_one(injected_call, input_type, tool_runtime))
668
+ coros.append(self._arun_one(injected_call, input_type, tool_runtime)) # type: ignore[arg-type]
667
669
  outputs = await asyncio.gather(*coros)
668
670
 
669
671
  return self._combine_tool_outputs(outputs, input_type)
@@ -1181,11 +1183,11 @@ class _ToolNode(RunnableCallable):
1181
1183
 
1182
1184
  Raises:
1183
1185
  ValueError: If a tool requires store injection but no store is provided,
1184
- or if state injection requirements cannot be satisfied.
1186
+ or if state injection requirements cannot be satisfied.
1185
1187
 
1186
- Note:
1188
+ !!! note
1187
1189
  This method is called automatically during tool execution. It should not
1188
- be called from outside the ToolNode.
1190
+ be called from outside the `ToolNode`.
1189
1191
  """
1190
1192
  if tool_call["name"] not in self.tools_by_name:
1191
1193
  return tool_call
@@ -1329,9 +1331,9 @@ def tools_condition(
1329
1331
  return tools_condition(state, messages_key="chat_history")
1330
1332
  ```
1331
1333
 
1332
- Note:
1333
- This function is designed to work seamlessly with ToolNode and standard
1334
- LangGraph patterns. It expects the last message to be an AIMessage when
1334
+ !!! note
1335
+ This function is designed to work seamlessly with `ToolNode` and standard
1336
+ LangGraph patterns. It expects the last message to be an `AIMessage` when
1335
1337
  tool calls are present, which is the standard output format for tool-calling
1336
1338
  language models.
1337
1339
  """
@@ -1353,16 +1355,16 @@ def tools_condition(
1353
1355
  class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1354
1356
  """Runtime context automatically injected into tools.
1355
1357
 
1356
- When a tool function has a parameter named 'tool_runtime' with type hint
1357
- 'ToolRuntime', the tool execution system will automatically inject
1358
- an instance containing:
1358
+ When a tool function has a parameter named `tool_runtime` with type hint
1359
+ `ToolRuntime`, the tool execution system will automatically inject an instance
1360
+ containing:
1359
1361
 
1360
- - state: The current graph state
1361
- - tool_call_id: The ID of the current tool call
1362
- - config: RunnableConfig for the current execution
1363
- - context: Runtime context (from langgraph Runtime)
1364
- - store: BaseStore instance for persistent storage (from langgraph Runtime)
1365
- - stream_writer: StreamWriter for streaming output (from langgraph Runtime)
1362
+ - `state`: The current graph state
1363
+ - `tool_call_id`: The ID of the current tool call
1364
+ - `config`: `RunnableConfig` for the current execution
1365
+ - `context`: Runtime context (from langgraph `Runtime`)
1366
+ - `store`: `BaseStore` instance for persistent storage (from langgraph `Runtime`)
1367
+ - `stream_writer`: `StreamWriter` for streaming output (from langgraph `Runtime`)
1366
1368
 
1367
1369
  No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
1368
1370
  as a parameter.
@@ -1396,7 +1398,7 @@ class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1396
1398
  return f"Processed {x}"
1397
1399
  ```
1398
1400
 
1399
- Note:
1401
+ !!! note
1400
1402
  This is a marker class used for type checking and detection.
1401
1403
  The actual runtime object will be constructed during tool execution.
1402
1404
  """
@@ -1470,7 +1472,7 @@ class InjectedState(InjectedToolArg):
1470
1472
  ]
1471
1473
  ```
1472
1474
 
1473
- Note:
1475
+ !!! note
1474
1476
  - `InjectedState` arguments are automatically excluded from tool schemas
1475
1477
  presented to language models
1476
1478
  - `ToolNode` handles the injection process during execution
@@ -1550,7 +1552,7 @@ class InjectedStore(InjectedToolArg):
1550
1552
  result2 = graph.invoke({"messages": [HumanMessage("What's my favorite color?")]})
1551
1553
  ```
1552
1554
 
1553
- Note:
1555
+ !!! note
1554
1556
  - `InjectedStore` arguments are automatically excluded from tool schemas
1555
1557
  presented to language models
1556
1558
  - The store instance is automatically injected by `ToolNode` during execution
@@ -1,18 +1,19 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain
3
- Version: 1.0.0rc2
3
+ Version: 1.0.1
4
4
  Summary: Building applications with LLMs through composability
5
- Project-URL: homepage, https://docs.langchain.com/
6
- Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
7
- Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
8
- Project-URL: twitter, https://x.com/LangChainAI
9
- Project-URL: slack, https://www.langchain.com/join-community
10
- Project-URL: reddit, https://www.reddit.com/r/LangChain/
5
+ Project-URL: Homepage, https://docs.langchain.com/
6
+ Project-URL: Documentation, https://reference.langchain.com/python/langchain/langchain/
7
+ Project-URL: Source, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
8
+ Project-URL: Changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
9
+ Project-URL: Twitter, https://x.com/LangChainAI
10
+ Project-URL: Slack, https://www.langchain.com/join-community
11
+ Project-URL: Reddit, https://www.reddit.com/r/LangChain/
11
12
  License: MIT
12
13
  License-File: LICENSE
13
14
  Requires-Python: <4.0.0,>=3.10.0
14
- Requires-Dist: langchain-core<2.0.0,>=1.0.0rc3
15
- Requires-Dist: langgraph<2.0.0,>=1.0.0a4
15
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
16
+ Requires-Dist: langgraph<1.1.0,>=1.0.0
16
17
  Requires-Dist: pydantic<3.0.0,>=2.7.4
17
18
  Provides-Extra: anthropic
18
19
  Requires-Dist: langchain-anthropic; extra == 'anthropic'
@@ -30,6 +31,8 @@ Provides-Extra: google-vertexai
30
31
  Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
31
32
  Provides-Extra: groq
32
33
  Requires-Dist: langchain-groq; extra == 'groq'
34
+ Provides-Extra: huggingface
35
+ Requires-Dist: langchain-huggingface; extra == 'huggingface'
33
36
  Provides-Extra: mistralai
34
37
  Requires-Dist: langchain-mistralai; extra == 'mistralai'
35
38
  Provides-Extra: ollama
@@ -1,30 +1,34 @@
1
- langchain/__init__.py,sha256=zTVJXeQnKRDdEeQc-ivRY9P9TV6An_dmmkgW6phY86I,64
1
+ langchain/__init__.py,sha256=3EzcPFwVNXhQEus521J89yt2GHHJs6VenVpzltp09Uk,61
2
2
  langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
4
- langchain/agents/factory.py,sha256=RhClhqWInb2i0pjNJj9-0OvnB6waj0r8Qbm5bN7ofw8,62214
4
+ langchain/agents/factory.py,sha256=vUpv5sD91eaLUH_2GK5TrjnblwEl7h9L-3GrDtBueq4,62774
5
5
  langchain/agents/structured_output.py,sha256=BDSF6PWVih41M7IGyjCHWy46jmDxZsfBt_B4llE9OOc,13764
6
- langchain/agents/middleware/__init__.py,sha256=FBoTr4TAyuLJiOKV-mJN3oaLE6D6Q5ubI5sCbnsCCSs,1955
6
+ langchain/agents/middleware/__init__.py,sha256=Vm-Ajh4YoaahAa9b_XEAuiyoupKNIwZVzX-8JN1xKkA,2251
7
+ langchain/agents/middleware/_execution.py,sha256=Xyjh3HxTHbgA-C9FFE4WXUOqKUW8mdOB455XRlA_BOU,14251
8
+ langchain/agents/middleware/_redaction.py,sha256=LJeNOvdZ0gd4273Lqgpbxh7EiuuZ6q5LlqeHK4eyin4,11210
7
9
  langchain/agents/middleware/context_editing.py,sha256=0sUpDc0FvOKMERNnEKnhBqrTjX_rCVWjIX8hH3RTG8U,8749
10
+ langchain/agents/middleware/file_search.py,sha256=RiBNJRfy8R5E8TvjQRVgXf1O0UDtXqEarirFPnihbtI,12757
8
11
  langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
9
12
  langchain/agents/middleware/model_call_limit.py,sha256=yYBcOY5DKNIG6_9f-rkTjIj_BLVUO1tuShgxt00P8W0,7735
10
13
  langchain/agents/middleware/model_fallback.py,sha256=VKDN81jfFB9zJOaJZ94tfwzodk3zRXRwz6CqQ6MkJtw,4097
11
- langchain/agents/middleware/pii.py,sha256=rkGojBFIJGMs1p1cKNh0npafts_0UUJ0_NeZsyJo498,24760
14
+ langchain/agents/middleware/pii.py,sha256=I3nTAnfvrHqre9SoRJvlw0koT8-x3gGZdSvb0uKH5xg,10978
15
+ langchain/agents/middleware/shell_tool.py,sha256=HqPTht0Gx3zAv3JmWS_R1Hsd6yQEkvDjlCXXwQq8Fg8,26627
12
16
  langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
13
17
  langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
14
18
  langchain/agents/middleware/tool_call_limit.py,sha256=AHA-No4oUze2-2d1INnX8d_9otFuDB8uoWayJpt9nPo,12321
15
19
  langchain/agents/middleware/tool_emulator.py,sha256=UmN5UIMsikDnFvcPbNLNDOF4RXvIxqd-AMG46LVI0iA,7211
16
20
  langchain/agents/middleware/tool_retry.py,sha256=M76csBFFZa37djxtfUCqNU-x2erTNtxZqoO__DozWxA,13787
17
21
  langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
18
- langchain/agents/middleware/types.py,sha256=FVuYue9cLB5C1ZNcYLIGNPN6IUaNXY3UsWQ6gC-gZNs,55547
22
+ langchain/agents/middleware/types.py,sha256=4oQAoTSHL1LQh9OiUxz_WJ6JrAvKRiUSXUAHawOcLkU,55670
19
23
  langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
20
- langchain/chat_models/base.py,sha256=k1Qnuh7O_3LwsWtcVFSWsWP00hxiEyninwltTdi1rk8,35655
21
- langchain/embeddings/__init__.py,sha256=crY7GUw7RSA25JgpYl7I4WPloVCVY6eUmJbSSSchWis,444
22
- langchain/embeddings/base.py,sha256=1aNxDLQmS_l7RMcvjnK7Cv7rtgKrKt6Sl7mgXM2_JWI,7329
24
+ langchain/chat_models/base.py,sha256=XowWNeNZbVpGOOGGd2Wmvs2h6h7TzjFcQOx4KnJ2g4M,35544
25
+ langchain/embeddings/__init__.py,sha256=FYmjgpgjQc8wTy_qu2VnwgYiSqF6tUtwH6jtea6VbSs,843
26
+ langchain/embeddings/base.py,sha256=V9YgYiRAJs5U5o8BQUsmfV8XtRQrRWV3rENXHLKiECg,8511
23
27
  langchain/messages/__init__.py,sha256=p7NlF1yf8MkMgJzJ2wggXGkkA_okz1f-g63KoflL6PA,1710
24
28
  langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
25
29
  langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
26
- langchain/tools/tool_node.py,sha256=0rk5SZ0L80X6DJA5ohzyuqydL-S40i5LHMXJsY2t0JI,65016
27
- langchain-1.0.0rc2.dist-info/METADATA,sha256=WG0BlqzPbwur0AlHyITXIE35MX7iSpzGIfqqD4dFLxc,4544
28
- langchain-1.0.0rc2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
29
- langchain-1.0.0rc2.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
30
- langchain-1.0.0rc2.dist-info/RECORD,,
30
+ langchain/tools/tool_node.py,sha256=JVrF5W8JER7Y--f1zagMJz8efOaT6gsYfuh98HAZQ8g,65378
31
+ langchain-1.0.1.dist-info/METADATA,sha256=eX-uYQv1NzmNVWzJWwd9SibmfmWUEsWMVUkmgeYCn70,4709
32
+ langchain-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
33
+ langchain-1.0.1.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
34
+ langchain-1.0.1.dist-info/RECORD,,