langchain 1.0.0rc2__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic. Click here for more details.

@@ -45,7 +45,6 @@ __all__ = [
45
45
  "ModelRequest",
46
46
  "ModelResponse",
47
47
  "OmitFromSchema",
48
- "PublicAgentState",
49
48
  "after_agent",
50
49
  "after_model",
51
50
  "before_agent",
@@ -172,11 +171,14 @@ class AgentState(TypedDict, Generic[ResponseT]):
172
171
  structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
173
172
 
174
173
 
175
- class PublicAgentState(TypedDict, Generic[ResponseT]):
176
- """Public state schema for the agent.
174
+ class _InputAgentState(TypedDict): # noqa: PYI049
175
+ """Input state schema for the agent."""
177
176
 
178
- Just used for typing purposes.
179
- """
177
+ messages: Required[Annotated[list[AnyMessage | dict], add_messages]]
178
+
179
+
180
+ class _OutputAgentState(TypedDict, Generic[ResponseT]): # noqa: PYI049
181
+ """Output state schema for the agent."""
180
182
 
181
183
  messages: Required[Annotated[list[AnyMessage], add_messages]]
182
184
  structured_response: NotRequired[ResponseT]
@@ -4,14 +4,7 @@ from __future__ import annotations
4
4
 
5
5
  import warnings
6
6
  from importlib import util
7
- from typing import (
8
- TYPE_CHECKING,
9
- Any,
10
- Literal,
11
- TypeAlias,
12
- cast,
13
- overload,
14
- )
7
+ from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast, overload
15
8
 
16
9
  from langchain_core.language_models import BaseChatModel, LanguageModelInput
17
10
  from langchain_core.messages import AIMessage, AnyMessage
@@ -83,7 +76,7 @@ def init_chat_model(
83
76
  for supported model parameters.
84
77
 
85
78
  Args:
86
- model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
79
+ model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5'`.
87
80
 
88
81
  You can also specify model and model provider in a single argument using:
89
82
 
@@ -179,7 +172,7 @@ def init_chat_model(
179
172
  from langchain.chat_models import init_chat_model
180
173
 
181
174
  o3_mini = init_chat_model("openai:o3-mini", temperature=0)
182
- claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
175
+ claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5", temperature=0)
183
176
  gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
184
177
 
185
178
  o3_mini.invoke("what's your name")
@@ -201,7 +194,7 @@ def init_chat_model(
201
194
 
202
195
  configurable_model.invoke(
203
196
  "what's your name",
204
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
197
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
205
198
  )
206
199
  ```
207
200
 
@@ -225,7 +218,7 @@ def init_chat_model(
225
218
  "what's your name",
226
219
  config={
227
220
  "configurable": {
228
- "foo_model": "anthropic:claude-sonnet-4-5-20250929",
221
+ "foo_model": "anthropic:claude-sonnet-4-5",
229
222
  "foo_temperature": 0.6,
230
223
  }
231
224
  },
@@ -271,7 +264,7 @@ def init_chat_model(
271
264
 
272
265
  configurable_model_with_tools.invoke(
273
266
  "Which city is hotter today and which is bigger: LA or NY?",
274
- config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
267
+ config={"configurable": {"model": "claude-sonnet-4-5"}},
275
268
  )
276
269
  ```
277
270
 
@@ -612,10 +605,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
612
605
  @property
613
606
  def InputType(self) -> TypeAlias:
614
607
  """Get the input type for this `Runnable`."""
615
- from langchain_core.prompt_values import (
616
- ChatPromptValueConcrete,
617
- StringPromptValue,
618
- )
608
+ from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
619
609
 
620
610
  # This is a version of LanguageModelInput which replaces the abstract
621
611
  # base class BaseMessage with a union of its subclasses, which makes
@@ -4,6 +4,12 @@
4
4
  This page contains **reference documentation** for Embeddings. See
5
5
  [the docs](https://docs.langchain.com/oss/python/langchain/retrieval#embedding-models)
6
6
  for conceptual guides, tutorials, and examples on using Embeddings.
7
+
8
+ !!! warning "Modules moved"
9
+ With the release of `langchain 1.0.0`, several embeddings modules were moved to
10
+ `langchain-classic`, such as `CacheBackedEmbeddings` and all community
11
+ embeddings. See [list](https://github.com/langchain-ai/langchain/blob/bdf1cd383ce36dc18381a3bf3fb0a579337a32b5/libs/langchain/langchain/embeddings/__init__.py)
12
+ of moved modules to inform your migration.
7
13
  """
8
14
 
9
15
  from langchain_core.embeddings import Embeddings
@@ -133,20 +133,34 @@ def init_embeddings(
133
133
  installed.
134
134
 
135
135
  Args:
136
- model: Name of the model to use. Can be either:
136
+ model: Name of the model to use.
137
+
138
+ Can be either:
139
+
137
140
  - A model string like `"openai:text-embedding-3-small"`
138
- - Just the model name if provider is specified
139
- provider: Optional explicit provider name. If not specified,
140
- will attempt to parse from the model string. Supported providers
141
- and their required packages:
141
+ - Just the model name if the provider is specified separately or can be
142
+ inferred.
143
+
144
+ See supported providers under the `provider` arg description.
145
+ provider: Optional explicit provider name. If not specified, will attempt to
146
+ parse from the model string in the `model` arg.
147
+
148
+ Supported providers:
142
149
 
143
- {_get_provider_list()}
150
+ - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
151
+ - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
152
+ - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
153
+ - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
154
+ - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
155
+ - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
156
+ - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
157
+ - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
144
158
 
145
159
  **kwargs: Additional model-specific parameters passed to the embedding model.
146
160
  These vary by provider, see the provider-specific documentation for details.
147
161
 
148
162
  Returns:
149
- An Embeddings instance that can generate embeddings for text.
163
+ An `Embeddings` instance that can generate embeddings for text.
150
164
 
151
165
  Raises:
152
166
  ValueError: If the model provider is not supported or cannot be determined
@@ -13,10 +13,10 @@ The module implements design patterns for:
13
13
  - Command-based state updates for advanced control flow
14
14
 
15
15
  Key Components:
16
- ToolNode: Main class for executing tools in LangGraph workflows
17
- InjectedState: Annotation for injecting graph state into tools
18
- InjectedStore: Annotation for injecting persistent store into tools
19
- tools_condition: Utility function for conditional routing based on tool calls
16
+ `ToolNode`: Main class for executing tools in LangGraph workflows
17
+ `InjectedState`: Annotation for injecting graph state into tools
18
+ `InjectedStore`: Annotation for injecting persistent store into tools
19
+ `tools_condition`: Utility function for conditional routing based on tool calls
20
20
 
21
21
  Typical Usage:
22
22
  ```python
@@ -49,7 +49,6 @@ from typing import (
49
49
  Generic,
50
50
  Literal,
51
51
  TypedDict,
52
- TypeVar,
53
52
  Union,
54
53
  cast,
55
54
  get_args,
@@ -84,15 +83,19 @@ from langgraph.graph.message import REMOVE_ALL_MESSAGES
84
83
  from langgraph.store.base import BaseStore # noqa: TC002
85
84
  from langgraph.types import Command, Send, StreamWriter
86
85
  from pydantic import BaseModel, ValidationError
87
- from typing_extensions import Unpack
86
+ from typing_extensions import TypeVar, Unpack
88
87
 
89
88
  if TYPE_CHECKING:
90
89
  from collections.abc import Sequence
91
90
 
92
91
  from langgraph.runtime import Runtime
92
+ from pydantic_core import ErrorDetails
93
93
 
94
- StateT = TypeVar("StateT")
95
- ContextT = TypeVar("ContextT")
94
+ # right now we use a dict as the default, can change this to AgentState, but depends
95
+ # on if this lives in LangChain or LangGraph... ideally would have some typed
96
+ # messages key
97
+ StateT = TypeVar("StateT", default=dict)
98
+ ContextT = TypeVar("ContextT", default=None)
96
99
 
97
100
  INVALID_TOOL_NAME_ERROR_TEMPLATE = (
98
101
  "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
@@ -123,11 +126,11 @@ class ToolCallRequest:
123
126
  Attributes:
124
127
  tool_call: Tool call dict with name, args, and id from model output.
125
128
  tool: BaseTool instance to be invoked, or None if tool is not
126
- registered with the ToolNode. When tool is None, interceptors can
127
- handle the request without validation. If the interceptor calls execute(),
129
+ registered with the `ToolNode`. When tool is `None`, interceptors can
130
+ handle the request without validation. If the interceptor calls `execute()`,
128
131
  validation will occur and raise an error for unregistered tools.
129
- state: Agent state (dict, list, or BaseModel).
130
- runtime: LangGraph runtime context (optional, None if outside graph).
132
+ state: Agent state (`dict`, `list`, or `BaseModel`).
133
+ runtime: LangGraph runtime context (optional, `None` if outside graph).
131
134
  """
132
135
 
133
136
  tool_call: ToolCall
@@ -178,7 +181,7 @@ The execute callable can be invoked multiple times for retry logic,
178
181
  with potentially modified requests each time. Each call to execute
179
182
  is independent and stateless.
180
183
 
181
- Note:
184
+ !!! note
182
185
  When implementing middleware for `create_agent`, use
183
186
  `AgentMiddleware.wrap_tool_call` which provides properly typed
184
187
  state parameter for better type safety.
@@ -247,7 +250,7 @@ AsyncToolCallWrapper = Callable[
247
250
  class ToolCallWithContext(TypedDict):
248
251
  """ToolCall with additional context for graph state.
249
252
 
250
- This is an internal data structure meant to help the ToolNode accept
253
+ This is an internal data structure meant to help the `ToolNode` accept
251
254
  tool calls with additional context (e.g. state) when dispatched using the
252
255
  Send API.
253
256
 
@@ -268,16 +271,16 @@ class ToolCallWithContext(TypedDict):
268
271
 
269
272
 
270
273
  def msg_content_output(output: Any) -> str | list[dict]:
271
- """Convert tool output to ToolMessage content format.
274
+ """Convert tool output to `ToolMessage` content format.
272
275
 
273
- Handles str, list[dict] (content blocks), and arbitrary objects by attempting
276
+ Handles `str`, `list[dict]` (content blocks), and arbitrary objects by attempting
274
277
  JSON serialization with fallback to str().
275
278
 
276
279
  Args:
277
280
  output: Tool execution output of any type.
278
281
 
279
282
  Returns:
280
- String or list of content blocks suitable for ToolMessage.content.
283
+ String or list of content blocks suitable for `ToolMessage.content`.
281
284
  """
282
285
  if isinstance(output, str) or (
283
286
  isinstance(output, list)
@@ -297,11 +300,15 @@ def msg_content_output(output: Any) -> str | list[dict]:
297
300
  class ToolInvocationError(ToolException):
298
301
  """An error occurred while invoking a tool due to invalid arguments.
299
302
 
300
- This exception is only raised when invoking a tool using the ToolNode!
303
+ This exception is only raised when invoking a tool using the `ToolNode`!
301
304
  """
302
305
 
303
306
  def __init__(
304
- self, tool_name: str, source: ValidationError, tool_kwargs: dict[str, Any]
307
+ self,
308
+ tool_name: str,
309
+ source: ValidationError,
310
+ tool_kwargs: dict[str, Any],
311
+ filtered_errors: list[ErrorDetails] | None = None,
305
312
  ) -> None:
306
313
  """Initialize the ToolInvocationError.
307
314
 
@@ -309,13 +316,28 @@ class ToolInvocationError(ToolException):
309
316
  tool_name: The name of the tool that failed.
310
317
  source: The exception that occurred.
311
318
  tool_kwargs: The keyword arguments that were passed to the tool.
319
+ filtered_errors: Optional list of filtered validation errors excluding
320
+ injected arguments.
312
321
  """
322
+ # Format error display based on filtered errors if provided
323
+ if filtered_errors is not None:
324
+ # Manually format the filtered errors without URLs or fancy formatting
325
+ error_str_parts = []
326
+ for error in filtered_errors:
327
+ loc_str = ".".join(str(loc) for loc in error.get("loc", ()))
328
+ msg = error.get("msg", "Unknown error")
329
+ error_str_parts.append(f"{loc_str}: {msg}")
330
+ error_display_str = "\n".join(error_str_parts)
331
+ else:
332
+ error_display_str = str(source)
333
+
313
334
  self.message = TOOL_INVOCATION_ERROR_TEMPLATE.format(
314
- tool_name=tool_name, tool_kwargs=tool_kwargs, error=source
335
+ tool_name=tool_name, tool_kwargs=tool_kwargs, error=error_display_str
315
336
  )
316
337
  self.tool_name = tool_name
317
338
  self.tool_kwargs = tool_kwargs
318
339
  self.source = source
340
+ self.filtered_errors = filtered_errors
319
341
  super().__init__(self.message)
320
342
 
321
343
 
@@ -338,7 +360,7 @@ def _handle_tool_error(
338
360
  """Generate error message content based on exception handling configuration.
339
361
 
340
362
  This function centralizes error message generation logic, supporting different
341
- error handling strategies configured via the ToolNode's handle_tool_errors
363
+ error handling strategies configured via the `ToolNode`'s `handle_tool_errors`
342
364
  parameter.
343
365
 
344
366
  Args:
@@ -350,12 +372,12 @@ def _handle_tool_error(
350
372
  - tuple: Not used in this context (handled by caller)
351
373
 
352
374
  Returns:
353
- A string containing the error message to include in the ToolMessage.
375
+ A string containing the error message to include in the `ToolMessage`.
354
376
 
355
377
  Raises:
356
378
  ValueError: If flag is not one of the supported types.
357
379
 
358
- Note:
380
+ !!! note
359
381
  The tuple case is handled by the caller through exception type checking,
360
382
  not by this function directly.
361
383
  """
@@ -392,9 +414,9 @@ def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception],
392
414
 
393
415
  Raises:
394
416
  ValueError: If the handler's annotation contains non-Exception types or
395
- if Union types contain non-Exception types.
417
+ if Union types contain non-Exception types.
396
418
 
397
- Note:
419
+ !!! note
398
420
  This function supports both single exception types and Union types for
399
421
  handlers that need to handle multiple exception types differently.
400
422
  """
@@ -440,6 +462,59 @@ def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception],
440
462
  return (Exception,)
441
463
 
442
464
 
465
+ def _filter_validation_errors(
466
+ validation_error: ValidationError,
467
+ tool_to_state_args: dict[str, str | None],
468
+ tool_to_store_arg: str | None,
469
+ tool_to_runtime_arg: str | None,
470
+ ) -> list[ErrorDetails]:
471
+ """Filter validation errors to only include LLM-controlled arguments.
472
+
473
+ When a tool invocation fails validation, only errors for arguments that the LLM
474
+ controls should be included in error messages. This ensures the LLM receives
475
+ focused, actionable feedback about parameters it can actually fix. System-injected
476
+ arguments (state, store, runtime) are filtered out since the LLM has no control
477
+ over them.
478
+
479
+ This function also removes injected argument values from the `input` field in error
480
+ details, ensuring that only LLM-provided arguments appear in error messages.
481
+
482
+ Args:
483
+ validation_error: The Pydantic ValidationError raised during tool invocation.
484
+ tool_to_state_args: Mapping of state argument names to state field names.
485
+ tool_to_store_arg: Name of the store argument, if any.
486
+ tool_to_runtime_arg: Name of the runtime argument, if any.
487
+
488
+ Returns:
489
+ List of ErrorDetails containing only errors for LLM-controlled arguments,
490
+ with system-injected argument values removed from the input field.
491
+ """
492
+ injected_args = set(tool_to_state_args.keys())
493
+ if tool_to_store_arg:
494
+ injected_args.add(tool_to_store_arg)
495
+ if tool_to_runtime_arg:
496
+ injected_args.add(tool_to_runtime_arg)
497
+
498
+ filtered_errors: list[ErrorDetails] = []
499
+ for error in validation_error.errors():
500
+ # Check if error location contains any injected argument
501
+ # error['loc'] is a tuple like ('field_name',) or ('field_name', 'nested_field')
502
+ if error["loc"] and error["loc"][0] not in injected_args:
503
+ # Create a copy of the error dict to avoid mutating the original
504
+ error_copy: dict[str, Any] = {**error}
505
+
506
+ # Remove injected arguments from input_value if it's a dict
507
+ if isinstance(error_copy.get("input"), dict):
508
+ input_dict = error_copy["input"]
509
+ input_copy = {k: v for k, v in input_dict.items() if k not in injected_args}
510
+ error_copy["input"] = input_copy
511
+
512
+ # Cast is safe because ErrorDetails is a TypedDict compatible with this structure
513
+ filtered_errors.append(error_copy) # type: ignore[arg-type]
514
+
515
+ return filtered_errors
516
+
517
+
443
518
  class _ToolNode(RunnableCallable):
444
519
  """A node for executing tools in LangGraph workflows.
445
520
 
@@ -560,7 +635,7 @@ class _ToolNode(RunnableCallable):
560
635
  wrap_tool_call: ToolCallWrapper | None = None,
561
636
  awrap_tool_call: AsyncToolCallWrapper | None = None,
562
637
  ) -> None:
563
- """Initialize ToolNode with tools and configuration.
638
+ """Initialize `ToolNode` with tools and configuration.
564
639
 
565
640
  Args:
566
641
  tools: Sequence of tools to make available for execution.
@@ -621,17 +696,10 @@ class _ToolNode(RunnableCallable):
621
696
  )
622
697
  tool_runtimes.append(tool_runtime)
623
698
 
624
- # Inject tool arguments (including runtime)
625
-
626
- injected_tool_calls = []
699
+ # Pass original tool calls without injection
627
700
  input_types = [input_type] * len(tool_calls)
628
- for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
629
- injected_call = self._inject_tool_args(call, tool_runtime)
630
- injected_tool_calls.append(injected_call)
631
701
  with get_executor_for_config(config) as executor:
632
- outputs = list(
633
- executor.map(self._run_one, injected_tool_calls, input_types, tool_runtimes)
634
- )
702
+ outputs = list(executor.map(self._run_one, tool_calls, input_types, tool_runtimes))
635
703
 
636
704
  return self._combine_tool_outputs(outputs, input_type)
637
705
 
@@ -658,12 +726,10 @@ class _ToolNode(RunnableCallable):
658
726
  )
659
727
  tool_runtimes.append(tool_runtime)
660
728
 
661
- injected_tool_calls = []
729
+ # Pass original tool calls without injection
662
730
  coros = []
663
731
  for call, tool_runtime in zip(tool_calls, tool_runtimes, strict=False):
664
- injected_call = self._inject_tool_args(call, tool_runtime)
665
- injected_tool_calls.append(injected_call)
666
- coros.append(self._arun_one(injected_call, input_type, tool_runtime))
732
+ coros.append(self._arun_one(call, input_type, tool_runtime)) # type: ignore[arg-type]
667
733
  outputs = await asyncio.gather(*coros)
668
734
 
669
735
  return self._combine_tool_outputs(outputs, input_type)
@@ -740,13 +806,23 @@ class _ToolNode(RunnableCallable):
740
806
  msg = f"Tool {call['name']} is not registered with ToolNode"
741
807
  raise TypeError(msg)
742
808
 
743
- call_args = {**call, "type": "tool_call"}
809
+ # Inject state, store, and runtime right before invocation
810
+ injected_call = self._inject_tool_args(call, request.runtime)
811
+ call_args = {**injected_call, "type": "tool_call"}
744
812
 
745
813
  try:
746
814
  try:
747
815
  response = tool.invoke(call_args, config)
748
816
  except ValidationError as exc:
749
- raise ToolInvocationError(call["name"], exc, call["args"]) from exc
817
+ # Filter out errors for injected arguments
818
+ filtered_errors = _filter_validation_errors(
819
+ exc,
820
+ self._tool_to_state_args.get(call["name"], {}),
821
+ self._tool_to_store_arg.get(call["name"]),
822
+ self._tool_to_runtime_arg.get(call["name"]),
823
+ )
824
+ # Use original call["args"] without injected values for error reporting
825
+ raise ToolInvocationError(call["name"], exc, call["args"], filtered_errors) from exc
750
826
 
751
827
  # GraphInterrupt is a special exception that will always be raised.
752
828
  # It can be triggered in the following scenarios,
@@ -885,13 +961,23 @@ class _ToolNode(RunnableCallable):
885
961
  msg = f"Tool {call['name']} is not registered with ToolNode"
886
962
  raise TypeError(msg)
887
963
 
888
- call_args = {**call, "type": "tool_call"}
964
+ # Inject state, store, and runtime right before invocation
965
+ injected_call = self._inject_tool_args(call, request.runtime)
966
+ call_args = {**injected_call, "type": "tool_call"}
889
967
 
890
968
  try:
891
969
  try:
892
970
  response = await tool.ainvoke(call_args, config)
893
971
  except ValidationError as exc:
894
- raise ToolInvocationError(call["name"], exc, call["args"]) from exc
972
+ # Filter out errors for injected arguments
973
+ filtered_errors = _filter_validation_errors(
974
+ exc,
975
+ self._tool_to_state_args.get(call["name"], {}),
976
+ self._tool_to_store_arg.get(call["name"]),
977
+ self._tool_to_runtime_arg.get(call["name"]),
978
+ )
979
+ # Use original call["args"] without injected values for error reporting
980
+ raise ToolInvocationError(call["name"], exc, call["args"], filtered_errors) from exc
895
981
 
896
982
  # GraphInterrupt is a special exception that will always be raised.
897
983
  # It can be triggered in the following scenarios,
@@ -1181,11 +1267,11 @@ class _ToolNode(RunnableCallable):
1181
1267
 
1182
1268
  Raises:
1183
1269
  ValueError: If a tool requires store injection but no store is provided,
1184
- or if state injection requirements cannot be satisfied.
1270
+ or if state injection requirements cannot be satisfied.
1185
1271
 
1186
- Note:
1272
+ !!! note
1187
1273
  This method is called automatically during tool execution. It should not
1188
- be called from outside the ToolNode.
1274
+ be called from outside the `ToolNode`.
1189
1275
  """
1190
1276
  if tool_call["name"] not in self.tools_by_name:
1191
1277
  return tool_call
@@ -1329,9 +1415,9 @@ def tools_condition(
1329
1415
  return tools_condition(state, messages_key="chat_history")
1330
1416
  ```
1331
1417
 
1332
- Note:
1333
- This function is designed to work seamlessly with ToolNode and standard
1334
- LangGraph patterns. It expects the last message to be an AIMessage when
1418
+ !!! note
1419
+ This function is designed to work seamlessly with `ToolNode` and standard
1420
+ LangGraph patterns. It expects the last message to be an `AIMessage` when
1335
1421
  tool calls are present, which is the standard output format for tool-calling
1336
1422
  language models.
1337
1423
  """
@@ -1353,16 +1439,16 @@ def tools_condition(
1353
1439
  class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1354
1440
  """Runtime context automatically injected into tools.
1355
1441
 
1356
- When a tool function has a parameter named 'tool_runtime' with type hint
1357
- 'ToolRuntime', the tool execution system will automatically inject
1358
- an instance containing:
1442
+ When a tool function has a parameter named `tool_runtime` with type hint
1443
+ `ToolRuntime`, the tool execution system will automatically inject an instance
1444
+ containing:
1359
1445
 
1360
- - state: The current graph state
1361
- - tool_call_id: The ID of the current tool call
1362
- - config: RunnableConfig for the current execution
1363
- - context: Runtime context (from langgraph Runtime)
1364
- - store: BaseStore instance for persistent storage (from langgraph Runtime)
1365
- - stream_writer: StreamWriter for streaming output (from langgraph Runtime)
1446
+ - `state`: The current graph state
1447
+ - `tool_call_id`: The ID of the current tool call
1448
+ - `config`: `RunnableConfig` for the current execution
1449
+ - `context`: Runtime context (from langgraph `Runtime`)
1450
+ - `store`: `BaseStore` instance for persistent storage (from langgraph `Runtime`)
1451
+ - `stream_writer`: `StreamWriter` for streaming output (from langgraph `Runtime`)
1366
1452
 
1367
1453
  No `Annotated` wrapper is needed - just use `runtime: ToolRuntime`
1368
1454
  as a parameter.
@@ -1396,7 +1482,7 @@ class ToolRuntime(_DirectlyInjectedToolArg, Generic[ContextT, StateT]):
1396
1482
  return f"Processed {x}"
1397
1483
  ```
1398
1484
 
1399
- Note:
1485
+ !!! note
1400
1486
  This is a marker class used for type checking and detection.
1401
1487
  The actual runtime object will be constructed during tool execution.
1402
1488
  """
@@ -1470,7 +1556,7 @@ class InjectedState(InjectedToolArg):
1470
1556
  ]
1471
1557
  ```
1472
1558
 
1473
- Note:
1559
+ !!! note
1474
1560
  - `InjectedState` arguments are automatically excluded from tool schemas
1475
1561
  presented to language models
1476
1562
  - `ToolNode` handles the injection process during execution
@@ -1550,7 +1636,7 @@ class InjectedStore(InjectedToolArg):
1550
1636
  result2 = graph.invoke({"messages": [HumanMessage("What's my favorite color?")]})
1551
1637
  ```
1552
1638
 
1553
- Note:
1639
+ !!! note
1554
1640
  - `InjectedStore` arguments are automatically excluded from tool schemas
1555
1641
  presented to language models
1556
1642
  - The store instance is automatically injected by `ToolNode` during execution
@@ -1,18 +1,19 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain
3
- Version: 1.0.0rc2
3
+ Version: 1.0.2
4
4
  Summary: Building applications with LLMs through composability
5
- Project-URL: homepage, https://docs.langchain.com/
6
- Project-URL: repository, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
7
- Project-URL: changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
8
- Project-URL: twitter, https://x.com/LangChainAI
9
- Project-URL: slack, https://www.langchain.com/join-community
10
- Project-URL: reddit, https://www.reddit.com/r/LangChain/
5
+ Project-URL: Homepage, https://docs.langchain.com/
6
+ Project-URL: Documentation, https://reference.langchain.com/python/langchain/langchain/
7
+ Project-URL: Source, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
8
+ Project-URL: Changelog, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D1%22
9
+ Project-URL: Twitter, https://x.com/LangChainAI
10
+ Project-URL: Slack, https://www.langchain.com/join-community
11
+ Project-URL: Reddit, https://www.reddit.com/r/LangChain/
11
12
  License: MIT
12
13
  License-File: LICENSE
13
14
  Requires-Python: <4.0.0,>=3.10.0
14
- Requires-Dist: langchain-core<2.0.0,>=1.0.0rc3
15
- Requires-Dist: langgraph<2.0.0,>=1.0.0a4
15
+ Requires-Dist: langchain-core<2.0.0,>=1.0.0
16
+ Requires-Dist: langgraph<1.1.0,>=1.0.0
16
17
  Requires-Dist: pydantic<3.0.0,>=2.7.4
17
18
  Provides-Extra: anthropic
18
19
  Requires-Dist: langchain-anthropic; extra == 'anthropic'
@@ -30,6 +31,8 @@ Provides-Extra: google-vertexai
30
31
  Requires-Dist: langchain-google-vertexai; extra == 'google-vertexai'
31
32
  Provides-Extra: groq
32
33
  Requires-Dist: langchain-groq; extra == 'groq'
34
+ Provides-Extra: huggingface
35
+ Requires-Dist: langchain-huggingface; extra == 'huggingface'
33
36
  Provides-Extra: mistralai
34
37
  Requires-Dist: langchain-mistralai; extra == 'mistralai'
35
38
  Provides-Extra: ollama
@@ -1,30 +1,34 @@
1
- langchain/__init__.py,sha256=zTVJXeQnKRDdEeQc-ivRY9P9TV6An_dmmkgW6phY86I,64
1
+ langchain/__init__.py,sha256=3EzcPFwVNXhQEus521J89yt2GHHJs6VenVpzltp09Uk,61
2
2
  langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
3
  langchain/agents/__init__.py,sha256=tDjbhFSC6XHQUZ_XvjHwmbICFfjxmE9xKFMbUVSIwfs,522
4
- langchain/agents/factory.py,sha256=RhClhqWInb2i0pjNJj9-0OvnB6waj0r8Qbm5bN7ofw8,62214
4
+ langchain/agents/factory.py,sha256=vUpv5sD91eaLUH_2GK5TrjnblwEl7h9L-3GrDtBueq4,62774
5
5
  langchain/agents/structured_output.py,sha256=BDSF6PWVih41M7IGyjCHWy46jmDxZsfBt_B4llE9OOc,13764
6
- langchain/agents/middleware/__init__.py,sha256=FBoTr4TAyuLJiOKV-mJN3oaLE6D6Q5ubI5sCbnsCCSs,1955
6
+ langchain/agents/middleware/__init__.py,sha256=Vm-Ajh4YoaahAa9b_XEAuiyoupKNIwZVzX-8JN1xKkA,2251
7
+ langchain/agents/middleware/_execution.py,sha256=Xyjh3HxTHbgA-C9FFE4WXUOqKUW8mdOB455XRlA_BOU,14251
8
+ langchain/agents/middleware/_redaction.py,sha256=LJeNOvdZ0gd4273Lqgpbxh7EiuuZ6q5LlqeHK4eyin4,11210
7
9
  langchain/agents/middleware/context_editing.py,sha256=0sUpDc0FvOKMERNnEKnhBqrTjX_rCVWjIX8hH3RTG8U,8749
10
+ langchain/agents/middleware/file_search.py,sha256=RiBNJRfy8R5E8TvjQRVgXf1O0UDtXqEarirFPnihbtI,12757
8
11
  langchain/agents/middleware/human_in_the_loop.py,sha256=N7Vt31rlHS7J-cA0EBDS2mlQW-SMvvxyAwjBnAY9vZU,12650
9
12
  langchain/agents/middleware/model_call_limit.py,sha256=yYBcOY5DKNIG6_9f-rkTjIj_BLVUO1tuShgxt00P8W0,7735
10
13
  langchain/agents/middleware/model_fallback.py,sha256=VKDN81jfFB9zJOaJZ94tfwzodk3zRXRwz6CqQ6MkJtw,4097
11
- langchain/agents/middleware/pii.py,sha256=rkGojBFIJGMs1p1cKNh0npafts_0UUJ0_NeZsyJo498,24760
14
+ langchain/agents/middleware/pii.py,sha256=I3nTAnfvrHqre9SoRJvlw0koT8-x3gGZdSvb0uKH5xg,10978
15
+ langchain/agents/middleware/shell_tool.py,sha256=HqPTht0Gx3zAv3JmWS_R1Hsd6yQEkvDjlCXXwQq8Fg8,26627
12
16
  langchain/agents/middleware/summarization.py,sha256=H1VxRkkbauw4p4sMMKyc_uZGbJhtqoVvOF7y_5JBXTc,10329
13
17
  langchain/agents/middleware/todo.py,sha256=0PyHV4u5JaBBuMmPWmDr3orZ5T5F6lk2jiVoBzVVMM4,9808
14
18
  langchain/agents/middleware/tool_call_limit.py,sha256=AHA-No4oUze2-2d1INnX8d_9otFuDB8uoWayJpt9nPo,12321
15
19
  langchain/agents/middleware/tool_emulator.py,sha256=UmN5UIMsikDnFvcPbNLNDOF4RXvIxqd-AMG46LVI0iA,7211
16
20
  langchain/agents/middleware/tool_retry.py,sha256=M76csBFFZa37djxtfUCqNU-x2erTNtxZqoO__DozWxA,13787
17
21
  langchain/agents/middleware/tool_selection.py,sha256=6RYdgkg6aSNx1w-YxRyL2Hct7UPnMRgGg6YVZVtW5TU,11638
18
- langchain/agents/middleware/types.py,sha256=FVuYue9cLB5C1ZNcYLIGNPN6IUaNXY3UsWQ6gC-gZNs,55547
22
+ langchain/agents/middleware/types.py,sha256=4oQAoTSHL1LQh9OiUxz_WJ6JrAvKRiUSXUAHawOcLkU,55670
19
23
  langchain/chat_models/__init__.py,sha256=lQwcJkHtGjrclCL7sBFocQuzRdRgZRPzIIPnGhEJmVQ,533
20
- langchain/chat_models/base.py,sha256=k1Qnuh7O_3LwsWtcVFSWsWP00hxiEyninwltTdi1rk8,35655
21
- langchain/embeddings/__init__.py,sha256=crY7GUw7RSA25JgpYl7I4WPloVCVY6eUmJbSSSchWis,444
22
- langchain/embeddings/base.py,sha256=1aNxDLQmS_l7RMcvjnK7Cv7rtgKrKt6Sl7mgXM2_JWI,7329
24
+ langchain/chat_models/base.py,sha256=XowWNeNZbVpGOOGGd2Wmvs2h6h7TzjFcQOx4KnJ2g4M,35544
25
+ langchain/embeddings/__init__.py,sha256=FYmjgpgjQc8wTy_qu2VnwgYiSqF6tUtwH6jtea6VbSs,843
26
+ langchain/embeddings/base.py,sha256=V9YgYiRAJs5U5o8BQUsmfV8XtRQrRWV3rENXHLKiECg,8511
23
27
  langchain/messages/__init__.py,sha256=p7NlF1yf8MkMgJzJ2wggXGkkA_okz1f-g63KoflL6PA,1710
24
28
  langchain/rate_limiters/__init__.py,sha256=5490xUNhet37N2nX6kbJlDgf8u1DX-C1Cs_r7etXn8A,351
25
29
  langchain/tools/__init__.py,sha256=hMzbaGcfHhNYfJx20uV57uMd9a-gNLbmopG4gDReeEc,628
26
- langchain/tools/tool_node.py,sha256=0rk5SZ0L80X6DJA5ohzyuqydL-S40i5LHMXJsY2t0JI,65016
27
- langchain-1.0.0rc2.dist-info/METADATA,sha256=WG0BlqzPbwur0AlHyITXIE35MX7iSpzGIfqqD4dFLxc,4544
28
- langchain-1.0.0rc2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
29
- langchain-1.0.0rc2.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
30
- langchain-1.0.0rc2.dist-info/RECORD,,
30
+ langchain/tools/tool_node.py,sha256=3wHSg6TQk43Zv3xrkuLwYZn5Hv5w3oZOv0vkmI_40sQ,69489
31
+ langchain-1.0.2.dist-info/METADATA,sha256=XNAflqRgEm9FqOvFckJKFFhjmP4ouNOJ8HdPXJrKgbc,4709
32
+ langchain-1.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
33
+ langchain-1.0.2.dist-info/licenses/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
34
+ langchain-1.0.2.dist-info/RECORD,,