langchain 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

langchain/__init__.py CHANGED
@@ -1,3 +1,3 @@
 """Main entrypoint into LangChain."""
 
-__version__ = "1.0.1"
+__version__ = "1.0.3"
@@ -11,6 +11,7 @@ from langchain_core.tools import BaseTool
 from langgraph._internal._runnable import RunnableCallable
 from langgraph.constants import END, START
 from langgraph.graph.state import StateGraph
+from langgraph.prebuilt.tool_node import ToolCallWithContext, ToolNode
 from langgraph.runtime import Runtime  # noqa: TC002
 from langgraph.types import Command, Send
 from langgraph.typing import ContextT  # noqa: TC002
@@ -33,11 +34,11 @@ from langchain.agents.structured_output import (
     ProviderStrategy,
     ProviderStrategyBinding,
     ResponseFormat,
+    StructuredOutputError,
     StructuredOutputValidationError,
     ToolStrategy,
 )
 from langchain.chat_models import init_chat_model
-from langchain.tools.tool_node import ToolCallWithContext, _ToolNode
 
 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable, Sequence
@@ -48,7 +49,7 @@ if TYPE_CHECKING:
     from langgraph.store.base import BaseStore
     from langgraph.types import Checkpointer
 
-    from langchain.tools.tool_node import ToolCallRequest, ToolCallWrapper
+    from langchain.agents.middleware.types import ToolCallRequest, ToolCallWrapper
 
 STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
 
@@ -529,44 +530,65 @@ def create_agent( # noqa: PLR0915
 
     Args:
         model: The language model for the agent. Can be a string identifier
-            (e.g., `"openai:gpt-4"`) or a chat model instance (e.g., `ChatOpenAI()`).
+            (e.g., `"openai:gpt-4"`) or a direct chat model instance (e.g.,
+            [`ChatOpenAI`][langchain_openai.ChatOpenAI] or another
+            [chat model](https://docs.langchain.com/oss/python/integrations/chat)).
+
             For a full list of supported model strings, see
             [`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
-        tools: A list of tools, `dicts`, or `Callable`. If `None` or an empty list,
-            the agent will consist of a model node without a tool calling loop.
-        system_prompt: An optional system prompt for the LLM. Prompts are converted to a
-            `SystemMessage` and added to the beginning of the message list.
+        tools: A list of tools, `dicts`, or `Callable`.
+
+            If `None` or an empty list, the agent will consist of a model node without a
+            tool calling loop.
+        system_prompt: An optional system prompt for the LLM.
+
+            Prompts are converted to a
+            [`SystemMessage`][langchain.messages.SystemMessage] and added to the
+            beginning of the message list.
         middleware: A sequence of middleware instances to apply to the agent.
-            Middleware can intercept and modify agent behavior at various stages.
+
+            Middleware can intercept and modify agent behavior at various stages. See
+            the [full guide](https://docs.langchain.com/oss/python/langchain/middleware).
         response_format: An optional configuration for structured responses.
+
             Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
+
             If provided, the agent will handle structured output during the
             conversation flow. Raw schemas will be wrapped in an appropriate strategy
             based on model capabilities.
         state_schema: An optional `TypedDict` schema that extends `AgentState`.
+
             When provided, this schema is used instead of `AgentState` as the base
             schema for merging with middleware state schemas. This allows users to
             add custom state fields without needing to create custom middleware.
-            Generally, it's recommended to use state_schema extensions via middleware
+            Generally, it's recommended to use `state_schema` extensions via middleware
             to keep relevant extensions scoped to corresponding hooks / tools.
+
             The schema must be a subclass of `AgentState[ResponseT]`.
         context_schema: An optional schema for runtime context.
-        checkpointer: An optional checkpoint saver object. This is used for persisting
-            the state of the graph (e.g., as chat memory) for a single thread
-            (e.g., a single conversation).
-        store: An optional store object. This is used for persisting data
-            across multiple threads (e.g., multiple conversations / users).
+        checkpointer: An optional checkpoint saver object.
+
+            Used for persisting the state of the graph (e.g., as chat memory) for a
+            single thread (e.g., a single conversation).
+        store: An optional store object.
+
+            Used for persisting data across multiple threads (e.g., multiple
+            conversations / users).
         interrupt_before: An optional list of node names to interrupt before.
+
             Useful if you want to add a user confirmation or other interrupt
             before taking an action.
         interrupt_after: An optional list of node names to interrupt after.
+
             Useful if you want to return directly or run additional processing
             on an output.
-        debug: Whether to enable verbose logging for graph execution. When enabled,
-            prints detailed information about each node execution, state updates,
-            and transitions during agent runtime. Useful for debugging middleware
-            behavior and understanding agent execution flow.
+        debug: Whether to enable verbose logging for graph execution.
+
+            When enabled, prints detailed information about each node execution, state
+            updates, and transitions during agent runtime. Useful for debugging
+            middleware behavior and understanding agent execution flow.
         name: An optional name for the `CompiledStateGraph`.
+
             This name will be automatically used when adding the agent graph to
             another graph as a subgraph node - particularly useful for building
             multi-agent systems.
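
The reworked `Args` section above documents the full `create_agent` surface. As a quick orientation, here is a minimal sketch of an agent built from those parameters; the tool and prompt are illustrative, not part of this diff:

```python
from langchain.agents import create_agent


def get_weather(city: str) -> str:
    """Hypothetical tool: return a canned weather report for `city`."""
    return f"It is sunny in {city}."


agent = create_agent(
    model="openai:gpt-4",  # string identifier, or a chat model instance
    tools=[get_weather],
    system_prompt="You are a helpful assistant.",
)
result = agent.invoke({"messages": [{"role": "user", "content": "Weather in Paris?"}]})
```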
@@ -576,11 +598,12 @@ def create_agent( # noqa: PLR0915
         A compiled `StateGraph` that can be used for chat interactions.
 
     The agent node calls the language model with the messages list (after applying
-    the system prompt). If the resulting `AIMessage` contains `tool_calls`, the graph
-    will then call the tools. The tools node executes the tools and adds the responses
-    to the messages list as `ToolMessage` objects. The agent node then calls the
-    language model again. The process repeats until no more `tool_calls` are
-    present in the response. The agent then returns the full list of messages.
+    the system prompt). If the resulting [`AIMessage`][langchain.messages.AIMessage]
+    contains `tool_calls`, the graph will then call the tools. The tools node executes
+    the tools and adds the responses to the messages list as
+    [`ToolMessage`][langchain.messages.ToolMessage] objects. The agent node then calls
+    the language model again. The process repeats until no more `tool_calls` are present
+    in the response. The agent then returns the full list of messages.
 
     Example:
         ```python
@@ -675,7 +698,7 @@ def create_agent( # noqa: PLR0915
     awrap_tool_call_wrapper = _chain_async_tool_call_wrappers(async_wrappers)
 
     # Setup tools
-    tool_node: _ToolNode | None = None
+    tool_node: ToolNode | None = None
     # Extract built-in provider tools (dict format) and regular tools (BaseTool/callables)
     built_in_tools = [t for t in tools if isinstance(t, dict)]
     regular_tools = [t for t in tools if not isinstance(t, dict)]
@@ -685,7 +708,7 @@ def create_agent( # noqa: PLR0915
 
     # Only create ToolNode if we have client-side tools
     tool_node = (
-        _ToolNode(
+        ToolNode(
             tools=available_tools,
             wrap_tool_call=wrap_tool_call_wrapper,
             awrap_tool_call=awrap_tool_call_wrapper,
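
Both hunks above replace the private `_ToolNode` (formerly vendored under `langchain.tools.tool_node`) with the public `ToolNode` from `langgraph.prebuilt.tool_node`. A minimal sketch of the new import path, assuming a trivial illustrative tool:

```python
from langgraph.prebuilt.tool_node import ToolNode


def get_time() -> str:
    """Hypothetical tool: return a canned timestamp."""
    return "12:00"


# The public ToolNode replaces the private _ToolNode used in 1.0.2.
tool_node = ToolNode(tools=[get_time])
```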
@@ -797,8 +820,16 @@ def create_agent( # noqa: PLR0915
             provider_strategy_binding = ProviderStrategyBinding.from_schema_spec(
                 effective_response_format.schema_spec
             )
-            structured_response = provider_strategy_binding.parse(output)
-            return {"messages": [output], "structured_response": structured_response}
+            try:
+                structured_response = provider_strategy_binding.parse(output)
+            except Exception as exc:  # noqa: BLE001
+                schema_name = getattr(
+                    effective_response_format.schema_spec.schema, "__name__", "response_format"
+                )
+                validation_error = StructuredOutputValidationError(schema_name, exc, output)
+                raise validation_error
+            else:
+                return {"messages": [output], "structured_response": structured_response}
         return {"messages": [output]}
 
         # Handle structured output with tool strategy
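
With this change, a provider-enforced schema that fails to parse no longer surfaces the bare parsing exception: it is wrapped in `StructuredOutputValidationError`, carrying the schema name and the offending `AIMessage`. A sketch of what callers can now catch, assuming a raw Pydantic schema handed to `response_format` (the model string and schema are illustrative):

```python
from pydantic import BaseModel

from langchain.agents import create_agent
from langchain.agents.structured_output import StructuredOutputValidationError


class Record(BaseModel):
    name: str
    age: int


agent = create_agent(model="openai:gpt-4o", response_format=Record)

try:
    agent.invoke({"messages": [{"role": "user", "content": "Extract: Bob, 42"}]})
except StructuredOutputValidationError as err:
    print(err.tool_name)   # schema name, or "response_format" as a fallback
    print(err.ai_message)  # new in 1.0.3: the raw AIMessage that failed to parse
```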
@@ -812,11 +843,11 @@ def create_agent( # noqa: PLR0915
         ]
 
         if structured_tool_calls:
-            exception: Exception | None = None
+            exception: StructuredOutputError | None = None
             if len(structured_tool_calls) > 1:
                 # Handle multiple structured outputs error
                 tool_names = [tc["name"] for tc in structured_tool_calls]
-                exception = MultipleStructuredOutputsError(tool_names)
+                exception = MultipleStructuredOutputsError(tool_names, output)
                 should_retry, error_message = _handle_structured_output_error(
                     exception, effective_response_format
                 )
@@ -858,7 +889,7 @@ def create_agent( # noqa: PLR0915
                     "structured_response": structured_response,
                 }
             except Exception as exc:  # noqa: BLE001
-                exception = StructuredOutputValidationError(tool_call["name"], exc)
+                exception = StructuredOutputValidationError(tool_call["name"], exc, output)
                 should_retry, error_message = _handle_structured_output_error(
                     exception, effective_response_format
                 )
@@ -1491,7 +1522,7 @@ def _make_model_to_model_edge(
 
 def _make_tools_to_model_edge(
     *,
-    tool_node: _ToolNode,
+    tool_node: ToolNode,
     model_destination: str,
     structured_output_tools: dict[str, OutputToolBinding],
     end_destination: str,
@@ -182,7 +182,7 @@ class ClearToolUsesEdit(ContextEdit):
 
 
 class ContextEditingMiddleware(AgentMiddleware):
-    """Middleware that automatically prunes tool results to manage context size.
+    """Automatically prunes tool results to manage context size.
 
     The middleware applies a sequence of edits when the total input token count
     exceeds configured thresholds. Currently the `ClearToolUsesEdit` strategy is
@@ -87,7 +87,7 @@ class ModelCallLimitExceededError(Exception):
 
 
 class ModelCallLimitMiddleware(AgentMiddleware[ModelCallLimitState, Any]):
-    """Middleware that tracks model call counts and enforces limits.
+    """Tracks model call counts and enforces limits.
 
     This middleware monitors the number of model calls made during agent execution
     and can terminate the agent when specified limits are reached. It supports
@@ -31,7 +31,7 @@ class ModelFallbackMiddleware(AgentMiddleware):
 
         fallback = ModelFallbackMiddleware(
             "openai:gpt-4o-mini",  # Try first on error
-            "anthropic:claude-3-5-sonnet-20241022",  # Then this
+            "anthropic:claude-sonnet-4-5-20250929",  # Then this
         )
 
         agent = create_agent(
@@ -45,7 +45,7 @@ if TYPE_CHECKING:
     from langgraph.runtime import Runtime
     from langgraph.types import Command
 
-    from langchain.tools.tool_node import ToolCallRequest
+    from langchain.agents.middleware.types import ToolCallRequest
 
 LOGGER = logging.getLogger(__name__)
 _DONE_MARKER_PREFIX = "__LC_SHELL_DONE__"
@@ -60,7 +60,7 @@ _SEARCH_RANGE_FOR_TOOL_PAIRS = 5
 
 
 class SummarizationMiddleware(AgentMiddleware):
-    """Middleware that summarizes conversation history when token limits are approached.
+    """Summarizes conversation history when token limits are approached.
 
     This middleware monitors message token counts and automatically summarizes older
     messages when a threshold is reached, preserving recent messages and maintaining
@@ -4,7 +4,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Annotated, Any, Literal
 
-from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
+from langchain_core.messages import AIMessage
 from langgraph.channels.untracked_value import UntrackedValue
 from typing_extensions import NotRequired
 
@@ -33,53 +33,6 @@ class ToolCallLimitState(AgentState):
     run_tool_call_count: NotRequired[Annotated[dict[str, int], UntrackedValue, PrivateStateAttr]]
 
 
-def _count_tool_calls_in_messages(messages: list[AnyMessage], tool_name: str | None = None) -> int:
-    """Count tool calls in a list of messages.
-
-    Args:
-        messages: List of messages to count tool calls in.
-        tool_name: If specified, only count calls to this specific tool.
-            If `None`, count all tool calls.
-
-    Returns:
-        The total number of tool calls (optionally filtered by tool_name).
-    """
-    count = 0
-    for message in messages:
-        if isinstance(message, AIMessage) and message.tool_calls:
-            if tool_name is None:
-                # Count all tool calls
-                count += len(message.tool_calls)
-            else:
-                # Count only calls to the specified tool
-                count += sum(1 for tc in message.tool_calls if tc["name"] == tool_name)
-    return count
-
-
-def _get_run_messages(messages: list[AnyMessage]) -> list[AnyMessage]:
-    """Get messages from the current run (after the last HumanMessage).
-
-    Args:
-        messages: Full list of messages.
-
-    Returns:
-        Messages from the current run (after last HumanMessage).
-    """
-    # Find the last HumanMessage
-    last_human_index = -1
-    for i in range(len(messages) - 1, -1, -1):
-        if isinstance(messages[i], HumanMessage):
-            last_human_index = i
-            break
-
-    # If no HumanMessage found, return all messages
-    if last_human_index == -1:
-        return messages
-
-    # Return messages after the last HumanMessage
-    return messages[last_human_index + 1 :]
-
-
 def _build_tool_limit_exceeded_message(
     thread_count: int,
     run_count: int,
@@ -146,7 +99,7 @@ class ToolCallLimitExceededError(Exception):
 
 
 class ToolCallLimitMiddleware(AgentMiddleware[ToolCallLimitState, Any]):
-    """Middleware that tracks tool call counts and enforces limits.
+    """Tracks tool call counts and enforces limits.
 
     This middleware monitors the number of tool calls made during agent execution
     and can terminate the agent when specified limits are reached. It supports
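
The message-scanning helpers deleted above are superseded by counters tracked in state (e.g., the `run_tool_call_count` field visible in `ToolCallLimitState`). A hedged usage sketch of the middleware itself; the import path and the `thread_limit` / `run_limit` parameter names are assumptions inferred from the `thread_count` / `run_count` fields consumed by `_build_tool_limit_exceeded_message`, not confirmed by this diff:

```python
from langchain.agents.middleware import ToolCallLimitMiddleware  # assumed import path

# Assumed parameter names, mirroring the thread/run counters above.
limiter = ToolCallLimitMiddleware(thread_limit=20, run_limit=10)
```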
@@ -15,12 +15,12 @@ if TYPE_CHECKING:
 
     from langgraph.types import Command
 
+    from langchain.agents.middleware.types import ToolCallRequest
     from langchain.tools import BaseTool
-    from langchain.tools.tool_node import ToolCallRequest
 
 
 class LLMToolEmulator(AgentMiddleware):
-    """Middleware that emulates specified tools using an LLM instead of executing them.
+    """Emulates specified tools using an LLM instead of executing them.
 
     This middleware allows selective emulation of tools for testing purposes.
     By default (when tools=None), all tools are emulated. You can specify which
@@ -47,9 +47,7 @@ class LLMToolEmulator(AgentMiddleware):
 
     Use a custom model for emulation:
     ```python
-    middleware = LLMToolEmulator(
-        tools=["get_weather"], model="anthropic:claude-3-5-sonnet-latest"
-    )
+    middleware = LLMToolEmulator(tools=["get_weather"], model="anthropic:claude-sonnet-4-5")
     ```
     Emulate specific tools by passing tool instances:
     ```python
@@ -71,7 +69,7 @@ class LLMToolEmulator(AgentMiddleware):
             If None (default), ALL tools will be emulated.
             If empty list, no tools will be emulated.
         model: Model to use for emulation.
-            Defaults to "anthropic:claude-3-5-sonnet-latest".
+            Defaults to "anthropic:claude-sonnet-4-5".
             Can be a model identifier string or BaseChatModel instance.
         """
         super().__init__()
@@ -91,7 +89,7 @@ class LLMToolEmulator(AgentMiddleware):
 
         # Initialize emulator model
         if model is None:
-            self.model = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=1)
+            self.model = init_chat_model("anthropic:claude-sonnet-4-5", temperature=1)
         elif isinstance(model, BaseChatModel):
             self.model = model
         else:
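
Taken together, these hunks move the emulator's default model from `claude-3-5-sonnet-latest` to `claude-sonnet-4-5`. A sketch of wiring the emulator into an agent; the middleware import path and the tool are illustrative assumptions, while the default-emulation behavior follows the docstring above:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolEmulator  # assumed import path


def get_weather(city: str) -> str:
    """Hypothetical tool to be emulated rather than executed."""
    return f"It is sunny in {city}."


agent = create_agent(
    model="openai:gpt-4o",
    tools=[get_weather],
    middleware=[LLMToolEmulator()],  # tools=None -> emulate every tool by default
)
```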
@@ -16,8 +16,8 @@ if TYPE_CHECKING:
 
     from langgraph.types import Command
 
+    from langchain.agents.middleware.types import ToolCallRequest
     from langchain.tools import BaseTool
-    from langchain.tools.tool_node import ToolCallRequest
 
 
 class ToolRetryMiddleware(AgentMiddleware):
@@ -19,14 +19,13 @@ from typing import (
 if TYPE_CHECKING:
     from collections.abc import Awaitable
 
-    from langchain.tools.tool_node import ToolCallRequest
-
 # Needed as top level import for Pydantic schema generation on AgentState
 from typing import TypeAlias
 
 from langchain_core.messages import AIMessage, AnyMessage, BaseMessage, ToolMessage  # noqa: TC002
 from langgraph.channels.ephemeral_value import EphemeralValue
 from langgraph.graph.message import add_messages
+from langgraph.prebuilt.tool_node import ToolCallRequest, ToolCallWrapper
 from langgraph.types import Command  # noqa: TC002
 from langgraph.typing import ContextT
 from typing_extensions import NotRequired, Required, TypedDict, TypeVar, Unpack
@@ -45,6 +44,8 @@ __all__ = [
     "ModelRequest",
     "ModelResponse",
     "OmitFromSchema",
+    "ToolCallRequest",
+    "ToolCallWrapper",
     "after_agent",
     "after_model",
     "before_agent",
@@ -34,17 +34,21 @@ SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
 class StructuredOutputError(Exception):
     """Base class for structured output errors."""
 
+    ai_message: AIMessage
+
 
 class MultipleStructuredOutputsError(StructuredOutputError):
     """Raised when model returns multiple structured output tool calls when only one is expected."""
 
-    def __init__(self, tool_names: list[str]) -> None:
+    def __init__(self, tool_names: list[str], ai_message: AIMessage) -> None:
         """Initialize `MultipleStructuredOutputsError`.
 
         Args:
             tool_names: The names of the tools called for structured output.
+            ai_message: The AI message that contained the invalid multiple tool calls.
         """
         self.tool_names = tool_names
+        self.ai_message = ai_message
 
         super().__init__(
             "Model incorrectly returned multiple structured responses "
@@ -55,15 +59,17 @@ class MultipleStructuredOutputsError(StructuredOutputError):
 class StructuredOutputValidationError(StructuredOutputError):
     """Raised when structured output tool call arguments fail to parse according to the schema."""
 
-    def __init__(self, tool_name: str, source: Exception) -> None:
+    def __init__(self, tool_name: str, source: Exception, ai_message: AIMessage) -> None:
         """Initialize `StructuredOutputValidationError`.
 
         Args:
             tool_name: The name of the tool that failed.
             source: The exception that occurred.
+            ai_message: The AI message that contained the invalid structured output.
         """
         self.tool_name = tool_name
         self.source = source
+        self.ai_message = ai_message
         super().__init__(f"Failed to parse structured output for tool '{tool_name}': {source}.")
 
 
@@ -64,26 +64,34 @@ def init_chat_model(
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel | _ConfigurableModel:
-    """Initialize a chat model in a single line using the model's name and provider.
+    """Initialize a chat model from any supported provider using a unified interface.
+
+    **Two main use cases:**
+
+    1. **Fixed model** – specify the model upfront and get a ready-to-use chat model.
+    2. **Configurable model** – choose to specify parameters (including model name) at
+       runtime via `config`. Makes it easy to switch between models/providers without
+       changing your code.
 
     !!! note
-        Requires the integration package for your model provider to be installed.
+        Requires the integration package for the chosen model provider to be installed.
 
         See the `model_provider` parameter below for specific package names
         (e.g., `pip install langchain-openai`).
 
     Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
-    for supported model parameters.
+    for supported model parameters to use as `**kwargs`.
 
     Args:
         model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5'`.
 
-            You can also specify model and model provider in a single argument using:
-
+            You can also specify model and model provider in a single argument using
             `'{model_provider}:{model}'` format, e.g. `'openai:o1'`.
         model_provider: The model provider if not specified as part of the model arg
-            (see above). Supported `model_provider` values and the corresponding
-            integration package are:
+            (see above).
+
+            Supported `model_provider` values and the corresponding integration package
+            are:
 
             - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
             - `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
@@ -120,27 +128,36 @@ def init_chat_model(
             - `deepseek...` -> `deepseek`
             - `grok...` -> `xai`
             - `sonar...` -> `perplexity`
-        configurable_fields: Which model parameters are configurable:
+        configurable_fields: Which model parameters are configurable at runtime:
 
-            - `None`: No configurable fields.
+            - `None`: No configurable fields (i.e., a fixed model).
             - `'any'`: All fields are configurable. **See security note below.**
             - `list[str] | Tuple[str, ...]`: Specified fields are configurable.
 
-            Fields are assumed to have `config_prefix` stripped if there is a
-            `config_prefix`. If model is specified, then defaults to `None`. If model is
-            not specified, then defaults to `("model", "model_provider")`.
+            Fields are assumed to have `config_prefix` stripped if a `config_prefix` is
+            specified.
+
+            If `model` is specified, then defaults to `None`.
+
+            If `model` is not specified, then defaults to `("model", "model_provider")`.
 
             !!! warning "Security note"
                 Setting `configurable_fields="any"` means fields like `api_key`,
-                `base_url`, etc. can be altered at runtime, potentially redirecting
-                model requests to a different service/user. Make sure that if you're
-                accepting untrusted configurations that you enumerate the
-                `configurable_fields=(...)` explicitly.
-
-        config_prefix: If `'config_prefix'` is a non-empty string then model will be
-            configurable at runtime via the
-            `config["configurable"]["{config_prefix}_{param}"]` keys. If
-            `'config_prefix'` is an empty string then model will be configurable via
+                `base_url`, etc., can be altered at runtime, potentially redirecting
+                model requests to a different service/user.
+
+                Make sure that if you're accepting untrusted configurations that you
+                enumerate the `configurable_fields=(...)` explicitly.
+
+        config_prefix: Optional prefix for configuration keys.
+
+            Useful when you have multiple configurable models in the same application.
+
+            If `'config_prefix'` is a non-empty string then `model` will be configurable
+            at runtime via the `config["configurable"]["{config_prefix}_{param}"]` keys.
+            See examples below.
+
+            If `'config_prefix'` is an empty string then model will be configurable via
             `config["configurable"]["{param}"]`.
         **kwargs: Additional model-specific keyword args to pass to the underlying
             chat model's `__init__` method. Common parameters include:
@@ -150,10 +167,13 @@ def init_chat_model(
             - `timeout`: Maximum time (in seconds) to wait for a response.
             - `max_retries`: Maximum number of retry attempts for failed requests.
             - `base_url`: Custom API endpoint URL.
-            - `rate_limiter`: A `BaseRateLimiter` instance to control request rate.
+            - `rate_limiter`: A
+              [`BaseRateLimiter`][langchain_core.rate_limiters.BaseRateLimiter]
+              instance to control request rate.
 
-            Refer to the specific model provider's documentation for all available
-            parameters.
+            Refer to the specific model provider's
+            [integration reference](https://reference.langchain.com/python/integrations/)
+            for all available parameters.
 
     Returns:
         A `BaseChatModel` corresponding to the `model_name` and `model_provider`
@@ -165,32 +185,34 @@ def init_chat_model(
         ValueError: If `model_provider` cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.
 
-    ???+ note "Initialize a non-configurable model"
+    ???+ example "Initialize a non-configurable model"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
+
         from langchain.chat_models import init_chat_model
 
         o3_mini = init_chat_model("openai:o3-mini", temperature=0)
         claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5", temperature=0)
-        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
+        gemini_2_5_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
         o3_mini.invoke("what's your name")
         claude_sonnet.invoke("what's your name")
-        gemini_2_flash.invoke("what's your name")
+        gemini_2_5_flash.invoke("what's your name")
         ```
 
-    ??? note "Partially configurable model with no default"
+    ??? example "Partially configurable model with no default"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic
+
         from langchain.chat_models import init_chat_model
 
-        # We don't need to specify configurable=True if a model isn't specified.
+        # (We don't need to specify configurable=True if a model isn't specified.)
         configurable_model = init_chat_model(temperature=0)
 
         configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
-        # GPT-4o response
+        # Use GPT-4o to generate the response
 
         configurable_model.invoke(
             "what's your name",
@@ -198,10 +220,11 @@ def init_chat_model(
         )
         ```
 
-    ??? note "Fully configurable model with a default"
+    ??? example "Fully configurable model with a default"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic
+
         from langchain.chat_models import init_chat_model
 
         configurable_model_with_default = init_chat_model(
@@ -212,7 +235,7 @@ def init_chat_model(
         )
 
         configurable_model_with_default.invoke("what's your name")
-        # GPT-4o response with temperature 0
+        # GPT-4o response with temperature 0 (as set in default)
 
         configurable_model_with_default.invoke(
             "what's your name",
@@ -223,15 +246,17 @@ def init_chat_model(
                 }
             },
         )
+        # Override default to use Sonnet 4.5 with temperature 0.6 to generate response
         ```
 
-    ??? note "Bind tools to a configurable model"
+    ??? example "Bind tools to a configurable model"
 
         You can call any chat model declarative methods on a configurable model in the
         same way that you would with a normal model:
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic
+
         from langchain.chat_models import init_chat_model
         from pydantic import BaseModel, Field
 
@@ -261,11 +286,13 @@ def init_chat_model(
         configurable_model_with_tools.invoke(
             "Which city is hotter today and which is bigger: LA or NY?"
         )
+        # Use GPT-4o
 
         configurable_model_with_tools.invoke(
             "Which city is hotter today and which is bigger: LA or NY?",
             config={"configurable": {"model": "claude-sonnet-4-5"}},
         )
+        # Use Sonnet 4.5
         ```
 
     """  # noqa: E501