langchain 1.0.0a12__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/__init__.py +7 -1
  3. langchain/agents/factory.py +722 -226
  4. langchain/agents/middleware/__init__.py +36 -9
  5. langchain/agents/middleware/_execution.py +388 -0
  6. langchain/agents/middleware/_redaction.py +350 -0
  7. langchain/agents/middleware/context_editing.py +46 -17
  8. langchain/agents/middleware/file_search.py +382 -0
  9. langchain/agents/middleware/human_in_the_loop.py +220 -173
  10. langchain/agents/middleware/model_call_limit.py +43 -10
  11. langchain/agents/middleware/model_fallback.py +79 -36
  12. langchain/agents/middleware/pii.py +68 -504
  13. langchain/agents/middleware/shell_tool.py +718 -0
  14. langchain/agents/middleware/summarization.py +2 -2
  15. langchain/agents/middleware/{planning.py → todo.py} +35 -16
  16. langchain/agents/middleware/tool_call_limit.py +308 -114
  17. langchain/agents/middleware/tool_emulator.py +200 -0
  18. langchain/agents/middleware/tool_retry.py +384 -0
  19. langchain/agents/middleware/tool_selection.py +25 -21
  20. langchain/agents/middleware/types.py +714 -257
  21. langchain/agents/structured_output.py +37 -27
  22. langchain/chat_models/__init__.py +7 -1
  23. langchain/chat_models/base.py +192 -190
  24. langchain/embeddings/__init__.py +13 -3
  25. langchain/embeddings/base.py +49 -29
  26. langchain/messages/__init__.py +50 -1
  27. langchain/tools/__init__.py +9 -7
  28. langchain/tools/tool_node.py +16 -1174
  29. langchain-1.0.4.dist-info/METADATA +92 -0
  30. langchain-1.0.4.dist-info/RECORD +34 -0
  31. langchain/_internal/__init__.py +0 -0
  32. langchain/_internal/_documents.py +0 -35
  33. langchain/_internal/_lazy_import.py +0 -35
  34. langchain/_internal/_prompts.py +0 -158
  35. langchain/_internal/_typing.py +0 -70
  36. langchain/_internal/_utils.py +0 -7
  37. langchain/agents/_internal/__init__.py +0 -1
  38. langchain/agents/_internal/_typing.py +0 -13
  39. langchain/agents/middleware/prompt_caching.py +0 -86
  40. langchain/documents/__init__.py +0 -7
  41. langchain/embeddings/cache.py +0 -361
  42. langchain/storage/__init__.py +0 -22
  43. langchain/storage/encoder_backed.py +0 -123
  44. langchain/storage/exceptions.py +0 -5
  45. langchain/storage/in_memory.py +0 -13
  46. langchain-1.0.0a12.dist-info/METADATA +0 -122
  47. langchain-1.0.0a12.dist-info/RECORD +0 -43
  48. {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/WHEEL +0 -0
  49. {langchain-1.0.0a12.dist-info → langchain-1.0.4.dist-info}/licenses/LICENSE +0 -0
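One migration-relevant entry in this list: item 15 renames `langchain/agents/middleware/planning.py` to `langchain/agents/middleware/todo.py`, so deep imports from the old module path will break on upgrade. A hedged sketch of the adjustment (the `TodoListMiddleware` symbol is illustrative, not taken from this diff; check `langchain/agents/middleware/__init__.py` in 1.0.4 for the actual exported name):

```python
# Before (1.0.0a12): the middleware lived in the `planning` module.
# from langchain.agents.middleware.planning import TodoListMiddleware

# After (1.0.4): the module is now named `todo`.
from langchain.agents.middleware.todo import TodoListMiddleware  # illustrative name

middleware = [TodoListMiddleware()]
```

Importing from the package's public `__init__` (`from langchain.agents.middleware import ...`) sidesteps module renames like this one.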
langchain/agents/structured_output.py

@@ -34,17 +34,21 @@ SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
 class StructuredOutputError(Exception):
     """Base class for structured output errors."""
 
+    ai_message: AIMessage
+
 
 class MultipleStructuredOutputsError(StructuredOutputError):
     """Raised when model returns multiple structured output tool calls when only one is expected."""
 
-    def __init__(self, tool_names: list[str]) -> None:
-        """Initialize MultipleStructuredOutputsError.
+    def __init__(self, tool_names: list[str], ai_message: AIMessage) -> None:
+        """Initialize `MultipleStructuredOutputsError`.
 
         Args:
             tool_names: The names of the tools called for structured output.
+            ai_message: The AI message that contained the invalid multiple tool calls.
         """
         self.tool_names = tool_names
+        self.ai_message = ai_message
 
         super().__init__(
             "Model incorrectly returned multiple structured responses "
@@ -55,15 +59,17 @@ class MultipleStructuredOutputsError(StructuredOutputError):
 class StructuredOutputValidationError(StructuredOutputError):
     """Raised when structured output tool call arguments fail to parse according to the schema."""
 
-    def __init__(self, tool_name: str, source: Exception) -> None:
-        """Initialize StructuredOutputValidationError.
+    def __init__(self, tool_name: str, source: Exception, ai_message: AIMessage) -> None:
+        """Initialize `StructuredOutputValidationError`.
 
         Args:
             tool_name: The name of the tool that failed.
             source: The exception that occurred.
+            ai_message: The AI message that contained the invalid structured output.
         """
         self.tool_name = tool_name
         self.source = source
+        self.ai_message = ai_message
         super().__init__(f"Failed to parse structured output for tool '{tool_name}': {source}.")
 
 
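Both structured-output exceptions now carry the `AIMessage` that triggered them, so error handlers can inspect the raw model output (tool calls, response metadata) rather than just the error string. A minimal sketch of the new constructor shape, grounded in the signatures above; in practice these exceptions are raised by the agent's structured-output handling rather than built by hand:

```python
from langchain_core.messages import AIMessage

from langchain.agents.structured_output import (
    MultipleStructuredOutputsError,
    StructuredOutputValidationError,
)

msg = AIMessage(content="")  # stands in for the model output that failed

# New in 1.0.x: both constructors take (and expose) the offending AIMessage.
parse_err = StructuredOutputValidationError(
    tool_name="MySchema",
    source=ValueError("bad args"),
    ai_message=msg,
)
multi_err = MultipleStructuredOutputsError(
    tool_names=["SchemaA", "SchemaB"],
    ai_message=msg,
)

assert parse_err.ai_message is msg and multi_err.ai_message is msg
```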
@@ -73,8 +79,9 @@ def _parse_with_schema(
     """Parse data using for any supported schema type.
 
     Args:
-        schema: The schema type (Pydantic model, dataclass, or TypedDict)
-        schema_kind: One of "pydantic", "dataclass", "typeddict", or "json_schema"
+        schema: The schema type (Pydantic model, `dataclass`, or `TypedDict`)
+        schema_kind: One of `"pydantic"`, `"dataclass"`, `"typeddict"`, or
+            `"json_schema"`
         data: The data to parse
 
     Returns:
@@ -99,13 +106,14 @@ class _SchemaSpec(Generic[SchemaT]):
     """Describes a structured output schema."""
 
     schema: type[SchemaT]
-    """The schema for the response, can be a Pydantic model, dataclass, TypedDict,
+    """The schema for the response, can be a Pydantic model, `dataclass`, `TypedDict`,
     or JSON schema dict."""
 
     name: str
     """Name of the schema, used for tool calling.
 
-    If not provided, the name will be the model name or "response_format" if it's a JSON schema.
+    If not provided, the name will be the model name or `"response_format"` if it's a
+    JSON schema.
     """
 
     description: str
@@ -186,14 +194,15 @@ class ToolStrategy(Generic[SchemaT]):
     handle_errors: (
         bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str]
     )
-    """Error handling strategy for structured output via ToolStrategy. Default is True.
-
-    - True: Catch all errors with default error template
-    - str: Catch all errors with this custom message
-    - type[Exception]: Only catch this exception type with default message
-    - tuple[type[Exception], ...]: Only catch these exception types with default message
-    - Callable[[Exception], str]: Custom function that returns error message
-    - False: No retry, let exceptions propagate
+    """Error handling strategy for structured output via `ToolStrategy`.
+
+    - `True`: Catch all errors with default error template
+    - `str`: Catch all errors with this custom message
+    - `type[Exception]`: Only catch this exception type with default message
+    - `tuple[type[Exception], ...]`: Only catch these exception types with default
+      message
+    - `Callable[[Exception], str]`: Custom function that returns error message
+    - `False`: No retry, let exceptions propagate
     """
 
     def __init__(
@@ -207,9 +216,10 @@ class ToolStrategy(Generic[SchemaT]):
         | tuple[type[Exception], ...]
         | Callable[[Exception], str] = True,
     ) -> None:
-        """Initialize ToolStrategy.
+        """Initialize `ToolStrategy`.
 
-        Initialize ToolStrategy with schemas, tool message content, and error handling strategy.
+        Initialize `ToolStrategy` with schemas, tool message content, and error handling
+        strategy.
         """
         self.schema = schema
         self.tool_message_content = tool_message_content
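The `handle_errors` options documented above map directly onto `ToolStrategy` construction. A hedged sketch (the schema and messages are illustrative; the keyword names follow the diff above):

```python
from pydantic import BaseModel

from langchain.agents.structured_output import ToolStrategy


class Weather(BaseModel):
    temperature_c: float


# False: let StructuredOutputValidationError and friends propagate to the caller.
strict = ToolStrategy(schema=Weather, handle_errors=False)

# Callable: catch all errors and feed a custom retry message back to the model.
lenient = ToolStrategy(
    schema=Weather,
    handle_errors=lambda exc: f"Schema mismatch, please retry: {exc}",
)

# Tuple: only retry on specific exception types, using the default template.
picky = ToolStrategy(schema=Weather, handle_errors=(ValueError, TypeError))
```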
@@ -285,13 +295,13 @@ class OutputToolBinding(Generic[SchemaT]):
 
     @classmethod
     def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
-        """Create an OutputToolBinding instance from a SchemaSpec.
+        """Create an `OutputToolBinding` instance from a `SchemaSpec`.
 
         Args:
-            schema_spec: The SchemaSpec to convert
+            schema_spec: The `SchemaSpec` to convert
 
         Returns:
-            An OutputToolBinding instance with the appropriate tool created
+            An `OutputToolBinding` instance with the appropriate tool created
         """
         return cls(
             schema=schema_spec.schema,
@@ -329,20 +339,20 @@ class ProviderStrategyBinding(Generic[SchemaT]):
 
     schema: type[SchemaT]
     """The original schema provided for structured output
-    (Pydantic model, dataclass, TypedDict, or JSON schema dict)."""
+    (Pydantic model, `dataclass`, `TypedDict`, or JSON schema dict)."""
 
     schema_kind: SchemaKind
     """Classification of the schema type for proper response construction."""
 
     @classmethod
     def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
-        """Create a ProviderStrategyBinding instance from a SchemaSpec.
+        """Create a `ProviderStrategyBinding` instance from a `SchemaSpec`.
 
         Args:
-            schema_spec: The SchemaSpec to convert
+            schema_spec: The `SchemaSpec` to convert
 
         Returns:
-            A ProviderStrategyBinding instance for parsing native structured output
+            A `ProviderStrategyBinding` instance for parsing native structured output
         """
         return cls(
             schema=schema_spec.schema,
@@ -350,10 +360,10 @@ class ProviderStrategyBinding(Generic[SchemaT]):
         )
 
     def parse(self, response: AIMessage) -> SchemaT:
-        """Parse AIMessage content according to the schema.
+        """Parse `AIMessage` content according to the schema.
 
         Args:
-            response: The AI message containing the structured output
+            response: The `AIMessage` containing the structured output
 
         Returns:
             The parsed response according to the schema
langchain/chat_models/__init__.py

@@ -1,4 +1,10 @@
-"""Chat models."""
+"""Entrypoint to using [chat models](https://docs.langchain.com/oss/python/langchain/models) in LangChain.
+
+!!! warning "Reference docs"
+    This page contains **reference documentation** for chat models. See
+    [the docs](https://docs.langchain.com/oss/python/langchain/models) for conceptual
+    guides, tutorials, and examples on using chat models.
+"""  # noqa: E501
 
 from langchain_core.language_models import BaseChatModel
 
langchain/chat_models/base.py

@@ -4,14 +4,7 @@ from __future__ import annotations
 
 import warnings
 from importlib import util
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Literal,
-    TypeAlias,
-    cast,
-    overload,
-)
+from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast, overload
 
 from langchain_core.language_models import BaseChatModel, LanguageModelInput
 from langchain_core.messages import AIMessage, AnyMessage
@@ -32,7 +25,7 @@ def init_chat_model(
     model: str,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel: ...
@@ -40,10 +33,10 @@ def init_chat_model(
 
 @overload
 def init_chat_model(
-    model: Literal[None] = None,
+    model: None = None,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
@@ -71,224 +64,236 @@ def init_chat_model(
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel | _ConfigurableModel:
-    """Initialize a ChatModel from the model name and provider.
+    """Initialize a chat model from any supported provider using a unified interface.
 
-    **Note:** Must have the integration package corresponding to the model provider
-    installed.
+    **Two main use cases:**
+
+    1. **Fixed model** – specify the model upfront and get a ready-to-use chat model.
+    2. **Configurable model** – choose to specify parameters (including model name) at
+       runtime via `config`. Makes it easy to switch between models/providers without
+       changing your code.
+
+    !!! note
+        Requires the integration package for the chosen model provider to be installed.
+
+        See the `model_provider` parameter below for specific package names
+        (e.g., `pip install langchain-openai`).
+
+        Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
+        for supported model parameters to use as `**kwargs`.
 
     Args:
-        model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
-            also specify model and model provider in a single argument using
-            '{model_provider}:{model}' format, e.g. "openai:o1".
-        model_provider: The model provider if not specified as part of model arg (see
-            above). Supported model_provider values and the corresponding integration
-            package are:
-
-            - 'openai' -> langchain-openai
-            - 'anthropic' -> langchain-anthropic
-            - 'azure_openai' -> langchain-openai
-            - 'azure_ai' -> langchain-azure-ai
-            - 'google_vertexai' -> langchain-google-vertexai
-            - 'google_genai' -> langchain-google-genai
-            - 'bedrock' -> langchain-aws
-            - 'bedrock_converse' -> langchain-aws
-            - 'cohere' -> langchain-cohere
-            - 'fireworks' -> langchain-fireworks
-            - 'together' -> langchain-together
-            - 'mistralai' -> langchain-mistralai
-            - 'huggingface' -> langchain-huggingface
-            - 'groq' -> langchain-groq
-            - 'ollama' -> langchain-ollama
-            - 'google_anthropic_vertex' -> langchain-google-vertexai
-            - 'deepseek' -> langchain-deepseek
-            - 'ibm' -> langchain-ibm
-            - 'nvidia' -> langchain-nvidia-ai-endpoints
-            - 'xai' -> langchain-xai
-            - 'perplexity' -> langchain-perplexity
-
-            Will attempt to infer model_provider from model if not specified. The
+        model: The name or ID of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
+
+            You can also specify model and model provider in a single argument using
+            `'{model_provider}:{model}'` format, e.g. `'openai:o1'`.
+        model_provider: The model provider if not specified as part of the model arg
+            (see above).
+
+            Supported `model_provider` values and the corresponding integration package
+            are:
+
+            - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
+            - `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
+            - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
+            - `azure_ai` -> [`langchain-azure-ai`](https://docs.langchain.com/oss/python/integrations/providers/microsoft)
+            - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+            - `google_genai` -> [`langchain-google-genai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+            - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
+            - `bedrock_converse` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
+            - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
+            - `fireworks` -> [`langchain-fireworks`](https://docs.langchain.com/oss/python/integrations/providers/fireworks)
+            - `together` -> [`langchain-together`](https://docs.langchain.com/oss/python/integrations/providers/together)
+            - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
+            - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
+            - `groq` -> [`langchain-groq`](https://docs.langchain.com/oss/python/integrations/providers/groq)
+            - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
+            - `google_anthropic_vertex` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+            - `deepseek` -> [`langchain-deepseek`](https://docs.langchain.com/oss/python/integrations/providers/deepseek)
+            - `ibm` -> [`langchain-ibm`](https://docs.langchain.com/oss/python/integrations/providers/ibm)
+            - `nvidia` -> [`langchain-nvidia-ai-endpoints`](https://docs.langchain.com/oss/python/integrations/providers/nvidia)
+            - `xai` -> [`langchain-xai`](https://docs.langchain.com/oss/python/integrations/providers/xai)
+            - `perplexity` -> [`langchain-perplexity`](https://docs.langchain.com/oss/python/integrations/providers/perplexity)
+
+            Will attempt to infer `model_provider` from model if not specified. The
             following providers will be inferred based on these model prefixes:
 
-            - 'gpt-...' | 'o1...' | 'o3...' -> 'openai'
-            - 'claude...' -> 'anthropic'
-            - 'amazon....' -> 'bedrock'
-            - 'gemini...' -> 'google_vertexai'
-            - 'command...' -> 'cohere'
-            - 'accounts/fireworks...' -> 'fireworks'
-            - 'mistral...' -> 'mistralai'
-            - 'deepseek...' -> 'deepseek'
-            - 'grok...' -> 'xai'
-            - 'sonar...' -> 'perplexity'
-        configurable_fields: Which model parameters are
-            configurable:
-
-            - None: No configurable fields.
-            - "any": All fields are configurable. *See Security Note below.*
-            - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
-
-            Fields are assumed to have config_prefix stripped if there is a
-            config_prefix. If model is specified, then defaults to None. If model is
-            not specified, then defaults to ``("model", "model_provider")``.
-
-            ***Security Note***: Setting ``configurable_fields="any"`` means fields like
-            api_key, base_url, etc. can be altered at runtime, potentially redirecting
-            model requests to a different service/user. Make sure that if you're
-            accepting untrusted configurations that you enumerate the
-            ``configurable_fields=(...)`` explicitly.
-
-        config_prefix: If config_prefix is a non-empty string then model will be
-            configurable at runtime via the
-            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
-            config_prefix is an empty string then model will be configurable via
-            ``config["configurable"]["{param}"]``.
-        kwargs: Additional model-specific keyword args to pass to
-            ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``. Examples
-            include:
-            * temperature: Model temperature.
-            * max_tokens: Max output tokens.
-            * timeout: The maximum time (in seconds) to wait for a response from the model
-                before canceling the request.
-            * max_retries: The maximum number of attempts the system will make to resend a
-                request if it fails due to issues like network timeouts or rate limits.
-            * base_url: The URL of the API endpoint where requests are sent.
-            * rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding
-                rate limits.
+            - `gpt-...` | `o1...` | `o3...` -> `openai`
+            - `claude...` -> `anthropic`
+            - `amazon...` -> `bedrock`
+            - `gemini...` -> `google_vertexai`
+            - `command...` -> `cohere`
+            - `accounts/fireworks...` -> `fireworks`
+            - `mistral...` -> `mistralai`
+            - `deepseek...` -> `deepseek`
+            - `grok...` -> `xai`
+            - `sonar...` -> `perplexity`
+        configurable_fields: Which model parameters are configurable at runtime:
 
-    Returns:
-        A BaseChatModel corresponding to the model_name and model_provider specified if
-        configurability is inferred to be False. If configurable, a chat model emulator
-        that initializes the underlying model at runtime once a config is passed in.
+            - `None`: No configurable fields (i.e., a fixed model).
+            - `'any'`: All fields are configurable. **See security note below.**
+            - `list[str] | tuple[str, ...]`: Specified fields are configurable.
 
-    Raises:
-        ValueError: If model_provider cannot be inferred or isn't supported.
-        ImportError: If the model provider integration package is not installed.
+            Fields are assumed to have `config_prefix` stripped if a `config_prefix` is
+            specified.
 
-    ??? note "Init non-configurable model"
-        :open:
+            If `model` is specified, then defaults to `None`.
 
-        .. code-block:: python
+            If `model` is not specified, then defaults to `("model", "model_provider")`.
 
-            # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
-            from langchain.chat_models import init_chat_model
+            !!! warning "Security note"
+                Setting `configurable_fields="any"` means fields like `api_key`,
+                `base_url`, etc., can be altered at runtime, potentially redirecting
+                model requests to a different service/user.
 
-            o3_mini = init_chat_model("openai:o3-mini", temperature=0)
-            claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-            gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
+                Make sure that if you're accepting untrusted configurations that you
+                enumerate the `configurable_fields=(...)` explicitly.
 
-            o3_mini.invoke("what's your name")
-            claude_sonnet.invoke("what's your name")
-            gemini_2_flash.invoke("what's your name")
+        config_prefix: Optional prefix for configuration keys.
 
+            Useful when you have multiple configurable models in the same application.
 
-    ??? note "Partially configurable model with no default"
+            If `'config_prefix'` is a non-empty string then `model` will be configurable
+            at runtime via the `config["configurable"]["{config_prefix}_{param}"]` keys.
+            See examples below.
 
-        .. code-block:: python
+            If `'config_prefix'` is an empty string then model will be configurable via
+            `config["configurable"]["{param}"]`.
+        **kwargs: Additional model-specific keyword args to pass to the underlying
+            chat model's `__init__` method. Common parameters include:
 
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+            - `temperature`: Model temperature for controlling randomness.
+            - `max_tokens`: Maximum number of output tokens.
+            - `timeout`: Maximum time (in seconds) to wait for a response.
+            - `max_retries`: Maximum number of retry attempts for failed requests.
+            - `base_url`: Custom API endpoint URL.
+            - `rate_limiter`: A
+              [`BaseRateLimiter`][langchain_core.rate_limiters.BaseRateLimiter]
+              instance to control request rate.
 
-            # We don't need to specify configurable=True if a model isn't specified.
-            configurable_model = init_chat_model(temperature=0)
+            Refer to the specific model provider's
+            [integration reference](https://reference.langchain.com/python/integrations/)
+            for all available parameters.
 
-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "gpt-4o"}}
-            )
-            # GPT-4o response
+    Returns:
+        A `BaseChatModel` corresponding to the `model_name` and `model_provider`
+        specified if configurability is inferred to be `False`. If configurable, a
+        chat model emulator that initializes the underlying model at runtime once a
+        config is passed in.
 
-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
-            )
-            # claude-3.5 sonnet response
+    Raises:
+        ValueError: If `model_provider` cannot be inferred or isn't supported.
+        ImportError: If the model provider integration package is not installed.
 
-    ??? note "Fully configurable model with a default"
+    ???+ example "Initialize a non-configurable model"
 
-        .. code-block:: python
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
 
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+        from langchain.chat_models import init_chat_model
 
-            configurable_model_with_default = init_chat_model(
-                "openai:gpt-4o",
-                configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
-                config_prefix="foo",
-                temperature=0,
-            )
+        o3_mini = init_chat_model("openai:o3-mini", temperature=0)
+        claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
+        gemini_2_5_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
-            configurable_model_with_default.invoke("what's your name")
-            # GPT-4o response with temperature 0
-
-            configurable_model_with_default.invoke(
-                "what's your name",
-                config={
-                    "configurable": {
-                        "foo_model": "anthropic:claude-3-5-sonnet-latest",
-                        "foo_temperature": 0.6,
-                    }
-                },
-            )
-            # Claude-3.5 sonnet response with temperature 0.6
+        o3_mini.invoke("what's your name")
+        claude_sonnet.invoke("what's your name")
+        gemini_2_5_flash.invoke("what's your name")
+        ```
 
-    ??? note "Bind tools to a configurable model"
+    ??? example "Partially configurable model with no default"
 
-        You can call any ChatModel declarative methods on a configurable model in the
-        same way that you would with a normal model.
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
 
-        .. code-block:: python
+        from langchain.chat_models import init_chat_model
 
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
-            from pydantic import BaseModel, Field
+        # (We don't need to specify configurable=True if a model isn't specified.)
+        configurable_model = init_chat_model(temperature=0)
 
+        configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
+        # Use GPT-4o to generate the response
 
-            class GetWeather(BaseModel):
-                '''Get the current weather in a given location'''
+        configurable_model.invoke(
+            "what's your name",
+            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
+        )
+        ```
 
-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+    ??? example "Fully configurable model with a default"
 
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
 
-            class GetPopulation(BaseModel):
-                '''Get the current population in a given location'''
+        from langchain.chat_models import init_chat_model
 
-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+        configurable_model_with_default = init_chat_model(
+            "openai:gpt-4o",
+            configurable_fields="any",  # This allows us to configure other params like temperature, max_tokens, etc at runtime.
+            config_prefix="foo",
+            temperature=0,
+        )
 
+        configurable_model_with_default.invoke("what's your name")
+        # GPT-4o response with temperature 0 (as set in default)
+
+        configurable_model_with_default.invoke(
+            "what's your name",
+            config={
+                "configurable": {
+                    "foo_model": "anthropic:claude-sonnet-4-5-20250929",
+                    "foo_temperature": 0.6,
+                }
+            },
+        )
+        # Override default to use Sonnet 4.5 with temperature 0.6 to generate response
+        ```
 
-            configurable_model = init_chat_model(
-                "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
-            )
+    ??? example "Bind tools to a configurable model"
 
-            configurable_model_with_tools = configurable_model.bind_tools(
-                [GetWeather, GetPopulation]
-            )
-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?"
-            )
-            # GPT-4o response with tool calls
+        You can call any chat model declarative methods on a configurable model in the
+        same way that you would with a normal model:
 
-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?",
-                config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
-            )
-            # Claude-3.5 sonnet response with tools
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+
+        from langchain.chat_models import init_chat_model
+        from pydantic import BaseModel, Field
 
-    !!! version-added "Added in version 0.2.7"
 
-    !!! warning "Behavior changed in 0.2.8"
-        Support for ``configurable_fields`` and ``config_prefix`` added.
+        class GetWeather(BaseModel):
+            '''Get the current weather in a given location'''
 
-    !!! warning "Behavior changed in 0.2.12"
-        Support for Ollama via langchain-ollama package added
-        (langchain_ollama.ChatOllama). Previously,
-        the now-deprecated langchain-community version of Ollama was imported
-        (langchain_community.chat_models.ChatOllama).
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
 
-        Support for AWS Bedrock models via the Converse API added
-        (model_provider="bedrock_converse").
 
-    !!! warning "Behavior changed in 0.3.5"
-        Out of beta.
+        class GetPopulation(BaseModel):
+            '''Get the current population in a given location'''
 
-    !!! warning "Behavior changed in 0.3.19"
-        Support for Deepseek, IBM, Nvidia, and xAI models added.
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+
+        configurable_model = init_chat_model(
+            "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
+        )
+
+        configurable_model_with_tools = configurable_model.bind_tools(
+            [
+                GetWeather,
+                GetPopulation,
+            ]
+        )
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?"
+        )
+        # Use GPT-4o
+
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?",
+            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
+        )
+        # Use Sonnet 4.5
+        ```
 
     """  # noqa: E501
     if not model and not configurable_fields:
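The security note in the new docstring is straightforward to act on. A hedged sketch (model names are illustrative): when runtime configs may come from untrusted callers, enumerate the configurable fields rather than passing `configurable_fields="any"`:

```python
from langchain.chat_models import init_chat_model

# Only these three fields can be overridden via config at runtime;
# sensitive fields such as api_key and base_url stay fixed.
model = init_chat_model(
    "openai:gpt-4o",
    configurable_fields=("model", "model_provider", "temperature"),
    temperature=0,
)

# With no config_prefix, the configurable keys are the bare parameter names.
model.invoke(
    "what's your name",
    config={"configurable": {"model": "gpt-4o-mini", "temperature": 0.2}},
)
```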
@@ -626,11 +631,8 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
 
     @property
    def InputType(self) -> TypeAlias:
-        """Get the input type for this runnable."""
-        from langchain_core.prompt_values import (
-            ChatPromptValueConcrete,
-            StringPromptValue,
-        )
+        """Get the input type for this `Runnable`."""
+        from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
 
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
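For context, the union built in this property mirrors `LanguageModelInput`, so a configurable model accepts the same inputs as a regular chat model. Illustrative usage (the model name is an assumption, not taken from this diff):

```python
from langchain_core.messages import HumanMessage

from langchain.chat_models import init_chat_model

model = init_chat_model("openai:gpt-4o-mini")

# All LanguageModelInput forms are accepted:
model.invoke("hello")                  # plain string
model.invoke([HumanMessage("hello")])  # list of messages
```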