langchain 1.0.1__py3-none-any.whl → 1.0.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
@@ -34,17 +34,21 @@ SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
 class StructuredOutputError(Exception):
     """Base class for structured output errors."""
 
+    ai_message: AIMessage
+
 
 class MultipleStructuredOutputsError(StructuredOutputError):
     """Raised when model returns multiple structured output tool calls when only one is expected."""
 
-    def __init__(self, tool_names: list[str]) -> None:
+    def __init__(self, tool_names: list[str], ai_message: AIMessage) -> None:
         """Initialize `MultipleStructuredOutputsError`.
 
         Args:
             tool_names: The names of the tools called for structured output.
+            ai_message: The AI message that contained the invalid multiple tool calls.
         """
         self.tool_names = tool_names
+        self.ai_message = ai_message
 
         super().__init__(
             "Model incorrectly returned multiple structured responses "
@@ -55,15 +59,17 @@ class MultipleStructuredOutputsError(StructuredOutputError):
 class StructuredOutputValidationError(StructuredOutputError):
     """Raised when structured output tool call arguments fail to parse according to the schema."""
 
-    def __init__(self, tool_name: str, source: Exception) -> None:
+    def __init__(self, tool_name: str, source: Exception, ai_message: AIMessage) -> None:
         """Initialize `StructuredOutputValidationError`.
 
         Args:
             tool_name: The name of the tool that failed.
             source: The exception that occurred.
+            ai_message: The AI message that contained the invalid structured output.
         """
         self.tool_name = tool_name
         self.source = source
+        self.ai_message = ai_message
         super().__init__(f"Failed to parse structured output for tool '{tool_name}': {source}.")
 
 
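
The practical upshot of these two hunks: when structured output fails, the raw `AIMessage` now travels with the exception. A minimal sketch of consuming the new attribute — the import path is assumed from the module diffed above, and `runnable` is a stand-in for whatever produces structured output:

```python
# Sketch only: the error class lives in the structured-output module shown in
# this diff; adjust the import path if your installed layout differs.
from langchain.agents.structured_output import StructuredOutputValidationError


def invoke_with_fallback(runnable, input_):
    """Invoke a runnable, falling back to the raw model message on bad output."""
    try:
        return runnable.invoke(input_)
    except StructuredOutputValidationError as e:
        # New in 1.0.4: the offending AIMessage is attached to the error, so
        # callers can log it or feed it back to the model for a retry.
        print(f"Tool {e.tool_name!r} returned unparseable arguments: {e.source}")
        return e.ai_message  # inspect .content / .tool_calls as needed
```
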
@@ -64,26 +64,34 @@ def init_chat_model(
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel | _ConfigurableModel:
-    """Initialize a chat model in a single line using the model's name and provider.
+    """Initialize a chat model from any supported provider using a unified interface.
+
+    **Two main use cases:**
+
+    1. **Fixed model** – specify the model upfront and get a ready-to-use chat model.
+    2. **Configurable model** – choose to specify parameters (including model name) at
+       runtime via `config`. Makes it easy to switch between models/providers without
+       changing your code.
 
     !!! note
-        Requires the integration package for your model provider to be installed.
+        Requires the integration package for the chosen model provider to be installed.
 
         See the `model_provider` parameter below for specific package names
         (e.g., `pip install langchain-openai`).
 
         Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
-        for supported model parameters.
+        for supported model parameters to use as `**kwargs`.
 
     Args:
-        model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5'`.
-
-            You can also specify model and model provider in a single argument using:
+        model: The name or ID of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
 
+            You can also specify model and model provider in a single argument using
             `'{model_provider}:{model}'` format, e.g. `'openai:o1'`.
         model_provider: The model provider if not specified as part of the model arg
-            (see above). Supported `model_provider` values and the corresponding
-            integration package are:
+            (see above).
+
+            Supported `model_provider` values and the corresponding integration package
+            are:
 
             - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
             - `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
@@ -120,27 +128,36 @@ def init_chat_model(
             - `deepseek...` -> `deepseek`
             - `grok...` -> `xai`
             - `sonar...` -> `perplexity`
-        configurable_fields: Which model parameters are configurable:
+        configurable_fields: Which model parameters are configurable at runtime:
 
-            - `None`: No configurable fields.
+            - `None`: No configurable fields (i.e., a fixed model).
             - `'any'`: All fields are configurable. **See security note below.**
             - `list[str] | Tuple[str, ...]`: Specified fields are configurable.
 
-            Fields are assumed to have `config_prefix` stripped if there is a
-            `config_prefix`. If model is specified, then defaults to `None`. If model is
-            not specified, then defaults to `("model", "model_provider")`.
+            Fields are assumed to have `config_prefix` stripped if a `config_prefix` is
+            specified.
+
+            If `model` is specified, then defaults to `None`.
+
+            If `model` is not specified, then defaults to `("model", "model_provider")`.
 
             !!! warning "Security note"
                 Setting `configurable_fields="any"` means fields like `api_key`,
-                `base_url`, etc. can be altered at runtime, potentially redirecting
-                model requests to a different service/user. Make sure that if you're
-                accepting untrusted configurations that you enumerate the
-                `configurable_fields=(...)` explicitly.
-
-        config_prefix: If `'config_prefix'` is a non-empty string then model will be
-            configurable at runtime via the
-            `config["configurable"]["{config_prefix}_{param}"]` keys. If
-            `'config_prefix'` is an empty string then model will be configurable via
+                `base_url`, etc., can be altered at runtime, potentially redirecting
+                model requests to a different service/user.
+
+                If you're accepting untrusted configurations, make sure to enumerate
+                `configurable_fields=(...)` explicitly.
+
+        config_prefix: Optional prefix for configuration keys.
+
+            Useful when you have multiple configurable models in the same application.
+
+            If `config_prefix` is a non-empty string then `model` will be configurable
+            at runtime via the `config["configurable"]["{config_prefix}_{param}"]` keys.
+            See examples below.
+
+            If `config_prefix` is an empty string then `model` will be configurable via
             `config["configurable"]["{param}"]`.
         **kwargs: Additional model-specific keyword args to pass to the underlying
             chat model's `__init__` method. Common parameters include:
@@ -150,10 +167,13 @@ def init_chat_model(
             - `timeout`: Maximum time (in seconds) to wait for a response.
             - `max_retries`: Maximum number of retry attempts for failed requests.
             - `base_url`: Custom API endpoint URL.
-            - `rate_limiter`: A `BaseRateLimiter` instance to control request rate.
+            - `rate_limiter`: A
+              [`BaseRateLimiter`][langchain_core.rate_limiters.BaseRateLimiter]
+              instance to control request rate.
 
-            Refer to the specific model provider's documentation for all available
-            parameters.
+            Refer to the specific model provider's
+            [integration reference](https://reference.langchain.com/python/integrations/)
+            for all available parameters.
 
     Returns:
         A `BaseChatModel` corresponding to the `model_name` and `model_provider`
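
Since the docstring now links `rate_limiter` to the core class, here is a brief sketch of wiring one up through `init_chat_model` (the rate numbers are arbitrary, for illustration):

```python
from langchain.chat_models import init_chat_model
from langchain_core.rate_limiters import InMemoryRateLimiter

# Allow roughly one request every two seconds, with a small burst allowance.
limiter = InMemoryRateLimiter(
    requests_per_second=0.5,
    check_every_n_seconds=0.1,  # how often the limiter wakes up to check
    max_bucket_size=5,          # maximum burst size
)

model = init_chat_model(
    "openai:gpt-4o",
    temperature=0,
    timeout=30,
    max_retries=2,
    rate_limiter=limiter,
)
```
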
@@ -165,43 +185,46 @@ def init_chat_model(
         ValueError: If `model_provider` cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.
 
-    ???+ note "Initialize a non-configurable model"
+    ???+ example "Initialize a non-configurable model"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
+
         from langchain.chat_models import init_chat_model
 
         o3_mini = init_chat_model("openai:o3-mini", temperature=0)
-        claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5", temperature=0)
-        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
+        claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
+        gemini_2_5_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
         o3_mini.invoke("what's your name")
         claude_sonnet.invoke("what's your name")
-        gemini_2_flash.invoke("what's your name")
+        gemini_2_5_flash.invoke("what's your name")
         ```
 
-    ??? note "Partially configurable model with no default"
+    ??? example "Partially configurable model with no default"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic
+
        from langchain.chat_models import init_chat_model
 
-        # We don't need to specify configurable=True if a model isn't specified.
+        # (We don't need to specify configurable=True if a model isn't specified.)
        configurable_model = init_chat_model(temperature=0)
 
        configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
-        # GPT-4o response
+        # Use GPT-4o to generate the response
 
        configurable_model.invoke(
            "what's your name",
-            config={"configurable": {"model": "claude-sonnet-4-5"}},
+            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
        )
        ```
 
-    ??? note "Fully configurable model with a default"
+    ??? example "Fully configurable model with a default"
 
         ```python
         # pip install langchain langchain-openai langchain-anthropic
+
        from langchain.chat_models import init_chat_model
 
        configurable_model_with_default = init_chat_model(
@@ -212,26 +235,28 @@ def init_chat_model(
        )
 
        configurable_model_with_default.invoke("what's your name")
-        # GPT-4o response with temperature 0
+        # GPT-4o response with temperature 0 (as set in default)
 
        configurable_model_with_default.invoke(
            "what's your name",
            config={
                "configurable": {
-                    "foo_model": "anthropic:claude-sonnet-4-5",
+                    "foo_model": "anthropic:claude-sonnet-4-5-20250929",
                    "foo_temperature": 0.6,
                }
            },
        )
+        # Override the default to use Sonnet 4.5 with temperature 0.6
        ```
 
-    ??? note "Bind tools to a configurable model"
+    ??? example "Bind tools to a configurable model"
 
        You can call any of a chat model's declarative methods on a configurable
        model in the same way that you would with a normal model:
 
        ```python
        # pip install langchain langchain-openai langchain-anthropic
+
        from langchain.chat_models import init_chat_model
        from pydantic import BaseModel, Field
 
@@ -261,11 +286,13 @@ def init_chat_model(
        configurable_model_with_tools.invoke(
            "Which city is hotter today and which is bigger: LA or NY?"
        )
+        # Use GPT-4o
 
        configurable_model_with_tools.invoke(
            "Which city is hotter today and which is bigger: LA or NY?",
-            config={"configurable": {"model": "claude-sonnet-4-5"}},
+            config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
        )
+        # Use Sonnet 4.5
        ```
 
    """  # noqa: E501
@@ -1,4 +1,4 @@
-"""Embeddings.
+"""Embeddings models.
 
 !!! warning "Reference docs"
     This page contains **reference documentation** for Embeddings. See
@@ -126,26 +126,27 @@ def init_embeddings(
     provider: str | None = None,
     **kwargs: Any,
 ) -> Embeddings:
-    """Initialize an embeddings model from a model name and optional provider.
+    """Initialize an embedding model from a model name and optional provider.
 
     !!! note
-        Must have the integration package corresponding to the model provider
-        installed.
+        Requires the integration package for the chosen model provider to be installed.
 
-    Args:
-        model: Name of the model to use.
+        See the `provider` parameter below for specific package names
+        (e.g., `pip install langchain-openai`).
 
-            Can be either:
+        Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
+        for supported model parameters to use as `**kwargs`.
 
-            - A model string like `"openai:text-embedding-3-small"`
-            - Just the model name if the provider is specified separately or can be
-              inferred.
+    Args:
+        model: The name of the model, e.g. `'text-embedding-3-small'`.
 
-            See supported providers under the `provider` arg description.
-        provider: Optional explicit provider name. If not specified, will attempt to
-            parse from the model string in the `model` arg.
+            You can also specify model and model provider in a single argument using
+            `'{provider}:{model}'` format, e.g. `'openai:text-embedding-3-small'`.
+        provider: The model provider if not specified as part of the model arg
+            (see above).
 
-            Supported providers:
+            Supported `provider` values and the corresponding integration package
+            are:
 
             - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
             - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
@@ -157,7 +158,10 @@ def init_embeddings(
             - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
 
         **kwargs: Additional model-specific parameters passed to the embedding model.
-            These vary by provider, see the provider-specific documentation for details.
+
+            These vary by provider. Refer to the specific model provider's
+            [integration reference](https://reference.langchain.com/python/integrations/)
+            for all available parameters.
 
     Returns:
         An `Embeddings` instance that can generate embeddings for text.
@@ -166,9 +170,11 @@ def init_embeddings(
         ValueError: If the model provider is not supported or cannot be determined
         ImportError: If the required provider package is not installed
 
-    ???+ note "Example Usage"
+    ???+ example
 
         ```python
+        # pip install langchain langchain-openai
+
        # Using a model string
        model = init_embeddings("openai:text-embedding-3-small")
        model.embed_query("Hello, world!")
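
For completeness, the `provider` argument described above can also be passed separately from a bare model name; a brief sketch:

```python
from langchain.embeddings import init_embeddings

# Equivalent ways to select the same model: provider embedded in the model
# string, or passed explicitly alongside the bare model name.
by_string = init_embeddings("openai:text-embedding-3-small")
by_provider = init_embeddings("text-embedding-3-small", provider="openai")

vectors = by_provider.embed_documents(["Hello, world!", "Goodbye, world!"])
print(len(vectors), len(vectors[0]))  # document count, embedding dimension
```
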
@@ -1,4 +1,4 @@
-"""Message types.
+"""Message and message content types.
 
 Includes message types for different roles (e.g., human, AI, system), as well as types
 for message content blocks (e.g., text, image, audio) and tool calls.
@@ -21,10 +21,12 @@ from langchain_core.messages import (
     FileContentBlock,
     HumanMessage,
     ImageContentBlock,
+    InputTokenDetails,
     InvalidToolCall,
     MessageLikeRepresentation,
     NonStandardAnnotation,
     NonStandardContentBlock,
+    OutputTokenDetails,
     PlainTextContentBlock,
     ReasoningContentBlock,
     RemoveMessage,
@@ -36,6 +38,7 @@ from langchain_core.messages import (
     ToolCall,
     ToolCallChunk,
     ToolMessage,
+    UsageMetadata,
     VideoContentBlock,
     trim_messages,
 )
  )
@@ -52,10 +55,12 @@ __all__ = [
52
55
  "FileContentBlock",
53
56
  "HumanMessage",
54
57
  "ImageContentBlock",
58
+ "InputTokenDetails",
55
59
  "InvalidToolCall",
56
60
  "MessageLikeRepresentation",
57
61
  "NonStandardAnnotation",
58
62
  "NonStandardContentBlock",
63
+ "OutputTokenDetails",
59
64
  "PlainTextContentBlock",
60
65
  "ReasoningContentBlock",
61
66
  "RemoveMessage",
@@ -67,6 +72,7 @@ __all__ = [
     "ToolCall",
     "ToolCallChunk",
     "ToolMessage",
+    "UsageMetadata",
     "VideoContentBlock",
     "trim_messages",
 ]
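
The newly re-exported `UsageMetadata`, `InputTokenDetails`, and `OutputTokenDetails` are the token-accounting types attached to AI messages; a small sketch of reading them (the model choice is arbitrary):

```python
from langchain.chat_models import init_chat_model
from langchain.messages import UsageMetadata  # re-exported as of 1.0.4

model = init_chat_model("openai:gpt-4o-mini")
response = model.invoke("Say hello in one word.")

usage: UsageMetadata | None = response.usage_metadata
if usage is not None:
    print(usage["input_tokens"], usage["output_tokens"], usage["total_tokens"])
    # Per-category breakdowns (cache reads, reasoning tokens, ...) appear
    # when the provider reports them:
    print(usage.get("input_token_details"), usage.get("output_token_details"))
```
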