langchain 1.0.0a15__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -71,107 +71,115 @@ def init_chat_model(
  config_prefix: str | None = None,
  **kwargs: Any,
  ) -> BaseChatModel | _ConfigurableModel:
- """Initialize a ChatModel from the model name and provider.
+ """Initialize a chat model in a single line using the model's name and provider.

  !!! note
- Must have the integration package corresponding to the model provider
- installed.
+ Requires the integration package for your model provider to be installed.
+
+ See the `model_provider` parameter below for specific package names
+ (e.g., `pip install langchain-openai`).
+
+ Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
+ for supported model parameters.

  Args:
- model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
- also specify model and model provider in a single argument using
- '{model_provider}:{model}' format, e.g. "openai:o1".
- model_provider: The model provider if not specified as part of model arg (see
- above). Supported model_provider values and the corresponding integration
- package are:
-
- - 'openai' -> langchain-openai
- - 'anthropic' -> langchain-anthropic
- - 'azure_openai' -> langchain-openai
- - 'azure_ai' -> langchain-azure-ai
- - 'google_vertexai' -> langchain-google-vertexai
- - 'google_genai' -> langchain-google-genai
- - 'bedrock' -> langchain-aws
- - 'bedrock_converse' -> langchain-aws
- - 'cohere' -> langchain-cohere
- - 'fireworks' -> langchain-fireworks
- - 'together' -> langchain-together
- - 'mistralai' -> langchain-mistralai
- - 'huggingface' -> langchain-huggingface
- - 'groq' -> langchain-groq
- - 'ollama' -> langchain-ollama
- - 'google_anthropic_vertex' -> langchain-google-vertexai
- - 'deepseek' -> langchain-deepseek
- - 'ibm' -> langchain-ibm
- - 'nvidia' -> langchain-nvidia-ai-endpoints
- - 'xai' -> langchain-xai
- - 'perplexity' -> langchain-perplexity
-
- Will attempt to infer model_provider from model if not specified. The
+ model: The name of the model, e.g. `'o3-mini'`, `'claude-sonnet-4-5-20250929'`.
+
+ You can also specify the model and model provider in a single argument
+ using the `'{model_provider}:{model}'` format, e.g. `'openai:o1'`.
+ model_provider: The model provider if not specified as part of the model arg
+ (see above). Supported `model_provider` values and the corresponding
+ integration package are:
+
+ - `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
+ - `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
+ - `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
+ - `azure_ai` -> [`langchain-azure-ai`](https://docs.langchain.com/oss/python/integrations/providers/microsoft)
+ - `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+ - `google_genai` -> [`langchain-google-genai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+ - `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
+ - `bedrock_converse` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
+ - `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
+ - `fireworks` -> [`langchain-fireworks`](https://docs.langchain.com/oss/python/integrations/providers/fireworks)
+ - `together` -> [`langchain-together`](https://docs.langchain.com/oss/python/integrations/providers/together)
+ - `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
+ - `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
+ - `groq` -> [`langchain-groq`](https://docs.langchain.com/oss/python/integrations/providers/groq)
+ - `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
+ - `google_anthropic_vertex` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
+ - `deepseek` -> [`langchain-deepseek`](https://docs.langchain.com/oss/python/integrations/providers/deepseek)
+ - `ibm` -> [`langchain-ibm`](https://docs.langchain.com/oss/python/integrations/providers/ibm)
+ - `nvidia` -> [`langchain-nvidia-ai-endpoints`](https://docs.langchain.com/oss/python/integrations/providers/nvidia)
+ - `xai` -> [`langchain-xai`](https://docs.langchain.com/oss/python/integrations/providers/xai)
+ - `perplexity` -> [`langchain-perplexity`](https://docs.langchain.com/oss/python/integrations/providers/perplexity)
+
+ Will attempt to infer `model_provider` from the model if not specified. The
  following providers will be inferred based on these model prefixes:

- - 'gpt-...' | 'o1...' | 'o3...' -> 'openai'
- - 'claude...' -> 'anthropic'
- - 'amazon....' -> 'bedrock'
- - 'gemini...' -> 'google_vertexai'
- - 'command...' -> 'cohere'
- - 'accounts/fireworks...' -> 'fireworks'
- - 'mistral...' -> 'mistralai'
- - 'deepseek...' -> 'deepseek'
- - 'grok...' -> 'xai'
- - 'sonar...' -> 'perplexity'
- configurable_fields: Which model parameters are
- configurable:
-
- - None: No configurable fields.
- - "any": All fields are configurable. *See Security Note below.*
- - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
-
- Fields are assumed to have config_prefix stripped if there is a
- config_prefix. If model is specified, then defaults to None. If model is
+ - `gpt-...` | `o1...` | `o3...` -> `openai`
+ - `claude...` -> `anthropic`
+ - `amazon...` -> `bedrock`
+ - `gemini...` -> `google_vertexai`
+ - `command...` -> `cohere`
+ - `accounts/fireworks...` -> `fireworks`
+ - `mistral...` -> `mistralai`
+ - `deepseek...` -> `deepseek`
+ - `grok...` -> `xai`
+ - `sonar...` -> `perplexity`
+ configurable_fields: Which model parameters are configurable:
+
+ - `None`: No configurable fields.
+ - `'any'`: All fields are configurable. **See security note below.**
+ - `list[str] | tuple[str, ...]`: Specified fields are configurable.
+
+ Fields are assumed to have `config_prefix` stripped if there is a
+ `config_prefix`. If model is specified, then defaults to `None`. If model is
  not specified, then defaults to `("model", "model_provider")`.

- **Security Note**: Setting `configurable_fields="any"` means fields like
- api_key, base_url, etc. can be altered at runtime, potentially redirecting
- model requests to a different service/user. Make sure that if you're
- accepting untrusted configurations that you enumerate the
- `configurable_fields=(...)` explicitly.
+ !!! warning "Security note"
+ Setting `configurable_fields="any"` means fields like `api_key`,
+ `base_url`, etc. can be altered at runtime, potentially redirecting
+ model requests to a different service/user. If you're accepting
+ untrusted configurations, make sure to enumerate the
+ `configurable_fields=(...)` explicitly.

- config_prefix: If config_prefix is a non-empty string then model will be
+ config_prefix: If `config_prefix` is a non-empty string then the model will be
  configurable at runtime via the
  `config["configurable"]["{config_prefix}_{param}"]` keys. If
- config_prefix is an empty string then model will be configurable via
+ `config_prefix` is an empty string then the model will be configurable via
  `config["configurable"]["{param}"]`.
- kwargs: Additional model-specific keyword args to pass to
- `<<selected ChatModel>>.__init__(model=model_name, **kwargs)`. Examples
- include:
- * temperature: Model temperature.
- * max_tokens: Max output tokens.
- * timeout: The maximum time (in seconds) to wait for a response from the model
- before canceling the request.
- * max_retries: The maximum number of attempts the system will make to resend a
- request if it fails due to issues like network timeouts or rate limits.
- * base_url: The URL of the API endpoint where requests are sent.
- * rate_limiter: A `BaseRateLimiter` to space out requests to avoid exceeding
- rate limits.
+ **kwargs: Additional model-specific keyword args to pass to the underlying
+ chat model's `__init__` method. Common parameters include:
+
+ - `temperature`: Model temperature for controlling randomness.
+ - `max_tokens`: Maximum number of output tokens.
+ - `timeout`: Maximum time (in seconds) to wait for a response.
+ - `max_retries`: Maximum number of retry attempts for failed requests.
+ - `base_url`: Custom API endpoint URL.
+ - `rate_limiter`: A `BaseRateLimiter` instance to control request rate.
+
+ Refer to the specific model provider's documentation for all available
+ parameters.

  Returns:
- A BaseChatModel corresponding to the model_name and model_provider specified if
- configurability is inferred to be False. If configurable, a chat model emulator
- that initializes the underlying model at runtime once a config is passed in.
+ A `BaseChatModel` corresponding to the `model_name` and `model_provider`
+ specified if configurability is inferred to be `False`. If configurable, a
+ chat model emulator that initializes the underlying model at runtime once a
+ config is passed in.

  Raises:
- ValueError: If model_provider cannot be inferred or isn't supported.
+ ValueError: If `model_provider` cannot be inferred or isn't supported.
  ImportError: If the model provider integration package is not installed.

- ???+ note "Init non-configurable model"
+ ???+ note "Initialize a non-configurable model"

  ```python
  # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
  from langchain.chat_models import init_chat_model

  o3_mini = init_chat_model("openai:o3-mini", temperature=0)
- claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
+ claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
  gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)

  o3_mini.invoke("what's your name")
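
*Editor's note:* a minimal sketch of the provider-inference behavior documented in this hunk. It assumes `langchain` and `langchain-anthropic` are installed and an `ANTHROPIC_API_KEY` is set in the environment; the model name is taken from the examples in this diff.

```python
# pip install langchain langchain-anthropic
from langchain.chat_models import init_chat_model

# No explicit provider: the "claude..." prefix is inferred as "anthropic",
# so this resolves to the langchain-anthropic integration under the hood.
claude = init_chat_model("claude-sonnet-4-5-20250929", temperature=0)

response = claude.invoke("what's your name")
print(response.content)
```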
@@ -192,9 +200,9 @@ def init_chat_model(
  # GPT-4o response

  configurable_model.invoke(
- "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
+ "what's your name",
+ config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
  )
- # claude-3.5 sonnet response
  ```

  ??? note "Fully configurable model with a default"
@@ -205,7 +213,7 @@ def init_chat_model(

  configurable_model_with_default = init_chat_model(
  "openai:gpt-4o",
- configurable_fields="any", # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+ configurable_fields="any", # This allows us to configure other params like temperature, max_tokens, etc. at runtime.
  config_prefix="foo",
  temperature=0,
  )
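
*Editor's note:* per the security note above, a safer pattern than `configurable_fields="any"` is to enumerate the runtime-tunable fields explicitly. A minimal sketch; the field choices and the `"llm"` prefix here are illustrative, not taken from the package.

```python
from langchain.chat_models import init_chat_model

# Only `temperature` and `max_tokens` may be overridden at runtime;
# sensitive fields like `api_key` and `base_url` stay fixed.
guarded_model = init_chat_model(
    "openai:gpt-4o",
    configurable_fields=("temperature", "max_tokens"),
    config_prefix="llm",
    temperature=0,
)

guarded_model.invoke(
    "what's your name",
    config={"configurable": {"llm_temperature": 0.3}},
)
```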
@@ -217,18 +225,17 @@ def init_chat_model(
  "what's your name",
  config={
  "configurable": {
- "foo_model": "anthropic:claude-3-5-sonnet-latest",
+ "foo_model": "anthropic:claude-sonnet-4-5-20250929",
  "foo_temperature": 0.6,
  }
  },
  )
- # Claude-3.5 sonnet response with temperature 0.6
  ```

  ??? note "Bind tools to a configurable model"

- You can call any ChatModel declarative methods on a configurable model in the
- same way that you would with a normal model.
+ You can call any chat model declarative methods on a configurable model in the
+ same way that you would with a normal model:

  ```python
  # pip install langchain langchain-openai langchain-anthropic
@@ -252,39 +259,22 @@ def init_chat_model(
  "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
  )

- configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+ configurable_model_with_tools = configurable_model.bind_tools(
+ [
+ GetWeather,
+ GetPopulation,
+ ]
+ )
  configurable_model_with_tools.invoke(
  "Which city is hotter today and which is bigger: LA or NY?"
  )
- # GPT-4o response with tool calls

  configurable_model_with_tools.invoke(
  "Which city is hotter today and which is bigger: LA or NY?",
- config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
+ config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
  )
- # Claude-3.5 sonnet response with tools
  ```

- !!! version-added "Added in version 0.2.7"
-
- !!! warning "Behavior changed in 0.2.8"
- Support for `configurable_fields` and `config_prefix` added.
-
- !!! warning "Behavior changed in 0.2.12"
- Support for Ollama via langchain-ollama package added
- (langchain_ollama.ChatOllama). Previously,
- the now-deprecated langchain-community version of Ollama was imported
- (langchain_community.chat_models.ChatOllama).
-
- Support for AWS Bedrock models via the Converse API added
- (model_provider="bedrock_converse").
-
- !!! warning "Behavior changed in 0.3.5"
- Out of beta.
-
- !!! warning "Behavior changed in 0.3.19"
- Support for Deepseek, IBM, Nvidia, and xAI models added.
-
  """ # noqa: E501
  if not model and not configurable_fields:
  configurable_fields = ("model", "model_provider")
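
*Editor's note:* the `bind_tools` hunk above references `GetWeather` and `GetPopulation`, which are defined in an unchanged part of the docstring and so don't appear in this diff. A plausible reconstruction, assuming they are simple Pydantic schemas as in earlier releases of this docstring:

```python
from pydantic import BaseModel, Field

from langchain.chat_models import init_chat_model


class GetWeather(BaseModel):
    """Get the current weather in a given location."""

    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


class GetPopulation(BaseModel):
    """Get the current population in a given location."""

    location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


configurable_model = init_chat_model(
    "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
)
configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
```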
@@ -1,4 +1,10 @@
- """Embeddings."""
+ """Embeddings.
+
+ !!! warning "Reference docs"
+ This page contains **reference documentation** for Embeddings. See
+ [the docs](https://docs.langchain.com/oss/python/langchain/retrieval#embedding-models)
+ for conceptual guides, tutorials, and examples on using Embeddings.
+ """

  from langchain_core.embeddings import Embeddings

@@ -134,7 +134,7 @@ def init_embeddings(

  Args:
  model: Name of the model to use. Can be either:
- - A model string like "openai:text-embedding-3-small"
+ - A model string like `"openai:text-embedding-3-small"`
  - Just the model name if provider is specified
  provider: Optional explicit provider name. If not specified,
  will attempt to parse from the model string. Supported providers
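
*Editor's note:* a minimal usage sketch of the two calling conventions described in this hunk, assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set:

```python
from langchain.embeddings import init_embeddings

# Provider parsed from the model string:
embeddings = init_embeddings("openai:text-embedding-3-small")

# Or with an explicit provider:
embeddings = init_embeddings("text-embedding-3-small", provider="openai")

vector = embeddings.embed_query("hello world")
print(len(vector))  # Dimensionality of the embedding
```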
@@ -1,4 +1,13 @@
- """Message types."""
+ """Message types.
+
+ Includes message types for different roles (e.g., human, AI, system), as well as types
+ for message content blocks (e.g., text, image, audio) and tool calls.
+
+ !!! warning "Reference docs"
+ This page contains **reference documentation** for Messages. See
+ [the docs](https://docs.langchain.com/oss/python/langchain/messages) for conceptual
+ guides, tutorials, and examples on using Messages.
+ """

  from langchain_core.messages import (
  AIMessage,
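
*Editor's note:* a short sketch of the role-based message types the new module docstring describes, assuming `HumanMessage` and `SystemMessage` are re-exported alongside `AIMessage` as in prior releases:

```python
from langchain.messages import AIMessage, HumanMessage, SystemMessage

# A typical conversation passed to a chat model:
messages = [
    SystemMessage("You are a terse assistant."),
    HumanMessage("What's the capital of France?"),
]

# An AIMessage is what a chat model returns; constructed by hand here.
reply = AIMessage("Paris.")
```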
@@ -1,4 +1,10 @@
- """Tools."""
+ """Tools.
+
+ !!! warning "Reference docs"
+ This page contains **reference documentation** for Tools. See
+ [the docs](https://docs.langchain.com/oss/python/langchain/tools) for conceptual
+ guides, tutorials, and examples on using Tools.
+ """

  from langchain_core.tools import (
  BaseTool,
@@ -8,7 +14,7 @@ from langchain_core.tools import (
  tool,
  )

- from langchain.tools.tool_node import InjectedState, InjectedStore
+ from langchain.tools.tool_node import InjectedState, InjectedStore, ToolRuntime

  __all__ = [
  "BaseTool",
@@ -17,5 +23,6 @@ __all__ = [
  "InjectedToolArg",
  "InjectedToolCallId",
  "ToolException",
+ "ToolRuntime",
  "tool",
  ]
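
*Editor's note:* a minimal sketch of the `tool` decorator re-exported by this module. It does not exercise the newly added `ToolRuntime` export; the tool body is a stub for illustration.

```python
from langchain.tools import tool


@tool
def get_weather(city: str) -> str:
    """Return a short weather description for a city."""
    # Stub implementation; a real tool would call a weather API.
    return f"It's sunny in {city}."


# Tools are invoked with a dict matching their argument schema:
print(get_weather.invoke({"city": "LA"}))
```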