langchain 1.0.0a12__py3-none-any.whl → 1.0.0a14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (40)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/factory.py +597 -171
  3. langchain/agents/middleware/__init__.py +9 -3
  4. langchain/agents/middleware/context_editing.py +15 -14
  5. langchain/agents/middleware/human_in_the_loop.py +213 -170
  6. langchain/agents/middleware/model_call_limit.py +2 -2
  7. langchain/agents/middleware/model_fallback.py +46 -36
  8. langchain/agents/middleware/pii.py +25 -27
  9. langchain/agents/middleware/planning.py +16 -11
  10. langchain/agents/middleware/prompt_caching.py +14 -11
  11. langchain/agents/middleware/summarization.py +1 -1
  12. langchain/agents/middleware/tool_call_limit.py +5 -5
  13. langchain/agents/middleware/tool_emulator.py +200 -0
  14. langchain/agents/middleware/tool_selection.py +25 -21
  15. langchain/agents/middleware/types.py +623 -225
  16. langchain/chat_models/base.py +85 -90
  17. langchain/embeddings/__init__.py +0 -2
  18. langchain/embeddings/base.py +20 -20
  19. langchain/messages/__init__.py +34 -0
  20. langchain/tools/__init__.py +2 -6
  21. langchain/tools/tool_node.py +410 -83
  22. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/METADATA +8 -5
  23. langchain-1.0.0a14.dist-info/RECORD +30 -0
  24. langchain/_internal/__init__.py +0 -0
  25. langchain/_internal/_documents.py +0 -35
  26. langchain/_internal/_lazy_import.py +0 -35
  27. langchain/_internal/_prompts.py +0 -158
  28. langchain/_internal/_typing.py +0 -70
  29. langchain/_internal/_utils.py +0 -7
  30. langchain/agents/_internal/__init__.py +0 -1
  31. langchain/agents/_internal/_typing.py +0 -13
  32. langchain/documents/__init__.py +0 -7
  33. langchain/embeddings/cache.py +0 -361
  34. langchain/storage/__init__.py +0 -22
  35. langchain/storage/encoder_backed.py +0 -123
  36. langchain/storage/exceptions.py +0 -5
  37. langchain/storage/in_memory.py +0 -13
  38. langchain-1.0.0a12.dist-info/RECORD +0 -43
  39. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/WHEEL +0 -0
  40. {langchain-1.0.0a12.dist-info → langchain-1.0.0a14.dist-info}/licenses/LICENSE +0 -0
langchain/chat_models/base.py

@@ -32,7 +32,7 @@ def init_chat_model(
     model: str,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel: ...
@@ -40,10 +40,10 @@ def init_chat_model(

 @overload
 def init_chat_model(
-    model: Literal[None] = None,
+    model: None = None,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
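
Both overloads above swap `Literal[None]` for a bare `None` annotation. For context: PEP 484 treats `None` in an annotation as `type(None)`, and PEP 586 defines `Literal[None]` as exactly that same type, so the change is purely a spelling simplification for type checkers. A minimal sketch (function names are illustrative):

```python
from typing import Literal


# PEP 484: a bare `None` annotation means type(None).
# PEP 586: Literal[None] is defined as equivalent to None.
# These two signatures are therefore interchangeable to a type checker,
# and the bare `None` spelling is the preferred form.
def f(x: None = None) -> None: ...
def g(x: Literal[None] = None) -> None: ...


assert f.__annotations__["x"] is None  # the runtime annotation is literally None
```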
@@ -73,8 +73,9 @@ def init_chat_model(
 ) -> BaseChatModel | _ConfigurableModel:
     """Initialize a ChatModel from the model name and provider.

-    **Note:** Must have the integration package corresponding to the model provider
-    installed.
+    !!! note
+        Must have the integration package corresponding to the model provider
+        installed.

     Args:
         model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
@@ -128,21 +129,21 @@ def init_chat_model(

             Fields are assumed to have config_prefix stripped if there is a
             config_prefix. If model is specified, then defaults to None. If model is
-            not specified, then defaults to ``("model", "model_provider")``.
+            not specified, then defaults to `("model", "model_provider")`.

-            ***Security Note***: Setting ``configurable_fields="any"`` means fields like
+            **Security Note**: Setting `configurable_fields="any"` means fields like
             api_key, base_url, etc. can be altered at runtime, potentially redirecting
             model requests to a different service/user. Make sure that if you're
             accepting untrusted configurations that you enumerate the
-            ``configurable_fields=(...)`` explicitly.
+            `configurable_fields=(...)` explicitly.

         config_prefix: If config_prefix is a non-empty string then model will be
             configurable at runtime via the
-            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
+            `config["configurable"]["{config_prefix}_{param}"]` keys. If
             config_prefix is an empty string then model will be configurable via
-            ``config["configurable"]["{param}"]``.
+            `config["configurable"]["{param}"]`.
         kwargs: Additional model-specific keyword args to pass to
-            ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``. Examples
+            `<<selected ChatModel>>.__init__(model=model_name, **kwargs)`. Examples
             include:
             * temperature: Model temperature.
             * max_tokens: Max output tokens.
@@ -151,7 +152,7 @@ def init_chat_model(
             * max_retries: The maximum number of attempts the system will make to resend a
              request if it fails due to issues like network timeouts or rate limits.
             * base_url: The URL of the API endpoint where requests are sent.
-            * rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding
+            * rate_limiter: A `BaseRateLimiter` to space out requests to avoid exceeding
              rate limits.

     Returns:
@@ -163,117 +164,111 @@ def init_chat_model(
         ValueError: If model_provider cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.

-    ??? note "Init non-configurable model"
-        :open:
+    ???+ note "Init non-configurable model"

-        .. code-block:: python
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
+        from langchain.chat_models import init_chat_model

-            # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
-            from langchain.chat_models import init_chat_model
-
-            o3_mini = init_chat_model("openai:o3-mini", temperature=0)
-            claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-            gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
-
-            o3_mini.invoke("what's your name")
-            claude_sonnet.invoke("what's your name")
-            gemini_2_flash.invoke("what's your name")
+        o3_mini = init_chat_model("openai:o3-mini", temperature=0)
+        claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
+        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)

+        o3_mini.invoke("what's your name")
+        claude_sonnet.invoke("what's your name")
+        gemini_2_flash.invoke("what's your name")
+        ```

     ??? note "Partially configurable model with no default"

-        .. code-block:: python
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model

-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+        # We don't need to specify configurable=True if a model isn't specified.
+        configurable_model = init_chat_model(temperature=0)

-            # We don't need to specify configurable=True if a model isn't specified.
-            configurable_model = init_chat_model(temperature=0)
+        configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
+        # GPT-4o response

-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "gpt-4o"}}
-            )
-            # GPT-4o response
-
-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
-            )
-            # claude-3.5 sonnet response
+        configurable_model.invoke(
+            "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
+        )
+        # claude-3.5 sonnet response
+        ```

     ??? note "Fully configurable model with a default"

-        .. code-block:: python
-
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model

-            configurable_model_with_default = init_chat_model(
-                "openai:gpt-4o",
-                configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
-                config_prefix="foo",
-                temperature=0,
-            )
+        configurable_model_with_default = init_chat_model(
+            "openai:gpt-4o",
+            configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+            config_prefix="foo",
+            temperature=0,
+        )

-            configurable_model_with_default.invoke("what's your name")
-            # GPT-4o response with temperature 0
-
-            configurable_model_with_default.invoke(
-                "what's your name",
-                config={
-                    "configurable": {
-                        "foo_model": "anthropic:claude-3-5-sonnet-latest",
-                        "foo_temperature": 0.6,
-                    }
-                },
-            )
-            # Claude-3.5 sonnet response with temperature 0.6
+        configurable_model_with_default.invoke("what's your name")
+        # GPT-4o response with temperature 0
+
+        configurable_model_with_default.invoke(
+            "what's your name",
+            config={
+                "configurable": {
+                    "foo_model": "anthropic:claude-3-5-sonnet-latest",
+                    "foo_temperature": 0.6,
+                }
+            },
+        )
+        # Claude-3.5 sonnet response with temperature 0.6
+        ```

     ??? note "Bind tools to a configurable model"

         You can call any ChatModel declarative methods on a configurable model in the
         same way that you would with a normal model.

-        .. code-block:: python
-
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
-            from pydantic import BaseModel, Field
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model
+        from pydantic import BaseModel, Field


-            class GetWeather(BaseModel):
-                '''Get the current weather in a given location'''
+        class GetWeather(BaseModel):
+            '''Get the current weather in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


-            class GetPopulation(BaseModel):
-                '''Get the current population in a given location'''
+        class GetPopulation(BaseModel):
+            '''Get the current population in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


-            configurable_model = init_chat_model(
-                "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
-            )
+        configurable_model = init_chat_model(
+            "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
+        )

-            configurable_model_with_tools = configurable_model.bind_tools(
-                [GetWeather, GetPopulation]
-            )
-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?"
-            )
-            # GPT-4o response with tool calls
+        configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?"
+        )
+        # GPT-4o response with tool calls

-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?",
-                config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
-            )
-            # Claude-3.5 sonnet response with tools
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?",
+            config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
+        )
+        # Claude-3.5 sonnet response with tools
+        ```

     !!! version-added "Added in version 0.2.7"

     !!! warning "Behavior changed in 0.2.8"
-        Support for ``configurable_fields`` and ``config_prefix`` added.
+        Support for `configurable_fields` and `config_prefix` added.

     !!! warning "Behavior changed in 0.2.12"
         Support for Ollama via langchain-ollama package added
@@ -626,7 +621,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):

     @property
     def InputType(self) -> TypeAlias:
-        """Get the input type for this runnable."""
+        """Get the input type for this `Runnable`."""
         from langchain_core.prompt_values import (
             ChatPromptValueConcrete,
             StringPromptValue,
langchain/embeddings/__init__.py

@@ -3,10 +3,8 @@
 from langchain_core.embeddings import Embeddings

 from langchain.embeddings.base import init_embeddings
-from langchain.embeddings.cache import CacheBackedEmbeddings

 __all__ = [
-    "CacheBackedEmbeddings",
     "Embeddings",
     "init_embeddings",
 ]
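
After this removal, `langchain.embeddings` exposes only the two names left in `__all__`. A quick sketch of the surviving surface (the provider-prefixed model string is illustrative and needs `langchain-openai` installed):

```python
from langchain.embeddings import Embeddings, init_embeddings

# init_embeddings remains the supported entry point; CacheBackedEmbeddings
# is no longer importable from this package as of a14.
model: Embeddings = init_embeddings("openai:text-embedding-3-small")
print(model.embed_query("Hello, world!")[:3])  # first few vector dimensions
```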
langchain/embeddings/base.py

@@ -35,13 +35,13 @@ def _parse_model_string(model_name: str) -> tuple[str, str]:
     Returns:
         A tuple of (provider, model_name)

-    .. code-block:: python
+    ```python
+    _parse_model_string("openai:text-embedding-3-small")
+    # Returns: ("openai", "text-embedding-3-small")

-        _parse_model_string("openai:text-embedding-3-small")
-        # Returns: ("openai", "text-embedding-3-small")
-
-        _parse_model_string("bedrock:amazon.titan-embed-text-v1")
-        # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+    _parse_model_string("bedrock:amazon.titan-embed-text-v1")
+    # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+    ```

     Raises:
         ValueError: If the model string is not in the correct format or
@@ -128,8 +128,9 @@ def init_embeddings(
 ) -> Embeddings:
     """Initialize an embeddings model from a model name and optional provider.

-    **Note:** Must have the integration package corresponding to the model provider
-    installed.
+    !!! note
+        Must have the integration package corresponding to the model provider
+        installed.

     Args:
         model: Name of the model to use. Can be either:
@@ -151,21 +152,20 @@ def init_embeddings(
         ValueError: If the model provider is not supported or cannot be determined
         ImportError: If the required provider package is not installed

-    ??? note "Example Usage"
-        :open:
-
-        .. code-block:: python
+    ???+ note "Example Usage"

-            # Using a model string
-            model = init_embeddings("openai:text-embedding-3-small")
-            model.embed_query("Hello, world!")
+        ```python
+        # Using a model string
+        model = init_embeddings("openai:text-embedding-3-small")
+        model.embed_query("Hello, world!")

-            # Using explicit provider
-            model = init_embeddings(model="text-embedding-3-small", provider="openai")
-            model.embed_documents(["Hello, world!", "Goodbye, world!"])
+        # Using explicit provider
+        model = init_embeddings(model="text-embedding-3-small", provider="openai")
+        model.embed_documents(["Hello, world!", "Goodbye, world!"])

-            # With additional parameters
-            model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
+        # With additional parameters
+        model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
+        ```

     !!! version-added "Added in version 0.3.9"

langchain/messages/__init__.py

@@ -3,27 +3,61 @@
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
+    Annotation,
     AnyMessage,
+    AudioContentBlock,
+    Citation,
+    ContentBlock,
+    DataContentBlock,
+    FileContentBlock,
     HumanMessage,
+    ImageContentBlock,
     InvalidToolCall,
     MessageLikeRepresentation,
+    NonStandardAnnotation,
+    NonStandardContentBlock,
+    PlainTextContentBlock,
+    ReasoningContentBlock,
+    RemoveMessage,
+    ServerToolCall,
+    ServerToolCallChunk,
+    ServerToolResult,
     SystemMessage,
+    TextContentBlock,
     ToolCall,
     ToolCallChunk,
     ToolMessage,
+    VideoContentBlock,
     trim_messages,
 )

 __all__ = [
     "AIMessage",
     "AIMessageChunk",
+    "Annotation",
     "AnyMessage",
+    "AudioContentBlock",
+    "Citation",
+    "ContentBlock",
+    "DataContentBlock",
+    "FileContentBlock",
     "HumanMessage",
+    "ImageContentBlock",
     "InvalidToolCall",
     "MessageLikeRepresentation",
+    "NonStandardAnnotation",
+    "NonStandardContentBlock",
+    "PlainTextContentBlock",
+    "ReasoningContentBlock",
+    "RemoveMessage",
+    "ServerToolCall",
+    "ServerToolCallChunk",
+    "ServerToolResult",
     "SystemMessage",
+    "TextContentBlock",
     "ToolCall",
     "ToolCallChunk",
     "ToolMessage",
+    "VideoContentBlock",
     "trim_messages",
 ]
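
This hunk is purely additive: `langchain.messages` now re-exports the standard content-block and annotation types alongside the message classes. A hedged sketch of consuming them, assuming the blocks are `type`-discriminated TypedDicts as in their `langchain_core.messages` definitions:

```python
from langchain.messages import AIMessage, TextContentBlock

# Assumption: TextContentBlock is a TypedDict shaped like
# {"type": "text", "text": ...}, per its langchain_core definition.
block: TextContentBlock = {"type": "text", "text": "Hello from a14"}
msg = AIMessage(content=[block])

# Message content may be a string or a list of blocks; filter by "type".
for item in msg.content:
    if isinstance(item, dict) and item.get("type") == "text":
        print(item["text"])
```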
langchain/tools/__init__.py

@@ -8,11 +8,7 @@ from langchain_core.tools import (
     tool,
 )

-from langchain.tools.tool_node import (
-    InjectedState,
-    InjectedStore,
-    ToolNode,
-)
+from langchain.tools.tool_node import InjectedState, InjectedStore, ToolInvocationError

 __all__ = [
     "BaseTool",
@@ -21,6 +17,6 @@ __all__ = [
     "InjectedToolArg",
     "InjectedToolCallId",
     "ToolException",
-    "ToolNode",
+    "ToolInvocationError",
     "tool",
 ]
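
Net effect of the two tools hunks: `ToolNode` leaves the public `langchain.tools` namespace while `ToolInvocationError` joins it. A hedged sketch of the new import for downstream code; only the name and import path are confirmed by this diff, and the except-clause semantics are an assumption:

```python
from langchain.tools import ToolInvocationError, tool


@tool
def search(query: str) -> str:
    """Look up a query string."""
    return f"results for {query}"


def run_tool(args: dict) -> str:
    try:
        return search.invoke(args)
    except ToolInvocationError as exc:
        # Assumption: signals a tool call that could not be executed;
        # the diff confirms only that this exception type is exported here.
        return f"tool invocation failed: {exc}"


print(run_tool({"query": "langchain 1.0.0a14"}))
```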