langchain 1.0.0a11__py3-none-any.whl → 1.0.0a13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic by the registry.

Files changed (34)
  1. langchain/__init__.py +1 -1
  2. langchain/agents/factory.py +511 -180
  3. langchain/agents/middleware/__init__.py +9 -3
  4. langchain/agents/middleware/context_editing.py +15 -14
  5. langchain/agents/middleware/human_in_the_loop.py +213 -170
  6. langchain/agents/middleware/model_call_limit.py +2 -2
  7. langchain/agents/middleware/model_fallback.py +46 -36
  8. langchain/agents/middleware/pii.py +19 -19
  9. langchain/agents/middleware/planning.py +16 -11
  10. langchain/agents/middleware/prompt_caching.py +14 -11
  11. langchain/agents/middleware/summarization.py +1 -1
  12. langchain/agents/middleware/tool_call_limit.py +5 -5
  13. langchain/agents/middleware/tool_emulator.py +200 -0
  14. langchain/agents/middleware/tool_selection.py +25 -21
  15. langchain/agents/middleware/types.py +484 -225
  16. langchain/chat_models/base.py +85 -90
  17. langchain/embeddings/base.py +20 -20
  18. langchain/embeddings/cache.py +21 -21
  19. langchain/messages/__init__.py +2 -0
  20. langchain/storage/encoder_backed.py +22 -23
  21. langchain/tools/tool_node.py +388 -80
  22. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/METADATA +8 -5
  23. langchain-1.0.0a13.dist-info/RECORD +36 -0
  24. langchain/_internal/__init__.py +0 -0
  25. langchain/_internal/_documents.py +0 -35
  26. langchain/_internal/_lazy_import.py +0 -35
  27. langchain/_internal/_prompts.py +0 -158
  28. langchain/_internal/_typing.py +0 -70
  29. langchain/_internal/_utils.py +0 -7
  30. langchain/agents/_internal/__init__.py +0 -1
  31. langchain/agents/_internal/_typing.py +0 -13
  32. langchain-1.0.0a11.dist-info/RECORD +0 -43
  33. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/WHEEL +0 -0
  34. {langchain-1.0.0a11.dist-info → langchain-1.0.0a13.dist-info}/licenses/LICENSE +0 -0
@@ -32,7 +32,7 @@ def init_chat_model(
     model: str,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> BaseChatModel: ...
@@ -40,10 +40,10 @@ def init_chat_model(

 @overload
 def init_chat_model(
-    model: Literal[None] = None,
+    model: None = None,
     *,
     model_provider: str | None = None,
-    configurable_fields: Literal[None] = None,
+    configurable_fields: None = None,
     config_prefix: str | None = None,
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
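The two overloads above only swap `Literal[None]` for the plain `None` annotation; resolution is unchanged. A minimal sketch of what each overload returns for callers, based on the docstring examples later in this diff (assumes `langchain-openai` is installed):

```python
from langchain.chat_models import init_chat_model

# Overload 1: a concrete model string -> BaseChatModel
fixed = init_chat_model("openai:gpt-4o")
fixed.invoke("hello")

# Overload 2: no model -> _ConfigurableModel, resolved from config at run time
flexible = init_chat_model()
flexible.invoke("hello", config={"configurable": {"model": "gpt-4o"}})
```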
@@ -73,8 +73,9 @@ def init_chat_model(
 ) -> BaseChatModel | _ConfigurableModel:
     """Initialize a ChatModel from the model name and provider.

-    **Note:** Must have the integration package corresponding to the model provider
-    installed.
+    !!! note
+        Must have the integration package corresponding to the model provider
+        installed.

     Args:
         model: The name of the model, e.g. "o3-mini", "claude-3-5-sonnet-latest". You can
@@ -128,21 +129,21 @@ def init_chat_model(

             Fields are assumed to have config_prefix stripped if there is a
             config_prefix. If model is specified, then defaults to None. If model is
-            not specified, then defaults to ``("model", "model_provider")``.
+            not specified, then defaults to `("model", "model_provider")`.

-            ***Security Note***: Setting ``configurable_fields="any"`` means fields like
+            **Security Note**: Setting `configurable_fields="any"` means fields like
             api_key, base_url, etc. can be altered at runtime, potentially redirecting
             model requests to a different service/user. Make sure that if you're
             accepting untrusted configurations that you enumerate the
-            ``configurable_fields=(...)`` explicitly.
+            `configurable_fields=(...)` explicitly.

         config_prefix: If config_prefix is a non-empty string then model will be
             configurable at runtime via the
-            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
+            `config["configurable"]["{config_prefix}_{param}"]` keys. If
             config_prefix is an empty string then model will be configurable via
-            ``config["configurable"]["{param}"]``.
+            `config["configurable"]["{param}"]`.
         kwargs: Additional model-specific keyword args to pass to
-            ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``. Examples
+            `<<selected ChatModel>>.__init__(model=model_name, **kwargs)`. Examples
             include:
             * temperature: Model temperature.
             * max_tokens: Max output tokens.
@@ -151,7 +152,7 @@ def init_chat_model(
             * max_retries: The maximum number of attempts the system will make to resend a
                 request if it fails due to issues like network timeouts or rate limits.
             * base_url: The URL of the API endpoint where requests are sent.
-            * rate_limiter: A ``BaseRateLimiter`` to space out requests to avoid exceeding
+            * rate_limiter: A `BaseRateLimiter` to space out requests to avoid exceeding
                 rate limits.

     Returns:
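For illustration, a hedged sketch of the kwargs listed above being forwarded to the selected chat model's `__init__`; `InMemoryRateLimiter` is langchain-core's stock `BaseRateLimiter` implementation (the specific values here are illustrative):

```python
from langchain.chat_models import init_chat_model
from langchain_core.rate_limiters import InMemoryRateLimiter

model = init_chat_model(
    "openai:gpt-4o",
    temperature=0,  # forwarded to ChatOpenAI.__init__
    max_tokens=512,
    timeout=30,
    max_retries=2,
    rate_limiter=InMemoryRateLimiter(requests_per_second=1),
)
```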
@@ -163,117 +164,111 @@ def init_chat_model(
         ValueError: If model_provider cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.

-    ??? note "Init non-configurable model"
-        :open:
+    ???+ note "Init non-configurable model"

-        .. code-block:: python
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
+        from langchain.chat_models import init_chat_model

-            # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
-            from langchain.chat_models import init_chat_model
-
-            o3_mini = init_chat_model("openai:o3-mini", temperature=0)
-            claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-            gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
-
-            o3_mini.invoke("what's your name")
-            claude_sonnet.invoke("what's your name")
-            gemini_2_flash.invoke("what's your name")
+        o3_mini = init_chat_model("openai:o3-mini", temperature=0)
+        claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
+        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)

+        o3_mini.invoke("what's your name")
+        claude_sonnet.invoke("what's your name")
+        gemini_2_flash.invoke("what's your name")
+        ```

     ??? note "Partially configurable model with no default"

-        .. code-block:: python
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model

-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+        # We don't need to specify configurable=True if a model isn't specified.
+        configurable_model = init_chat_model(temperature=0)

-            # We don't need to specify configurable=True if a model isn't specified.
-            configurable_model = init_chat_model(temperature=0)
+        configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
+        # GPT-4o response

-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "gpt-4o"}}
-            )
-            # GPT-4o response
-
-            configurable_model.invoke(
-                "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
-            )
-            # claude-3.5 sonnet response
+        configurable_model.invoke(
+            "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
+        )
+        # claude-3.5 sonnet response
+        ```

     ??? note "Fully configurable model with a default"

-        .. code-block:: python
-
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model

-            configurable_model_with_default = init_chat_model(
-                "openai:gpt-4o",
-                configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
-                config_prefix="foo",
-                temperature=0,
-            )
+        configurable_model_with_default = init_chat_model(
+            "openai:gpt-4o",
+            configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+            config_prefix="foo",
+            temperature=0,
+        )

-            configurable_model_with_default.invoke("what's your name")
-            # GPT-4o response with temperature 0
-
-            configurable_model_with_default.invoke(
-                "what's your name",
-                config={
-                    "configurable": {
-                        "foo_model": "anthropic:claude-3-5-sonnet-latest",
-                        "foo_temperature": 0.6,
-                    }
-                },
-            )
-            # Claude-3.5 sonnet response with temperature 0.6
+        configurable_model_with_default.invoke("what's your name")
+        # GPT-4o response with temperature 0
+
+        configurable_model_with_default.invoke(
+            "what's your name",
+            config={
+                "configurable": {
+                    "foo_model": "anthropic:claude-3-5-sonnet-latest",
+                    "foo_temperature": 0.6,
+                }
+            },
+        )
+        # Claude-3.5 sonnet response with temperature 0.6
+        ```

     ??? note "Bind tools to a configurable model"

         You can call any ChatModel declarative methods on a configurable model in the
         same way that you would with a normal model.

-        .. code-block:: python
-
-            # pip install langchain langchain-openai langchain-anthropic
-            from langchain.chat_models import init_chat_model
-            from pydantic import BaseModel, Field
+        ```python
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model
+        from pydantic import BaseModel, Field


-            class GetWeather(BaseModel):
-                '''Get the current weather in a given location'''
+        class GetWeather(BaseModel):
+            '''Get the current weather in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


-            class GetPopulation(BaseModel):
-                '''Get the current population in a given location'''
+        class GetPopulation(BaseModel):
+            '''Get the current population in a given location'''

-                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


-            configurable_model = init_chat_model(
-                "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
-            )
+        configurable_model = init_chat_model(
+            "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
+        )

-            configurable_model_with_tools = configurable_model.bind_tools(
-                [GetWeather, GetPopulation]
-            )
-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?"
-            )
-            # GPT-4o response with tool calls
+        configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?"
+        )
+        # GPT-4o response with tool calls

-            configurable_model_with_tools.invoke(
-                "Which city is hotter today and which is bigger: LA or NY?",
-                config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
-            )
-            # Claude-3.5 sonnet response with tools
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?",
+            config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
+        )
+        # Claude-3.5 sonnet response with tools
+        ```

     !!! version-added "Added in version 0.2.7"

     !!! warning "Behavior changed in 0.2.8"
-        Support for ``configurable_fields`` and ``config_prefix`` added.
+        Support for `configurable_fields` and `config_prefix` added.

     !!! warning "Behavior changed in 0.2.12"
         Support for Ollama via langchain-ollama package added
@@ -626,7 +621,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):

     @property
     def InputType(self) -> TypeAlias:
-        """Get the input type for this runnable."""
+        """Get the input type for this `Runnable`."""
         from langchain_core.prompt_values import (
             ChatPromptValueConcrete,
             StringPromptValue,
@@ -35,13 +35,13 @@ def _parse_model_string(model_name: str) -> tuple[str, str]:
     Returns:
         A tuple of (provider, model_name)

-    .. code-block:: python
+    ```python
+    _parse_model_string("openai:text-embedding-3-small")
+    # Returns: ("openai", "text-embedding-3-small")

-        _parse_model_string("openai:text-embedding-3-small")
-        # Returns: ("openai", "text-embedding-3-small")
-
-        _parse_model_string("bedrock:amazon.titan-embed-text-v1")
-        # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+    _parse_model_string("bedrock:amazon.titan-embed-text-v1")
+    # Returns: ("bedrock", "amazon.titan-embed-text-v1")
+    ```

     Raises:
         ValueError: If the model string is not in the correct format or
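The split this helper performs can be illustrated with a standalone sketch (a hypothetical re-implementation for illustration only; the real `_parse_model_string` also validates the provider against the supported list):

```python
def parse_model_string(model_name: str) -> tuple[str, str]:
    # Split on the first colon only, so model ids may themselves contain colons.
    provider, sep, model = model_name.partition(":")
    if not sep or not provider or not model:
        raise ValueError(f"Expected 'provider:model-name' format, got {model_name!r}")
    return provider, model


assert parse_model_string("openai:text-embedding-3-small") == (
    "openai",
    "text-embedding-3-small",
)
```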
@@ -128,8 +128,9 @@ def init_embeddings(
 ) -> Embeddings:
     """Initialize an embeddings model from a model name and optional provider.

-    **Note:** Must have the integration package corresponding to the model provider
-    installed.
+    !!! note
+        Must have the integration package corresponding to the model provider
+        installed.

     Args:
         model: Name of the model to use. Can be either:
@@ -151,21 +152,20 @@ def init_embeddings(
         ValueError: If the model provider is not supported or cannot be determined
         ImportError: If the required provider package is not installed

-    ??? note "Example Usage"
-        :open:
-
-        .. code-block:: python
+    ???+ note "Example Usage"

-            # Using a model string
-            model = init_embeddings("openai:text-embedding-3-small")
-            model.embed_query("Hello, world!")
+        ```python
+        # Using a model string
+        model = init_embeddings("openai:text-embedding-3-small")
+        model.embed_query("Hello, world!")

-            # Using explicit provider
-            model = init_embeddings(model="text-embedding-3-small", provider="openai")
-            model.embed_documents(["Hello, world!", "Goodbye, world!"])
+        # Using explicit provider
+        model = init_embeddings(model="text-embedding-3-small", provider="openai")
+        model.embed_documents(["Hello, world!", "Goodbye, world!"])

-            # With additional parameters
-            model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
+        # With additional parameters
+        model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
+        ```

     !!! version-added "Added in version 0.3.9"

@@ -49,10 +49,10 @@ def _make_default_key_encoder(namespace: str, algorithm: str) -> Callable[[str],
     Args:
         namespace: Prefix that segregates keys from different embedding models.
        algorithm:
-            * ``'sha1'`` - fast but not collision-resistant
-            * ``'blake2b'`` - cryptographically strong, faster than SHA-1
-            * ``'sha256'`` - cryptographically strong, slower than SHA-1
-            * ``'sha512'`` - cryptographically strong, slower than SHA-1
+            * `'sha1'` - fast but not collision-resistant
+            * `'blake2b'` - cryptographically strong, faster than SHA-1
+            * `'sha256'` - cryptographically strong, slower than SHA-1
+            * `'sha512'` - cryptographically strong, slower than SHA-1

     Returns:
         A function that encodes a key using the specified algorithm.
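A hedged sketch of the kind of encoder this factory returns (`make_key_encoder` is a hypothetical stand-in for illustration; the real helper is private to `langchain.embeddings.cache`):

```python
import hashlib
from collections.abc import Callable


def make_key_encoder(namespace: str, algorithm: str) -> Callable[[str], str]:
    def encode(key: str) -> str:
        # hashlib.new accepts the algorithm names listed above:
        # 'sha1', 'blake2b', 'sha256', 'sha512'.
        digest = hashlib.new(algorithm, key.encode("utf-8")).hexdigest()
        return namespace + digest

    return encode


encoder = make_key_encoder("text-embedding-3-small/", "sha256")
encoder("hello")  # -> 'text-embedding-3-small/2cf24dba...'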
@@ -122,24 +122,24 @@ class CacheBackedEmbeddings(Embeddings):
     embeddings too, pass in a query_embedding_store to constructor.

     Examples:
-        .. code-block: python
+        ```python
+        from langchain.embeddings import CacheBackedEmbeddings
+        from langchain.storage import LocalFileStore
+        from langchain_community.embeddings import OpenAIEmbeddings

-            from langchain.embeddings import CacheBackedEmbeddings
-            from langchain.storage import LocalFileStore
-            from langchain_community.embeddings import OpenAIEmbeddings
+        store = LocalFileStore("./my_cache")

-            store = LocalFileStore('./my_cache')
-
-            underlying_embedder = OpenAIEmbeddings()
-            embedder = CacheBackedEmbeddings.from_bytes_store(
-                underlying_embedder, store, namespace=underlying_embedder.model
-            )
+        underlying_embedder = OpenAIEmbeddings()
+        embedder = CacheBackedEmbeddings.from_bytes_store(
+            underlying_embedder, store, namespace=underlying_embedder.model
+        )

-            # Embedding is computed and cached
-            embeddings = embedder.embed_documents(["hello", "goodbye"])
+        # Embedding is computed and cached
+        embeddings = embedder.embed_documents(["hello", "goodbye"])

-            # Embeddings are retrieved from the cache, no computation is done
-            embeddings = embedder.embed_documents(["hello", "goodbye"])
+        # Embeddings are retrieved from the cache, no computation is done
+        embeddings = embedder.embed_documents(["hello", "goodbye"])
+        ```
     """

     def __init__(
@@ -157,7 +157,7 @@ class CacheBackedEmbeddings(Embeddings):
             document_embedding_store: The store to use for caching document embeddings.
             batch_size: The number of documents to embed between store updates.
             query_embedding_store: The store to use for caching query embeddings.
-                If ``None``, query embeddings are not cached.
+                If `None`, query embeddings are not cached.
         """
         super().__init__()
         self.document_embedding_store = document_embedding_store
@@ -235,7 +235,7 @@ class CacheBackedEmbeddings(Embeddings):
         """Embed query text.

         By default, this method does not cache queries. To enable caching, set the
-        ``cache_query`` parameter to ``True`` when initializing the embedder.
+        `cache_query` parameter to `True` when initializing the embedder.

         Args:
             text: The text to embed.
@@ -258,7 +258,7 @@ class CacheBackedEmbeddings(Embeddings):
         """Embed query text.

         By default, this method does not cache queries. To enable caching, set the
-        ``cache_query`` parameter to ``True`` when initializing the embedder.
+        `cache_query` parameter to `True` when initializing the embedder.

         Args:
             text: The text to embed.
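The two `embed_query` docstrings above point to enabling query caching at construction time. A hedged sketch, assuming the `query_embedding_cache` argument that `from_bytes_store` exposes in released langchain versions (pass `True` to reuse the document store, or a separate `ByteStore`); the `cache_query` name in the docstring may refer to this same toggle:

```python
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_openai import OpenAIEmbeddings

store = LocalFileStore("./my_cache")
embedder = CacheBackedEmbeddings.from_bytes_store(
    OpenAIEmbeddings(),
    store,
    namespace="text-embedding-3-small",
    query_embedding_cache=True,  # also cache embed_query results
)

embedder.embed_query("hello")  # computed and written to the cache
embedder.embed_query("hello")  # served from the cache
```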
@@ -7,6 +7,7 @@ from langchain_core.messages import (
     HumanMessage,
     InvalidToolCall,
     MessageLikeRepresentation,
+    RemoveMessage,
     SystemMessage,
     ToolCall,
     ToolCallChunk,
@@ -21,6 +22,7 @@ __all__ = [
     "HumanMessage",
     "InvalidToolCall",
     "MessageLikeRepresentation",
+    "RemoveMessage",
     "SystemMessage",
     "ToolCall",
     "ToolCallChunk",
@@ -17,39 +17,38 @@ class EncoderBackedStore(BaseStore[K, V]):

     Examples that uses JSON for encoding/decoding:

-    .. code-block:: python
+    ```python
+    import json

-        import json

+    def key_encoder(key: int) -> str:
+        return json.dumps(key)

-        def key_encoder(key: int) -> str:
-            return json.dumps(key)

+    def value_serializer(value: float) -> str:
+        return json.dumps(value)

-        def value_serializer(value: float) -> str:
-            return json.dumps(value)

+    def value_deserializer(serialized_value: str) -> float:
+        return json.loads(serialized_value)

-        def value_deserializer(serialized_value: str) -> float:
-            return json.loads(serialized_value)

+    # Create an instance of the abstract store
+    abstract_store = MyCustomStore()

-        # Create an instance of the abstract store
-        abstract_store = MyCustomStore()
-
-        # Create an instance of the encoder-backed store
-        store = EncoderBackedStore(
-            store=abstract_store,
-            key_encoder=key_encoder,
-            value_serializer=value_serializer,
-            value_deserializer=value_deserializer,
-        )
-
-        # Use the encoder-backed store methods
-        store.mset([(1, 3.14), (2, 2.718)])
-        values = store.mget([1, 2])  # Retrieves [3.14, 2.718]
-        store.mdelete([1, 2])  # Deletes the keys 1 and 2
+    # Create an instance of the encoder-backed store
+    store = EncoderBackedStore(
+        store=abstract_store,
+        key_encoder=key_encoder,
+        value_serializer=value_serializer,
+        value_deserializer=value_deserializer,
+    )

+    # Use the encoder-backed store methods
+    store.mset([(1, 3.14), (2, 2.718)])
+    values = store.mget([1, 2])  # Retrieves [3.14, 2.718]
+    store.mdelete([1, 2])  # Deletes the keys 1 and 2
+    ```
     """

     def __init__(