langchain 0.3.0.dev2__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (33)
  1. langchain/_api/module_import.py +2 -2
  2. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  3. langchain/agents/openai_assistant/base.py +3 -3
  4. langchain/chains/base.py +2 -3
  5. langchain/chains/combine_documents/map_reduce.py +10 -0
  6. langchain/chains/combine_documents/map_rerank.py +10 -0
  7. langchain/chains/combine_documents/reduce.py +10 -0
  8. langchain/chains/combine_documents/refine.py +10 -0
  9. langchain/chains/combine_documents/stuff.py +1 -1
  10. langchain/chains/conversation/base.py +1 -1
  11. langchain/chains/llm.py +4 -3
  12. langchain/chains/mapreduce.py +1 -1
  13. langchain/chains/openai_functions/qa_with_structure.py +2 -2
  14. langchain/chains/openai_functions/tagging.py +4 -4
  15. langchain/chains/qa_with_sources/base.py +2 -2
  16. langchain/chains/qa_with_sources/loading.py +5 -5
  17. langchain/chains/question_answering/chain.py +5 -5
  18. langchain/chains/retrieval_qa/base.py +3 -3
  19. langchain/chat_models/base.py +56 -38
  20. langchain/memory/buffer.py +36 -2
  21. langchain/memory/buffer_window.py +14 -1
  22. langchain/memory/chat_memory.py +18 -1
  23. langchain/memory/entity.py +51 -0
  24. langchain/memory/summary.py +14 -1
  25. langchain/memory/summary_buffer.py +15 -1
  26. langchain/memory/token_buffer.py +14 -1
  27. langchain/memory/vectorstore.py +12 -1
  28. langchain/retrievers/ensemble.py +14 -11
  29. {langchain-0.3.0.dev2.dist-info → langchain-0.3.2.dist-info}/METADATA +3 -3
  30. {langchain-0.3.0.dev2.dist-info → langchain-0.3.2.dist-info}/RECORD +33 -33
  31. {langchain-0.3.0.dev2.dist-info → langchain-0.3.2.dist-info}/LICENSE +0 -0
  32. {langchain-0.3.0.dev2.dist-info → langchain-0.3.2.dist-info}/WHEEL +0 -0
  33. {langchain-0.3.0.dev2.dist-info → langchain-0.3.2.dist-info}/entry_points.txt +0 -0
@@ -101,7 +101,7 @@ def create_importer(
  f">> from {new_module} import {name}\n"
  "You can use the langchain cli to **automatically** "
  "upgrade many imports. Please see documentation here "
- "<https://python.langchain.com/v0.2/docs/versions/v0_2/>"
+ "<https://python.langchain.com/docs/versions/v0_2/>"
  ),
  )
  return result
@@ -133,7 +133,7 @@ def create_importer(
  f">> from {fallback_module} import {name}\n"
  "You can use the langchain cli to **automatically** "
  "upgrade many imports. Please see documentation here "
- "<https://python.langchain.com/v0.2/docs/versions/v0_2/>"
+ "<https://python.langchain.com/docs/versions/v0_2/>"
  ),
  )
  return result
@@ -23,7 +23,7 @@ from langchain.chains.llm import LLMChain
  "See API reference for this function for a replacement implementation: "
  "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501
  "Read more here on how to create agents that query vector stores: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+ "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
  ),
  )
  def create_vectorstore_agent(
@@ -112,7 +112,7 @@ def create_vectorstore_agent(
  "See API reference for this function for a replacement implementation: "
  "https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501
  "Read more here on how to create agents that query vector stores: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
+ "https://python.langchain.com/docs/how_to/qa_chat_history_how_to/#agents"
  ),
  )
  def create_vectorstore_router_agent(
@@ -277,7 +277,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
  return cls(assistant_id=assistant.id, client=client, **kwargs)

  def invoke(
- self, input: dict, config: Optional[RunnableConfig] = None
+ self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any
  ) -> OutputType:
  """Invoke assistant.

@@ -310,7 +310,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
  inheritable_metadata=config.get("metadata"),
  )
  run_manager = callback_manager.on_chain_start(
- dumpd(self), input, name=config.get("run_name")
+ dumpd(self), input, name=config.get("run_name") or self.get_name()
  )
  try:
  # Being run within AgentExecutor and there are tool outputs to submit.
@@ -429,7 +429,7 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
  inheritable_metadata=config.get("metadata"),
  )
  run_manager = callback_manager.on_chain_start(
- dumpd(self), input, name=config.get("run_name")
+ dumpd(self), input, name=config.get("run_name") or self.get_name()
  )
  try:
  # Being run within AgentExecutor and there are tool outputs to submit.
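The three hunks above widen `invoke` to accept extra keyword arguments and make the traced run name fall back to the runnable's own name when the config does not set `run_name`. A minimal sketch of supplying an explicit run name, assuming OpenAI credentials and an existing assistant; the assistant ID below is a placeholder:

```python
# Sketch only: "asst_..." is a placeholder for a real assistant ID.
from langchain.agents.openai_assistant import OpenAIAssistantRunnable

assistant = OpenAIAssistantRunnable(assistant_id="asst_...", as_agent=False)

# If "run_name" is omitted from the config, langchain 0.3.2 names the trace
# after the runnable itself instead of leaving it unset.
output = assistant.invoke(
    {"content": "What's 10 - 4 raised to the 2.7?"},
    config={"run_name": "assistant-math-call"},
)
```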
langchain/chains/base.py CHANGED
@@ -18,7 +18,6 @@ from langchain_core.callbacks import (
  CallbackManagerForChainRun,
  Callbacks,
  )
- from langchain_core.load.dump import dumpd
  from langchain_core.memory import BaseMemory
  from langchain_core.outputs import RunInfo
  from langchain_core.runnables import (
@@ -150,7 +149,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")

  run_manager = callback_manager.on_chain_start(
- dumpd(self),
+ None,
  inputs,
  run_id,
  name=run_name,
@@ -202,7 +201,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  )
  new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
  run_manager = await callback_manager.on_chain_start(
- dumpd(self),
+ None,
  inputs,
  run_id,
  name=run_name,
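With `dumpd(self)` replaced by `None`, chain callbacks no longer receive a serialized representation of the chain, so custom handlers should not rely on the `serialized` argument being populated. A hedged sketch of a handler that tolerates this:

```python
# Sketch of a callback handler that copes with serialized=None, which the
# hunks above now pass to on_chain_start instead of dumpd(self).
from typing import Any, Dict, Optional
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler


class ChainLogger(BaseCallbackHandler):
    def on_chain_start(
        self,
        serialized: Optional[Dict[str, Any]],
        inputs: Dict[str, Any],
        *,
        run_id: UUID,
        **kwargs: Any,
    ) -> None:
        # Prefer the explicit name passed by the callback manager; fall back
        # to the serialized payload only if it happens to be present.
        name = kwargs.get("name") or (serialized or {}).get("name", "<chain>")
        print(f"chain start: {name} ({run_id})")
```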
@@ -4,6 +4,7 @@ from __future__ import annotations

  from typing import Any, Dict, List, Optional, Tuple, Type

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from langchain_core.runnables.config import RunnableConfig
@@ -15,6 +16,15 @@ from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
  from langchain.chains.llm import LLMChain


+ @deprecated(
+ since="0.3.1",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Please see the migration guide here for "
+ "a recommended replacement: "
+ "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"
+ ),
+ )
  class MapReduceDocumentsChain(BaseCombineDocumentsChain):
  """Combining documents by mapping a chain over them, then combining results.

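The decorator added here (and on the other combine-documents chains below) is langchain_core's `@deprecated`, which emits a `LangChainDeprecationWarning` when the decorated class is used. A hedged sketch of filtering that category during a migration window, not recommended as a long-term fix:

```python
# Sketch: silence only LangChain's deprecation warnings while migrating.
import warnings

from langchain_core._api import LangChainDeprecationWarning

warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
```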
@@ -4,6 +4,7 @@ from __future__ import annotations

  from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from langchain_core.runnables.config import RunnableConfig
@@ -16,6 +17,15 @@ from langchain.chains.llm import LLMChain
  from langchain.output_parsers.regex import RegexParser


+ @deprecated(
+ since="0.3.1",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Please see the migration guide here for "
+ "a recommended replacement: "
+ "https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain/" # noqa: E501
+ ),
+ )
  class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  """Combining documents by mapping a chain over them, then reranking results.

@@ -4,6 +4,7 @@ from __future__ import annotations

  from typing import Any, Callable, List, Optional, Protocol, Tuple

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from pydantic import ConfigDict
@@ -121,6 +122,15 @@ async def acollapse_docs(
  return Document(page_content=result, metadata=combined_metadata)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Please see the migration guide here for "
+ "a recommended replacement: "
+ "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/"
+ ),
+ )
  class ReduceDocumentsChain(BaseCombineDocumentsChain):
  """Combine documents by recursively reducing them.

@@ -4,6 +4,7 @@ from __future__ import annotations

  from typing import Any, Dict, List, Tuple

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from langchain_core.prompts import BasePromptTemplate, format_document
@@ -20,6 +21,15 @@ def _get_default_document_prompt() -> PromptTemplate:
  return PromptTemplate(input_variables=["page_content"], template="{page_content}")


+ @deprecated(
+ since="0.3.1",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Please see the migration guide here for "
+ "a recommended replacement: "
+ "https://python.langchain.com/docs/versions/migrating_chains/refine_docs_chain/" # noqa: E501
+ ),
+ )
  class RefineDocumentsChain(BaseCombineDocumentsChain):
  """Combine documents by doing a first pass and then refining on more documents.

@@ -102,7 +102,7 @@ def create_stuff_documents_chain(
  message=(
  "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
  "instead. See migration guide here: "
- "https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
+ "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
  ),
  )
  class StuffDocumentsChain(BaseCombineDocumentsChain):
@@ -25,7 +25,7 @@ class ConversationChain(LLMChain):
  """Chain to have a conversation and load context from memory.

  This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
- to this tutorial for more detail: https://python.langchain.com/v0.2/docs/tutorials/chatbot/
+ to this tutorial for more detail: https://python.langchain.com/docs/tutorials/chatbot/

  ``RunnableWithMessageHistory`` offers several benefits, including:

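The docstring edit above keeps pointing users at ``RunnableWithMessageHistory`` as the replacement for ConversationChain. A minimal hedged sketch of that pattern; the chat model and the in-memory session store are illustrative assumptions, not part of this diff:

```python
# Sketch of the RunnableWithMessageHistory replacement; model choice and the
# in-memory session store are assumptions for illustration.
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

store: dict[str, InMemoryChatMessageHistory] = {}


def get_history(session_id: str) -> InMemoryChatMessageHistory:
    # One mutable history object per session id.
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chain = RunnableWithMessageHistory(ChatOpenAI(model="gpt-4o-mini"), get_history)
chain.invoke("Hi, I'm Bob.", config={"configurable": {"session_id": "demo"}})
chain.invoke("What's my name?", config={"configurable": {"session_id": "demo"}})
```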
langchain/chains/llm.py CHANGED
@@ -17,7 +17,6 @@ from langchain_core.language_models import (
  BaseLanguageModel,
  LanguageModelInput,
  )
- from langchain_core.load.dump import dumpd
  from langchain_core.messages import BaseMessage
  from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
  from langchain_core.outputs import ChatGeneration, Generation, LLMResult
@@ -241,8 +240,9 @@ class LLMChain(Chain):
  callbacks, self.callbacks, self.verbose
  )
  run_manager = callback_manager.on_chain_start(
- dumpd(self),
+ None,
  {"input_list": input_list},
+ name=self.get_name(),
  )
  try:
  response = self.generate(input_list, run_manager=run_manager)
@@ -261,8 +261,9 @@ class LLMChain(Chain):
  callbacks, self.callbacks, self.verbose
  )
  run_manager = await callback_manager.on_chain_start(
- dumpd(self),
+ None,
  {"input_list": input_list},
+ name=self.get_name(),
  )
  try:
  response = await self.agenerate(input_list, run_manager=run_manager)
@@ -31,7 +31,7 @@ from langchain.chains.llm import LLMChain
  "Refer here for a recommended map-reduce implementation using langgraph: "
  "https://langchain-ai.github.io/langgraph/how-tos/map-reduce/. See also "
  "migration guide: "
- "https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
+ "https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
  ),
  )
  class MapReduceChain(Chain):
@@ -32,7 +32,7 @@ class AnswerWithSources(BaseModel):
  message=(
  "This function is deprecated. Refer to this guide on retrieval and question "
  "answering with structured responses: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
+ "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
  ),
  )
  def create_qa_with_structure_chain(
@@ -114,7 +114,7 @@ def create_qa_with_structure_chain(
  message=(
  "This function is deprecated. Refer to this guide on retrieval and question "
  "answering with sources: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
+ "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
  ),
  )
  def create_qa_with_sources_chain(
@@ -38,7 +38,7 @@ Passage:
  "See API reference for this function for replacement: "
  "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain.html> " # noqa: E501
  "You can read more about `with_structured_output` here: "
- "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+ "<https://python.langchain.com/docs/how_to/structured_output/>. "
  "If you notice other issues, please provide "
  "feedback here: "
  "<https://github.com/langchain-ai/langchain/discussions/18154>"
@@ -78,7 +78,7 @@ def create_tagging_chain(
  "Why did the cat cross the road? To get to the other "
  "side... and then lay down in the middle of it!"
  )
- Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+ Read more here: https://python.langchain.com/docs/how_to/structured_output/

  Args:
  schema: The schema of the entities to extract.
@@ -109,7 +109,7 @@ def create_tagging_chain(
  "See API reference for this function for replacement: "
  "<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain_pydantic.html> " # noqa: E501
  "You can read more about `with_structured_output` here: "
- "<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
+ "<https://python.langchain.com/docs/how_to/structured_output/>. "
  "If you notice other issues, please provide "
  "feedback here: "
  "<https://github.com/langchain-ai/langchain/discussions/18154>"
@@ -147,7 +147,7 @@ def create_tagging_chain_pydantic(
  "Why did the cat cross the road? To get to the other "
  "side... and then lay down in the middle of it!"
  )
- Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
+ Read more here: https://python.langchain.com/docs/how_to/structured_output/

  Args:
  pydantic_schema: The pydantic schema of the entities to extract.
@@ -37,7 +37,7 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
  message=(
  "This class is deprecated. Refer to this guide on retrieval and question "
  "answering with sources: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ "https://python.langchain.com/docs/how_to/qa_sources/"
  ),
  )
  class BaseQAWithSourcesChain(Chain, ABC):
@@ -216,7 +216,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
  message=(
  "This class is deprecated. Refer to this guide on retrieval and question "
  "answering with sources: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ "https://python.langchain.com/docs/how_to/qa_sources/"
  ),
  )
  class QAWithSourcesChain(BaseQAWithSourcesChain):
@@ -158,13 +158,13 @@ def _load_refine_chain(
  message=(
  "This function is deprecated. Refer to this guide on retrieval and question "
  "answering with sources: "
- "https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
+ "https://python.langchain.com/docs/how_to/qa_sources/"
  "\nSee also the following migration guides for replacements "
  "based on `chain_type`:\n"
- "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
- "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
- "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
- "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
+ "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
+ "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
+ "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
+ "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
  ),
  )
  def load_qa_with_sources_chain(
@@ -223,12 +223,12 @@ def _load_refine_chain(
  message=(
  "This class is deprecated. See the following migration guides for replacements "
  "based on `chain_type`:\n"
- "stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
- "map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
- "refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
- "map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
+ "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
+ "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
+ "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
+ "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
  "\nSee also guides on retrieval and question-answering here: "
- "https://python.langchain.com/v0.2/docs/how_to/#qa-with-rag"
+ "https://python.langchain.com/docs/how_to/#qa-with-rag"
  ),
  )
  def load_qa_chain(
@@ -34,7 +34,7 @@ from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
  message=(
  "This class is deprecated. Use the `create_retrieval_chain` constructor "
  "instead. See migration guide here: "
- "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
  ),
  )
  class BaseRetrievalQA(Chain):
@@ -210,7 +210,7 @@ class BaseRetrievalQA(Chain):
  message=(
  "This class is deprecated. Use the `create_retrieval_chain` constructor "
  "instead. See migration guide here: "
- "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
  ),
  )
  class RetrievalQA(BaseRetrievalQA):
@@ -295,7 +295,7 @@ class RetrievalQA(BaseRetrievalQA):
  message=(
  "This class is deprecated. Use the `create_retrieval_chain` constructor "
  "instead. See migration guide here: "
- "https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
+ "https://python.langchain.com/docs/versions/migrating_chains/retrieval_qa/"
  ),
  )
  class VectorDBQA(BaseRetrievalQA):
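All three deprecation messages above point to `create_retrieval_chain`. A hedged sketch of that replacement wiring; the `retriever`, chat model, and prompt are assumptions rather than anything defined in this diff:

```python
# Sketch of the create_retrieval_chain replacement named in the messages
# above; `retriever` is assumed to exist already (e.g. from a vector store).
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer using only this context:\n\n{context}"),
        ("human", "{input}"),
    ]
)
combine_docs_chain = create_stuff_documents_chain(ChatOpenAI(), prompt)
rag_chain = create_retrieval_chain(retriever, combine_docs_chain)  # noqa: F821

result = rag_chain.invoke({"input": "What does the report conclude?"})
print(result["answer"])
```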
@@ -98,48 +98,42 @@ def init_chat_model(

  Must have the integration package corresponding to the model provider installed.

- .. versionadded:: 0.2.7
-
- .. versionchanged:: 0.2.8
-
- Support for ``configurable_fields`` and ``config_prefix`` added.
-
- .. versionchanged:: 0.2.12
-
- Support for Ollama via langchain-ollama package added. Previously
- langchain-community version of Ollama (now deprecated) was installed by default.
-
  Args:
  model: The name of the model, e.g. "gpt-4o", "claude-3-opus-20240229".
  model_provider: The model provider. Supported model_provider values and the
  corresponding integration package:
- - openai (langchain-openai)
- - anthropic (langchain-anthropic)
- - azure_openai (langchain-openai)
- - google_vertexai (langchain-google-vertexai)
- - google_genai (langchain-google-genai)
- - bedrock (langchain-aws)
- - cohere (langchain-cohere)
- - fireworks (langchain-fireworks)
- - together (langchain-together)
- - mistralai (langchain-mistralai)
- - huggingface (langchain-huggingface)
- - groq (langchain-groq)
- - ollama (langchain-ollama) [support added in langchain==0.2.12]
+
+ - openai (langchain-openai)
+ - anthropic (langchain-anthropic)
+ - azure_openai (langchain-openai)
+ - google_vertexai (langchain-google-vertexai)
+ - google_genai (langchain-google-genai)
+ - bedrock (langchain-aws)
+ - bedrock_converse (langchain-aws)
+ - cohere (langchain-cohere)
+ - fireworks (langchain-fireworks)
+ - together (langchain-together)
+ - mistralai (langchain-mistralai)
+ - huggingface (langchain-huggingface)
+ - groq (langchain-groq)
+ - ollama (langchain-ollama) [support added in langchain==0.2.12]

  Will attempt to infer model_provider from model if not specified. The
  following providers will be inferred based on these model prefixes:
- - gpt-3... or gpt-4... -> openai
- - claude... -> anthropic
- - amazon.... -> bedrock
- - gemini... -> google_vertexai
- - command... -> cohere
- - accounts/fireworks... -> fireworks
+
+ - gpt-3..., gpt-4..., or o1... -> openai
+ - claude... -> anthropic
+ - amazon.... -> bedrock
+ - gemini... -> google_vertexai
+ - command... -> cohere
+ - accounts/fireworks... -> fireworks
+ - mistral... -> mistralai
  configurable_fields: Which model parameters are
  configurable:
- - None: No configurable fields.
- - "any": All fields are configurable. *See Security Note below.*
- - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
+
+ - None: No configurable fields.
+ - "any": All fields are configurable. *See Security Note below.*
+ - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.

  Fields are assumed to have config_prefix stripped if there is a
  config_prefix. If model is specified, then defaults to None. If model is
@@ -168,7 +162,9 @@ def init_chat_model(
  ValueError: If model_provider cannot be inferred or isn't supported.
  ImportError: If the model provider integration package is not installed.

- Initialize non-configurable models:
+ .. dropdown:: Init non-configurable model
+ :open:
+
  .. code-block:: python

  # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
@@ -183,7 +179,8 @@ def init_chat_model(
  gemini_15.invoke("what's your name")


- Create a partially configurable model with no default model:
+ .. dropdown:: Partially configurable model with no default
+
  .. code-block:: python

  # pip install langchain langchain-openai langchain-anthropic
@@ -204,7 +201,8 @@ def init_chat_model(
  )
  # claude-3.5 sonnet response

- Create a fully configurable model with a default model and a config prefix:
+ .. dropdown:: Fully configurable model with a default
+
  .. code-block:: python

  # pip install langchain langchain-openai langchain-anthropic
@@ -233,7 +231,8 @@ def init_chat_model(
  )
  # Claude-3.5 sonnet response with temperature 0.6

- Bind tools to a configurable model:
+ .. dropdown:: Bind tools to a configurable model
+
  You can call any ChatModel declarative methods on a configurable model in the
  same way that you would with a normal model.

@@ -270,6 +269,23 @@ def init_chat_model(
  config={"configurable": {"model": "claude-3-5-sonnet-20240620"}}
  )
  # Claude-3.5 sonnet response with tools
+
+ .. versionadded:: 0.2.7
+
+ .. versionchanged:: 0.2.8
+
+ Support for ``configurable_fields`` and ``config_prefix`` added.
+
+ .. versionchanged:: 0.2.12
+
+ Support for ChatOllama via langchain-ollama package added
+ (langchain_ollama.ChatOllama). Previously,
+ the now-deprecated langchain-community version of Ollama was imported
+ (langchain_community.chat_models.ChatOllama).
+
+ Support for langchain_aws.ChatBedrockConverse added
+ (model_provider="bedrock_converse").
+
  """ # noqa: E501
  if not model and not configurable_fields:
  configurable_fields = ("model", "model_provider")
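The reorganized docstring above documents the configurable-model behavior. A hedged usage sketch of a fully configurable model with a default and a config prefix, mirroring the "Fully configurable model with a default" dropdown; langchain-openai and langchain-anthropic plus API keys are assumed:

```python
# Sketch of init_chat_model usage per the docstring above; provider packages
# and API keys are assumed to be installed and configured.
from langchain.chat_models import init_chat_model

configurable_model = init_chat_model(
    "gpt-4o",
    configurable_fields=("model", "model_provider", "temperature"),
    config_prefix="llm",
    temperature=0,
)

configurable_model.invoke("what's your name")  # uses the gpt-4o default
configurable_model.invoke(
    "what's your name",
    config={"configurable": {"llm_model": "claude-3-5-sonnet-20240620"}},
)
```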
@@ -415,7 +431,7 @@ _SUPPORTED_PROVIDERS = {


  def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
- if model_name.startswith("gpt-3") or model_name.startswith("gpt-4"):
+ if any(model_name.startswith(pre) for pre in ("gpt-3", "gpt-4", "o1")):
  return "openai"
  elif model_name.startswith("claude"):
  return "anthropic"
@@ -427,6 +443,8 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
  return "google_vertexai"
  elif model_name.startswith("amazon."):
  return "bedrock"
+ elif model_name.startswith("mistral"):
+ return "mistralai"
  else:
  return None

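With the two hunks above, "o1..." model names are inferred as the openai provider and "mistral..." names as mistralai, so `model_provider` can be omitted for them. A short hedged sketch; the provider packages still have to be installed:

```python
# Sketch: provider inference added in these hunks; langchain-openai and
# langchain-mistralai are assumed to be installed.
from langchain.chat_models import init_chat_model

o1 = init_chat_model("o1-mini")                    # inferred provider: openai
mistral = init_chat_model("mistral-large-latest")  # inferred provider: mistralai
```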
@@ -1,5 +1,6 @@
  from typing import Any, Dict, List, Optional

+ from langchain_core._api import deprecated
  from langchain_core.messages import BaseMessage, get_buffer_string
  from langchain_core.utils import pre_init

@@ -7,8 +8,23 @@ from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
  from langchain.memory.utils import get_prompt_input_key


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationBufferMemory(BaseChatMemory):
- """Buffer for storing conversation memory."""
+ """A basic memory implementation that simply stores the conversation history.
+
+ This stores the entire conversation history in memory without any
+ additional processing.
+
+ Note that additional processing may be required in some situations when the
+ conversation history is too large to fit in the context window of the model.
+ """

  human_prefix: str = "Human"
  ai_prefix: str = "AI"
@@ -71,8 +87,26 @@ class ConversationBufferMemory(BaseChatMemory):
  return {self.memory_key: buffer}


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationStringBufferMemory(BaseMemory):
- """Buffer for storing conversation memory."""
+ """A basic memory implementation that simply stores the conversation history.
+
+ This stores the entire conversation history in memory without any
+ additional processing.
+
+ Equivalent to ConversationBufferMemory but tailored more specifically
+ for string-based conversations rather than chat models.
+
+ Note that additional processing may be required in some situations when the
+ conversation history is too large to fit in the context window of the model.
+ """

  human_prefix: str = "Human"
  ai_prefix: str = "AI"
@@ -1,12 +1,25 @@
  from typing import Any, Dict, List, Union

+ from langchain_core._api import deprecated
  from langchain_core.messages import BaseMessage, get_buffer_string

  from langchain.memory.chat_memory import BaseChatMemory


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationBufferWindowMemory(BaseChatMemory):
- """Buffer for storing conversation memory inside a limited size window."""
+ """Use to keep track of the last k turns of a conversation.
+
+ If the number of messages in the conversation is more than the maximum number
+ of messages to keep, the oldest messages are dropped.
+ """

  human_prefix: str = "Human"
  ai_prefix: str = "AI"
@@ -2,6 +2,7 @@ import warnings
  from abc import ABC
  from typing import Any, Dict, Optional, Tuple

+ from langchain_core._api import deprecated
  from langchain_core.chat_history import (
  BaseChatMessageHistory,
  InMemoryChatMessageHistory,
@@ -13,8 +14,24 @@ from pydantic import Field
  from langchain.memory.utils import get_prompt_input_key


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class BaseChatMemory(BaseMemory, ABC):
- """Abstract base class for chat memory."""
+ """Abstract base class for chat memory.
+
+ **ATTENTION** This abstraction was created prior to when chat models had
+ native tool calling capabilities.
+ It does **NOT** support native tool calling capabilities for chat models and
+ will fail SILENTLY if used with a chat model that has native tool calling.
+
+ DO NOT USE THIS ABSTRACTION FOR NEW CODE.
+ """

  chat_memory: BaseChatMessageHistory = Field(
  default_factory=InMemoryChatMessageHistory
@@ -1,8 +1,11 @@
+ """Deprecated as of LangChain v0.3.4 and will be removed in LangChain v1.0.0."""
+
  import logging
  from abc import ABC, abstractmethod
  from itertools import islice
  from typing import Any, Dict, Iterable, List, Optional

+ from langchain_core._api import deprecated
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.messages import BaseMessage, get_buffer_string
  from langchain_core.prompts import BasePromptTemplate
@@ -19,6 +22,14 @@ from langchain.memory.utils import get_prompt_input_key
  logger = logging.getLogger(__name__)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class BaseEntityStore(BaseModel, ABC):
  """Abstract base class for Entity store."""

@@ -48,6 +59,14 @@ class BaseEntityStore(BaseModel, ABC):
  pass


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class InMemoryEntityStore(BaseEntityStore):
  """In-memory Entity store."""

@@ -69,6 +88,14 @@ class InMemoryEntityStore(BaseEntityStore):
  return self.store.clear()


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class UpstashRedisEntityStore(BaseEntityStore):
  """Upstash Redis backed Entity store.

@@ -147,6 +174,14 @@ class UpstashRedisEntityStore(BaseEntityStore):
  scan_and_delete(cursor)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class RedisEntityStore(BaseEntityStore):
  """Redis-backed Entity store.

@@ -238,6 +273,14 @@ class RedisEntityStore(BaseEntityStore):
  self.redis_client.delete(*keybatch)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class SQLiteEntityStore(BaseEntityStore):
  """SQLite-backed Entity store"""

@@ -335,6 +378,14 @@ class SQLiteEntityStore(BaseEntityStore):
  self.conn.execute(query)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationEntityMemory(BaseChatMemory):
  """Entity extractor & summarizer memory.

@@ -57,8 +57,21 @@ class SummarizerMixin(BaseModel):
  return await chain.apredict(summary=existing_summary, new_lines=new_lines)


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
- """Conversation summarizer to chat memory."""
+ """Continually summarizes the conversation history.
+
+ The summary is updated after each conversation turn.
+ The implementations returns a summary of the conversation history which
+ can be used to provide context to the model.
+ """

  buffer: str = ""
  memory_key: str = "history" #: :meta private:
@@ -1,5 +1,6 @@
  from typing import Any, Dict, List, Union

+ from langchain_core._api import deprecated
  from langchain_core.messages import BaseMessage, get_buffer_string
  from langchain_core.utils import pre_init

@@ -7,8 +8,21 @@ from langchain.memory.chat_memory import BaseChatMemory
  from langchain.memory.summary import SummarizerMixin


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
- """Buffer with summarizer for storing conversation memory."""
+ """Buffer with summarizer for storing conversation memory.
+
+ Provides a running summary of the conversation together with the most recent
+ messages in the conversation under the constraint that the total number of
+ tokens in the conversation does not exceed a certain limit.
+ """

  max_token_limit: int = 2000
  moving_summary_buffer: str = ""
@@ -1,13 +1,26 @@
  from typing import Any, Dict, List

+ from langchain_core._api import deprecated
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.messages import BaseMessage, get_buffer_string

  from langchain.memory.chat_memory import BaseChatMemory


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class ConversationTokenBufferMemory(BaseChatMemory):
- """Conversation chat memory with token limit."""
+ """Conversation chat memory with token limit.
+
+ Keeps only the most recent messages in the conversation under the constraint
+ that the total number of tokens in the conversation does not exceed a certain limit.
+ """

  human_prefix: str = "Human"
  ai_prefix: str = "AI"
@@ -2,6 +2,7 @@

  from typing import Any, Dict, List, Optional, Sequence, Union

+ from langchain_core._api import deprecated
  from langchain_core.documents import Document
  from langchain_core.vectorstores import VectorStoreRetriever
  from pydantic import Field
@@ -10,8 +11,18 @@ from langchain.memory.chat_memory import BaseMemory
  from langchain.memory.utils import get_prompt_input_key


+ @deprecated(
+ since="0.3.1",
+ removal="1.0.0",
+ message=(
+ "Please see the migration guide at: "
+ "https://python.langchain.com/docs/versions/migrating_memory/"
+ ),
+ )
  class VectorStoreRetrieverMemory(BaseMemory):
- """VectorStoreRetriever-backed memory."""
+ """Store the conversation history in a vector store and retrieves the relevant
+ parts of past conversation based on the input.
+ """

  retriever: VectorStoreRetriever = Field(exclude=True)
  """VectorStoreRetriever object to connect to."""
@@ -24,7 +24,6 @@ from langchain_core.callbacks import (
  CallbackManagerForRetrieverRun,
  )
  from langchain_core.documents import Document
- from langchain_core.load.dump import dumpd
  from langchain_core.retrievers import BaseRetriever, RetrieverLike
  from langchain_core.runnables import RunnableConfig
  from langchain_core.runnables.config import ensure_config, patch_config
@@ -107,9 +106,9 @@ class EnsembleRetriever(BaseRetriever):
  local_metadata=self.metadata,
  )
  run_manager = callback_manager.on_retriever_start(
- dumpd(self),
+ None,
  input,
- name=config.get("run_name"),
+ name=config.get("run_name") or self.get_name(),
  **kwargs,
  )
  try:
@@ -140,9 +139,9 @@ class EnsembleRetriever(BaseRetriever):
  local_metadata=self.metadata,
  )
  run_manager = await callback_manager.on_retriever_start(
- dumpd(self),
+ None,
  input,
- name=config.get("run_name"),
+ name=config.get("run_name") or self.get_name(),
  **kwargs,
  )
  try:
@@ -311,9 +310,11 @@ class EnsembleRetriever(BaseRetriever):
  for doc_list, weight in zip(doc_lists, self.weights):
  for rank, doc in enumerate(doc_list, start=1):
  rrf_score[
- doc.page_content
- if self.id_key is None
- else doc.metadata[self.id_key]
+ (
+ doc.page_content
+ if self.id_key is None
+ else doc.metadata[self.id_key]
+ )
  ] += weight / (rank + self.c)

  # Docs are deduplicated by their contents then sorted by their scores
@@ -321,9 +322,11 @@ class EnsembleRetriever(BaseRetriever):
  sorted_docs = sorted(
  unique_by_key(
  all_docs,
- lambda doc: doc.page_content
- if self.id_key is None
- else doc.metadata[self.id_key],
+ lambda doc: (
+ doc.page_content
+ if self.id_key is None
+ else doc.metadata[self.id_key]
+ ),
  ),
  reverse=True,
  key=lambda doc: rrf_score[
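The two restyled blocks above are the weighted reciprocal-rank-fusion step of `EnsembleRetriever`: each document earns `weight / (rank + c)` from every ranked list it appears in, then documents are deduplicated and sorted by that score. A standalone sketch of the same scoring rule:

```python
# Standalone sketch of weighted reciprocal rank fusion with the same
# weight / (rank + c) rule; c defaults to 60 as in EnsembleRetriever.
from collections import defaultdict


def weighted_rrf(doc_lists: list[list[str]], weights: list[float], c: int = 60) -> list[str]:
    scores: dict[str, float] = defaultdict(float)
    for docs, weight in zip(doc_lists, weights):
        for rank, doc in enumerate(docs, start=1):
            scores[doc] += weight / (rank + c)
    return sorted(scores, key=scores.__getitem__, reverse=True)


# Example: a keyword-retriever list weighted 0.6 and a dense list weighted 0.4.
print(weighted_rrf([["a", "b", "c"], ["b", "a", "d"]], [0.6, 0.4]))
```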
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langchain
- Version: 0.3.0.dev2
+ Version: 0.3.2
  Summary: Building applications with LLMs through composability
  Home-page: https://github.com/langchain-ai/langchain
  License: MIT
@@ -15,8 +15,8 @@ Requires-Dist: PyYAML (>=5.3)
  Requires-Dist: SQLAlchemy (>=1.4,<3)
  Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
  Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
- Requires-Dist: langchain-core (>=0.3.0.dev5,<0.4.0)
- Requires-Dist: langchain-text-splitters (>=0.3.0.dev1,<0.4.0)
+ Requires-Dist: langchain-core (>=0.3.8,<0.4.0)
+ Requires-Dist: langchain-text-splitters (>=0.3.0,<0.4.0)
  Requires-Dist: langsmith (>=0.1.17,<0.2.0)
  Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
  Requires-Dist: numpy (>=1.26.0,<2.0.0) ; python_version >= "3.12"
@@ -2,7 +2,7 @@ langchain/__init__.py,sha256=4cqV-N_QJnfjk52DqtR2e72vsmJC1R6PkflvRdLjZQI,13709
  langchain/_api/__init__.py,sha256=0FuHuMNUBMrst1Y1nm5yZzQr2xbLmb7rxMsimqKBXhs,733
  langchain/_api/deprecation.py,sha256=MpH4S7a11UDuoAGCv1RLWGn4pwhoFwEOrtONJGep40U,471
  langchain/_api/interactive_env.py,sha256=NlnXizhm1TG3l_qKNI0qHJiHkh9q2jRjt5zGJsg_BCA,139
- langchain/_api/module_import.py,sha256=6-oUmNY0ogtiqu18BJsu4rS-F0QvR8L6XkSWSN5WHWw,6357
+ langchain/_api/module_import.py,sha256=q6UZ1WadWx7curQq8HV8nGwt9WmKd7tJ0mipUyG7ll0,6347
  langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
  langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
@@ -74,7 +74,7 @@ langchain/agents/agent_toolkits/sql/toolkit.py,sha256=CCVWRJKVuECq-eFRjatJjYsy81
  langchain/agents/agent_toolkits/steam/__init__.py,sha256=iOMgxWCt0FTNLMNq0wScgSN_YdBBq-56VM6j0Ud8GpI,21
  langchain/agents/agent_toolkits/steam/toolkit.py,sha256=V0_xpO4mC4rfWBaLyTPW-pKwd-EScTTUnvgtB1sW6Cw,659
  langchain/agents/agent_toolkits/vectorstore/__init__.py,sha256=uT5qVHjIcx3yFkWfxOzbRKL5xwWcMuFGQ-es9O7b2NQ,56
- langchain/agents/agent_toolkits/vectorstore/base.py,sha256=YH40cUjcwTGQEYo-JagDKoTnQ5Pr9SwBs1jyqMEalnk,8472
+ langchain/agents/agent_toolkits/vectorstore/base.py,sha256=lnzst1sehVH8FExMPF8Q1TFg9BROmfp5uHRzE2LB7Mo,8462
  langchain/agents/agent_toolkits/vectorstore/prompt.py,sha256=DndLnLxi9iKjuYKo5E1nscHCOPeCoNcpl8dFHcSltxU,834
  langchain/agents/agent_toolkits/vectorstore/toolkit.py,sha256=dJhQ-0bfPOSliIFickPnJ40iefUPUyL3uV2pVaYP4pA,3210
  langchain/agents/agent_toolkits/xorbits/__init__.py,sha256=LJ-yZ3UKg4vjibzbgMXocR03vcsU_7ZvU7TlScM9RlE,1095
@@ -111,7 +111,7 @@ langchain/agents/mrkl/base.py,sha256=GnWny1MWbfms9b3tF18nyuejzC5NHwDxJbXQBsXxrSw
  langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
  langchain/agents/mrkl/prompt.py,sha256=2dTMP2lAWiLvCtuEijgQRjbKDlbPEnmx77duMwdJ7e4,641
  langchain/agents/openai_assistant/__init__.py,sha256=Xssaqoxrix3hn1gKSOLmDRQzTxAoJk0ProGXmXQe8Mw,114
- langchain/agents/openai_assistant/base.py,sha256=tgvjW8fUug3_jYVscIX8xP9gVWjQpD-ZwGAH2Iwt2Ww,27904
+ langchain/agents/openai_assistant/base.py,sha256=UGUEVtJCNudTWYoyc4-z1N7BQpKWnwKybeO9bZzivD0,27957
  langchain/agents/openai_functions_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langchain/agents/openai_functions_agent/agent_token_buffer_memory.py,sha256=t3J3Qku4lvs-EGTbPRzhrxAwTVBoEj4tu5wbl5u2-N0,3764
  langchain/agents/openai_functions_agent/base.py,sha256=katIW0vE87B7ezm9WU_fEMfeHSQPHZptM0zppQfnY-4,13474
@@ -212,23 +212,23 @@ langchain/chains/api/openapi/response_chain.py,sha256=7vHhIF1-3JUgOXeyWb9CAkG0Ji
  langchain/chains/api/podcast_docs.py,sha256=mPW1GrX0X6kaGuGpVYFXNvSoLNoUFse8CaoJSUSa4KU,1920
  langchain/chains/api/prompt.py,sha256=YERLepjWuo2J4wg40DWWfHH4Tsm-9eab-cIllHFxMk4,1031
  langchain/chains/api/tmdb_docs.py,sha256=8yoowa2d53-oytU0dycV-0w9wRe9xOXAPz-s8gQ6EpE,1537
- langchain/chains/base.py,sha256=cVee_nkH2qIvsOGi1S5PyUhdXfG4HJ_P1lEbjl0YsS0,30694
+ langchain/chains/base.py,sha256=CPpnBTWHShEjlVzrfSZlGVZlIO2dY9bljHophNi2p4U,30637
  langchain/chains/chat_vector_db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langchain/chains/chat_vector_db/prompts.py,sha256=4YM7z5Wi8ftJEVj3ZG8YOcudYwGHCNvQh4Gf_6592yc,694
  langchain/chains/combine_documents/__init__.py,sha256=tJZmkLOD4JGjh9OxkCdTMUzbBCb-47fHLyklQo6ida4,367
  langchain/chains/combine_documents/base.py,sha256=BEpMx797bwYJTv1QXmafkzRVGh-i9NbNvTJEh1YgWA4,10267
- langchain/chains/combine_documents/map_reduce.py,sha256=fw-T0UR-zBVGQYvBBOXYzOo2qwKOEhyfdEGrhjr5Ujo,11810
- langchain/chains/combine_documents/map_rerank.py,sha256=wpSmHuYRs0wqgf37THjpCMkr1gibsrPAGO1k57uvW3I,8988
- langchain/chains/combine_documents/reduce.py,sha256=kUwfDC1asChEx18Xke61TBKN56Q0UU7qyJCcsPH8kYY,13839
- langchain/chains/combine_documents/refine.py,sha256=aIKLLMxvhLbthn0geGCo1rBbWcGjcdQwv3a8Luhv5ag,9128
- langchain/chains/combine_documents/stuff.py,sha256=wpCY5c3WB08nXYIlVfgJzOYL_k_uySty2KSQiOKir-I,11560
+ langchain/chains/combine_documents/map_reduce.py,sha256=qcYHuTtkSM5kalrErz7GWgRVwV0Ba_Fbf-FHxcq4mNM,12130
+ langchain/chains/combine_documents/map_rerank.py,sha256=gzx3jo-pvrLouF9XSU0Wmu6nYoPcCMTBJCH5pLfv3vk,9327
+ langchain/chains/combine_documents/reduce.py,sha256=UH-HCX3q7ShsH952uEJxrmUO_ilI7q4vQjBCQkkYl-c,14159
+ langchain/chains/combine_documents/refine.py,sha256=sZve5lJDTPad-N-AByfoW5OJzg9eZ1Bp0KfFCKstp1A,9463
+ langchain/chains/combine_documents/stuff.py,sha256=ebbBrSnMiZjGUjxhQNJ1bw6-aFSpSubwm9Xp2YYKXJM,11555
  langchain/chains/constitutional_ai/__init__.py,sha256=Woq_Efl5d-MSTkhpg7HLts3kXysJVZLiz3tr05NTf5Q,107
  langchain/chains/constitutional_ai/base.py,sha256=ubt-A3UpMg1qQ9fwQ4ZGBkahn7AnirGH58CfWvCUIa0,12705
  langchain/chains/constitutional_ai/models.py,sha256=D_p--Zt-ut32VuU5nHdqmPv5vFZEbO0f9pInVmG8NqU,266
  langchain/chains/constitutional_ai/principles.py,sha256=vElwvF1w4h8URsj38ucmoKp9hUCzf0sJyoNQmKv1Kws,21739
  langchain/chains/constitutional_ai/prompts.py,sha256=vL7qEGpLZShdKY8i07874peWB63eTYud6iPJcWcD-Y4,9072
  langchain/chains/conversation/__init__.py,sha256=hpIiQSoUe0bGkqAGKxG_CEYRFsjHRL4l5uBEpCBetFc,71
- langchain/chains/conversation/base.py,sha256=6pxKkGYDiK_pj0HB_yrSMJgOdjInoO6OuolGiZ-ajm4,5555
+ langchain/chains/conversation/base.py,sha256=I1YGmPI1BiLA-2lKHiNLoltcSV3Fwl7G0xEvsFHk__E,5550
  langchain/chains/conversation/memory.py,sha256=KoKmk5FjPEkioolvmFxcJgRr2wRdWIe1LNBHCtGgUKo,1396
  langchain/chains/conversation/prompt.py,sha256=84xC4dy8yNiCSICT4b6UvZdQXpPifMVw1hf7WnFAVkw,913
  langchain/chains/conversational_retrieval/__init__.py,sha256=hq7jx-kmg3s8qLYnV7gPmzVIPcGqW69H6cXIjklvGjY,49
@@ -262,7 +262,7 @@ langchain/chains/history_aware_retriever.py,sha256=a92vlxlq0PaOubc_b4jj_WwGivk4T
  langchain/chains/hyde/__init__.py,sha256=mZ-cb7slBdlK5aG2R_NegBzNCXToHR-tdmfIIA6lKvQ,75
  langchain/chains/hyde/base.py,sha256=Rc5u4JD3M3CaVsK4PwAVF67ooklcz9H3Fjy5ySoJuyY,3619
  langchain/chains/hyde/prompts.py,sha256=U4LfozneOyHDIKd8rCbnGSQK84YvZqAtpf5EL435Ol8,1913
- langchain/chains/llm.py,sha256=cP1QLNpxAHvdA0AKvQVjlZg8vSc_MafX4q1C_v5GFp4,15504
+ langchain/chains/llm.py,sha256=tzLw3OLgBDsHwDNAHV5IP3avRSy8EfZhPnR6tFNJmes,15515
  langchain/chains/llm_bash/__init__.py,sha256=qvRpa5tj09akj4DLVZoKvWK8-oJrUxc5-7ooAP3mO18,453
  langchain/chains/llm_checker/__init__.py,sha256=2IHg5XUQTQEoEMutGa66_tzOStNskQnDDXdN9VzJCSo,139
  langchain/chains/llm_checker/base.py,sha256=_PIC2eDLZUK0ExAe05KOA088ZShgNq7IK1hsFfxg93w,6506
@@ -279,7 +279,7 @@ langchain/chains/llm_summarization_checker/prompts/create_facts.txt,sha256=hM2_E
  langchain/chains/llm_summarization_checker/prompts/revise_summary.txt,sha256=nSSq5UQMx6gvjMKIs2t_ituuEQzu2nni1wdnywAe-5U,416
  langchain/chains/llm_symbolic_math/__init__.py,sha256=KQ6bFiFMsqs8PNtU-oo6l-czNBBwQUn2rEirz3gt-w8,470
  langchain/chains/loading.py,sha256=57shFurz0r_FDoUSTcD5Hv7cZl4Rr2G2A_gT-p7XHCE,28829
- langchain/chains/mapreduce.py,sha256=QTb-7lxao99O7NySK_xpA5d_5iQZJHJdH1Gcv-EgDCY,4097
+ langchain/chains/mapreduce.py,sha256=90P4QcNczv22qye9JCuOptR9ze-0qIFotb6bOS6HX8g,4092
  langchain/chains/moderation.py,sha256=ezUrzTOI6uGynyglpSuGom2gK26bKtkER2UuMG4yJWQ,4427
  langchain/chains/natbot/__init__.py,sha256=ACF2TYNK_CTfvmdLlG5Ry0_j9D6ZfjgfQxmeKe1BAIg,96
  langchain/chains/natbot/base.py,sha256=zl_sf4dgS8dFRYTY83cdaMXq1oqEsB-ddlb7RHx5SUM,5286
@@ -290,8 +290,8 @@ langchain/chains/openai_functions/base.py,sha256=jfgnAuire9OLOL0kLqKScpjdBEXKYzy
  langchain/chains/openai_functions/citation_fuzzy_match.py,sha256=cd9kh6DKMKS-eCskWFcJmDQLOemne1SMe4pKHbJ-Mvc,5344
  langchain/chains/openai_functions/extraction.py,sha256=2P99EoAb8iipW8TNJwNG2gUzgpWYSCZAvPU-kgUNfqU,7390
  langchain/chains/openai_functions/openapi.py,sha256=oqNFnLboLyFykkjHGvXR9Bd-7tjx7EjkNZnxXh5ISoc,14954
- langchain/chains/openai_functions/qa_with_structure.py,sha256=3JRjX4ylqF4eaIOZ5umcH1hPYiWsYyH9PVH2nF-NfFw,4851
- langchain/chains/openai_functions/tagging.py,sha256=jY6Rk_dJdYtKP5nrZJ7f-oCsMbBLUivfTVxcreZ7PNI,6524
+ langchain/chains/openai_functions/qa_with_structure.py,sha256=hS_b7PZjsgD7OR8QXOboq1LGClbRc6TlKcdqMCATojA,4841
+ langchain/chains/openai_functions/tagging.py,sha256=5i4dAe019rCKN_zWYugHkW5U66yO9Gse8AxjxJLdnr0,6504
  langchain/chains/openai_functions/utils.py,sha256=GDhYjszQGut1UcJ-dyPvkwiT8gHOV0IejRuIfN7_fhw,1255
  langchain/chains/openai_tools/__init__.py,sha256=xX0If1Nx_ocEOI56EGxCI0v0RZ1_VUegzyODAj0RLVU,134
  langchain/chains/openai_tools/extraction.py,sha256=sG8qUQKa7f-6JcbH1OWgpTtuUYV-3B-wBZJTDpp101E,3399
@@ -300,8 +300,8 @@ langchain/chains/qa_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
  langchain/chains/qa_generation/base.py,sha256=obAA1qtCavCO3zkskRtB62is-geDOJG526T1N6tJbts,4187
  langchain/chains/qa_generation/prompt.py,sha256=W3lYKPUDSKS4N6b_FWlKzjn0tU5J4iQ8CF2FixdtqBo,1875
  langchain/chains/qa_with_sources/__init__.py,sha256=pYogDy6KwP4fS0m6GqyhLu_1kSd0ba3Ar4aPdIlRTTo,174
- langchain/chains/qa_with_sources/base.py,sha256=gov82to5TR_qMqb2Tya9qJJmi44hom10Ie8G0ecpMw0,8531
- langchain/chains/qa_with_sources/loading.py,sha256=0tkVQe8UTKRYFTQLbFTb2rHhGRhH2jUFd5Zwz2FMYVs,7964
+ langchain/chains/qa_with_sources/base.py,sha256=1N8z2CNstOnuPhy8QNnND-UDn1_XGgAHOmOKOuttKA8,8521
+ langchain/chains/qa_with_sources/loading.py,sha256=A_fU5HYt0pGt-PArz-5lcI3WSaGEnF-u9ijEq7pLiew,7939
  langchain/chains/qa_with_sources/map_reduce_prompt.py,sha256=hAM6OZbefpaaANdFYElB9feUi1iTlg0h54NDrFOw6Fo,6971
  langchain/chains/qa_with_sources/refine_prompts.py,sha256=MIwQfIXjFFjmNmwgMIq9yM5rOQdjswHnShNpNNc1BwM,1318
  langchain/chains/qa_with_sources/retrieval.py,sha256=OmIQkHjuEvsiXflvAbd5vt5mF8w7U4HtnBcQd3Ffrn0,2433
@@ -314,14 +314,14 @@ langchain/chains/query_constructor/parser.py,sha256=staGwAmGi0xlAEZ2niDkd9MFntZO
  langchain/chains/query_constructor/prompt.py,sha256=rwEsTr29cKBKPnn6vKB5rFw-youslUIFQoRIBkBh-j0,6880
  langchain/chains/query_constructor/schema.py,sha256=FRn_cpTRXuP9N1PprndGqkm-AfV5f-mihX1YAlAZUGE,277
  langchain/chains/question_answering/__init__.py,sha256=wohLdJqGfpWOwy68EEleW73SCenaQfxPCpMsrCIlohU,144
- langchain/chains/question_answering/chain.py,sha256=SBxm8rIYhQV_pHiijiQvrjCS5GTqsJ1M0-eA-OV95Mg,9666
+ langchain/chains/question_answering/chain.py,sha256=8Hcg2pA87R1ruofPPpSzQwOdxuCdTfbXPVQg9lwnvkg,9641
  langchain/chains/question_answering/map_reduce_prompt.py,sha256=CrerC8PqW1-V8SsQQsFsMd7dfjTb04Urf2naQYVGxl0,8013
  langchain/chains/question_answering/map_rerank_prompt.py,sha256=l2Ha1Xqr5Q6Y-Xh9af8JTni9gLAyhKJhmSErRFGw9s4,1622
  langchain/chains/question_answering/refine_prompts.py,sha256=JbQKbGaHo-IoHw1Wl16mMvqTi9kjmp_5NK526C_9_nM,2378
  langchain/chains/question_answering/stuff_prompt.py,sha256=tXecxj10u9x0taPz4I1Kn-J0SOYcjIfr_8RINw9P7ys,1146
  langchain/chains/retrieval.py,sha256=-ZHLdDQUkIQLjF9DMvpH_YgZKxShTm0GaSIhw1ab_EM,2742
  langchain/chains/retrieval_qa/__init__.py,sha256=MGGNuZ-HVZDyk551hUjGexK3U9q-2Yi_VJkpi7MV2DE,62
- langchain/chains/retrieval_qa/base.py,sha256=novT0PWHR_yEYYbpJ3CsWApZKkQWnGn7pAveiJHugZ4,12028
+ langchain/chains/retrieval_qa/base.py,sha256=R8zVrsg9OPPXDKLhQi6ouj9zvvlzqSNr1sNavR_Bji0,12013
  langchain/chains/retrieval_qa/prompt.py,sha256=c5_tFGFbltYvM9P6K_Zk3dOeYYbiSFN-MkJK6HBoNuA,399
  langchain/chains/router/__init__.py,sha256=r66J28FWIORVB5QIZ1d8R_HsiBaV1eQMZDZvMC43oAQ,407
  langchain/chains/router/base.py,sha256=ws6i8C4nk7YWmBqkXBcJ-FybNx4OeDJE-L1IELLK3M4,4517
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
  langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
  langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
  langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
- langchain/chat_models/base.py,sha256=oBNuZSGqjjd9aMGNVmP7di14rFr4HMzqu-WFyVhwxB8,31024
+ langchain/chat_models/base.py,sha256=UauvE8dAtTz_ZSrF7BDNOuP7vvcGsEXeRnq2floq8G0,31312
  langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
  langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
  langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
@@ -773,9 +773,9 @@ langchain/load/dump.py,sha256=st-Wju0x5jrMVfMzjeKF1jo3Jvn8b1cCCfLrAaIYvhM,100
  langchain/load/load.py,sha256=sxSF6ySrMY4ouq77JPiuZKRx2lyVbqLoMi5ni5bHzAI,98
  langchain/load/serializable.py,sha256=6iZp1sg_ozIDqXTDEk60IP89UEwZEJ4j0oMaHascLKI,412
  langchain/memory/__init__.py,sha256=kQFlaG2Yuz1Y7U8e3Ngbv-13I3BPGKAI06Lz9sL-Lbc,5574
- langchain/memory/buffer.py,sha256=1k_6Q1akqL65M9tGCQGDIRcK6Z9CqhbDzAm8sFE88Hw,4841
- langchain/memory/buffer_window.py,sha256=hRFiodXZC1Xu7DFTmbWp5wtosuBkAEHQsPEXmMd-XIk,1616
- langchain/memory/chat_memory.py,sha256=38xhAPNCslwtS4G4VmWGZptiLV0eJvTKZVgRaVnbI6A,2777
+ langchain/memory/buffer.py,sha256=4QRrzB1Xl57jbHv8Buu1nA_xNiwt-qb8xqCFHzLxrZg,6005
+ langchain/memory/buffer_window.py,sha256=ETXU6PWlOaCPlge01fb-FjLgaRio2Cps197Z0krCVII,1986
+ langchain/memory/chat_memory.py,sha256=beAqpK58GfbUiCxPMCsTABdmGfd5HlrB4k1qLKtApnw,3353
  langchain/memory/chat_message_histories/__init__.py,sha256=AdCCNl_rxX4OVVLK6ZwwpMTo8VXzAS4v9bH1v2QjHec,3506
  langchain/memory/chat_message_histories/astradb.py,sha256=KeIpJKN4LWHdjdpoeStBn8xazqoP0mVHCqZB1lw_AS4,692
  langchain/memory/chat_message_histories/cassandra.py,sha256=OTSR2lgFyBQWZpw1Gw-aE9Kmtxth8JQGzhN_Qd5mKwM,698
@@ -798,17 +798,17 @@ langchain/memory/chat_message_histories/upstash_redis.py,sha256=M-sV600Ey7erOjRQ
  langchain/memory/chat_message_histories/xata.py,sha256=mu8boSJYSS5TUp2qj8k210ZnZ2tqjyuRj_SHPH_g4qw,683
  langchain/memory/chat_message_histories/zep.py,sha256=v2dAHGuV1HANCmxsVZSnXZAzRwIgOmwJ4HxvIM74fYM,680
  langchain/memory/combined.py,sha256=poPw4QbtfjlQcZK_xWseF4wXxGWtl6XGCgMdWQaK0fs,2946
- langchain/memory/entity.py,sha256=PSYJ5q8Xb04_uhKvsL00OgTWVY5_Y4cJCzqxIDu6rV0,15893
+ langchain/memory/entity.py,sha256=0K7FDtmitjMtRmAJR7YSBgTx9qSXOQtsaGRqcsIk-WY,17176
  langchain/memory/kg.py,sha256=DNerFp7WY8z6igywdH7KAuq3W2O1DVoPMBsGvw5WebQ,645
  langchain/memory/motorhead_memory.py,sha256=OXjtlAQi1ioRXdM3GVcYmReynkKn8Vm1e5TruqecUR8,658
  langchain/memory/prompt.py,sha256=r8vxZSRydSOWJzRszStN0Wky4n3fyM_QJ2XoKMsP3JA,8181
  langchain/memory/readonly.py,sha256=IbZFbyuPo_bHEzyACQcLIcOPpczoX5CLfM_n0YllYjw,792
  langchain/memory/simple.py,sha256=7El81OHJA0HBqwJ-AZDTQFPfB7B5NEsmY_fEOrwD0XA,761
- langchain/memory/summary.py,sha256=qjENNU6-N0TNu2cuespnnEy7chBgVEaXIed6OhnIDa8,4113
- langchain/memory/summary_buffer.py,sha256=5aM6ocE6jPXC9HqElDTyTwRNR6QydGDUf12BWtu5kTA,5048
- langchain/memory/token_buffer.py,sha256=E1N7bWSkAmi-7V7F-7iRl-BADStnplp-zwtUndjXBMM,2144
+ langchain/memory/summary.py,sha256=KS6V7eD2hCsX5vtLUO20VbVVFprkXhGAFxVFEiiEIaA,4503
+ langchain/memory/summary_buffer.py,sha256=ynYbCa-XEjFeYcVIwyjsiOShWyLj6v1sDmurdv1kGUM,5514
+ langchain/memory/token_buffer.py,sha256=jYtua6S5M6R2KyElsqXc8VRuGNsu7YVpavINj91HfGg,2556
  langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
- langchain/memory/vectorstore.py,sha256=EKMR42LFQzrn0_EiZl3pI7qQww4ZChuMUB6pfKSqQOY,3857
+ langchain/memory/vectorstore.py,sha256=RdOX2EDSFXAC6LEE_9aYWIJcVoZ32lUQuludOgPCAoc,4189
  langchain/memory/vectorstore_token_buffer_memory.py,sha256=CSuatQSOEs7iKeMBhKLUqDvNrdl12lquvC89q9_NlXo,7602
  langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
  langchain/model_laboratory.py,sha256=IaJzVG_SbFX7W6ODriqqme-Q5x0MB18j4Bhg1Y-fWLo,3278
@@ -880,7 +880,7 @@ langchain/retrievers/document_compressors/flashrank_rerank.py,sha256=Eo86fJ_T2Ib
  langchain/retrievers/document_compressors/listwise_rerank.py,sha256=i3dCqXBF27_sHPGxWOlCkVjt4s85QM0ikHZtPp2LpDs,5127
  langchain/retrievers/elastic_search_bm25.py,sha256=eRboOkRQj-_E53gUQIZzxQ1bX0-uEMv7LAQSD7K7Qf8,665
  langchain/retrievers/embedchain.py,sha256=IUnhr3QK7IJ4IMHZDrTBpZuVQ1kyxhG-bAjmOMXb5eA,644
- langchain/retrievers/ensemble.py,sha256=j3yYsaGlozDL208QDElkT42hxtLoxXHsvMZ8-vccJzM,10508
+ langchain/retrievers/ensemble.py,sha256=JSoMkvB9mA-K6poXKtEkb4tT6Q6GEg-O3qwDlnf7xkw,10593
  langchain/retrievers/google_cloud_documentai_warehouse.py,sha256=wJZu2kOHjrBOpTeaPBxyKMIA9OlMuiZ4kul2FG1lJ0k,695
  langchain/retrievers/google_vertex_ai_search.py,sha256=MlYVMne4jYU7lif0y5A-cQNC89DPnsCRljrQPm80GKQ,1040
  langchain/retrievers/kay.py,sha256=rvIPgoA7IrNsYeJ2B4J-gaviS84inzmlifKoNWKEgc8,629
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
  langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
  langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
  langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
- langchain-0.3.0.dev2.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
- langchain-0.3.0.dev2.dist-info/METADATA,sha256=i2ylk8mAsL58w2_ThDHa2g-_frzPaFpm6BjtsaFiMH0,7093
- langchain-0.3.0.dev2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- langchain-0.3.0.dev2.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
- langchain-0.3.0.dev2.dist-info/RECORD,,
+ langchain-0.3.2.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+ langchain-0.3.2.dist-info/METADATA,sha256=C7Uh5KgODQkWm2IoW_fVsOLK71cjo9pchT20cI2Kx7s,7078
+ langchain-0.3.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ langchain-0.3.2.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+ langchain-0.3.2.dist-info/RECORD,,