langchain 0.3.23__py3-none-any.whl → 0.3.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/_api/module_import.py +3 -3
- langchain/agents/agent.py +104 -109
- langchain/agents/agent_iterator.py +11 -15
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
- langchain/agents/chat/base.py +7 -6
- langchain/agents/chat/output_parser.py +2 -1
- langchain/agents/conversational/base.py +5 -4
- langchain/agents/conversational_chat/base.py +9 -8
- langchain/agents/format_scratchpad/log.py +1 -3
- langchain/agents/format_scratchpad/log_to_messages.py +3 -5
- langchain/agents/format_scratchpad/openai_functions.py +4 -4
- langchain/agents/format_scratchpad/tools.py +3 -3
- langchain/agents/format_scratchpad/xml.py +1 -3
- langchain/agents/initialize.py +2 -1
- langchain/agents/json_chat/base.py +3 -2
- langchain/agents/loading.py +5 -5
- langchain/agents/mrkl/base.py +6 -5
- langchain/agents/openai_assistant/base.py +17 -17
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
- langchain/agents/openai_functions_agent/base.py +13 -12
- langchain/agents/openai_functions_multi_agent/base.py +15 -14
- langchain/agents/openai_tools/base.py +2 -1
- langchain/agents/output_parsers/openai_functions.py +2 -2
- langchain/agents/output_parsers/openai_tools.py +6 -6
- langchain/agents/output_parsers/react_json_single_input.py +2 -1
- langchain/agents/output_parsers/self_ask.py +2 -1
- langchain/agents/output_parsers/tools.py +7 -7
- langchain/agents/react/agent.py +3 -2
- langchain/agents/react/base.py +4 -3
- langchain/agents/schema.py +3 -3
- langchain/agents/self_ask_with_search/base.py +2 -1
- langchain/agents/structured_chat/base.py +9 -8
- langchain/agents/structured_chat/output_parser.py +2 -1
- langchain/agents/tool_calling_agent/base.py +3 -2
- langchain/agents/tools.py +4 -4
- langchain/agents/types.py +3 -3
- langchain/agents/utils.py +1 -1
- langchain/agents/xml/base.py +7 -6
- langchain/callbacks/streaming_aiter.py +3 -2
- langchain/callbacks/streaming_aiter_final_only.py +3 -3
- langchain/callbacks/streaming_stdout_final_only.py +3 -3
- langchain/chains/api/base.py +11 -12
- langchain/chains/base.py +47 -50
- langchain/chains/combine_documents/base.py +23 -23
- langchain/chains/combine_documents/map_reduce.py +12 -12
- langchain/chains/combine_documents/map_rerank.py +16 -15
- langchain/chains/combine_documents/reduce.py +17 -17
- langchain/chains/combine_documents/refine.py +12 -12
- langchain/chains/combine_documents/stuff.py +10 -10
- langchain/chains/constitutional_ai/base.py +9 -9
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +30 -30
- langchain/chains/elasticsearch_database/base.py +13 -13
- langchain/chains/example_generator.py +1 -3
- langchain/chains/flare/base.py +13 -12
- langchain/chains/flare/prompts.py +2 -4
- langchain/chains/hyde/base.py +8 -8
- langchain/chains/llm.py +31 -30
- langchain/chains/llm_checker/base.py +6 -6
- langchain/chains/llm_math/base.py +10 -10
- langchain/chains/llm_summarization_checker/base.py +6 -6
- langchain/chains/loading.py +12 -14
- langchain/chains/mapreduce.py +7 -6
- langchain/chains/moderation.py +8 -8
- langchain/chains/natbot/base.py +6 -6
- langchain/chains/openai_functions/base.py +8 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
- langchain/chains/openai_functions/extraction.py +3 -3
- langchain/chains/openai_functions/openapi.py +12 -12
- langchain/chains/openai_functions/qa_with_structure.py +4 -4
- langchain/chains/openai_functions/utils.py +2 -2
- langchain/chains/openai_tools/extraction.py +2 -2
- langchain/chains/prompt_selector.py +3 -3
- langchain/chains/qa_generation/base.py +5 -5
- langchain/chains/qa_with_sources/base.py +21 -21
- langchain/chains/qa_with_sources/loading.py +2 -1
- langchain/chains/qa_with_sources/retrieval.py +6 -6
- langchain/chains/qa_with_sources/vector_db.py +8 -8
- langchain/chains/query_constructor/base.py +4 -3
- langchain/chains/query_constructor/parser.py +5 -4
- langchain/chains/question_answering/chain.py +3 -2
- langchain/chains/retrieval.py +2 -2
- langchain/chains/retrieval_qa/base.py +16 -16
- langchain/chains/router/base.py +12 -11
- langchain/chains/router/embedding_router.py +12 -11
- langchain/chains/router/llm_router.py +12 -12
- langchain/chains/router/multi_prompt.py +3 -3
- langchain/chains/router/multi_retrieval_qa.py +5 -4
- langchain/chains/sequential.py +18 -18
- langchain/chains/sql_database/query.py +21 -5
- langchain/chains/structured_output/base.py +14 -13
- langchain/chains/summarize/chain.py +4 -3
- langchain/chains/transform.py +12 -11
- langchain/chat_models/base.py +27 -31
- langchain/embeddings/__init__.py +1 -1
- langchain/embeddings/base.py +4 -6
- langchain/embeddings/cache.py +19 -18
- langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
- langchain/evaluation/comparison/eval_chain.py +10 -10
- langchain/evaluation/criteria/eval_chain.py +11 -10
- langchain/evaluation/embedding_distance/base.py +21 -21
- langchain/evaluation/exact_match/base.py +3 -3
- langchain/evaluation/loading.py +7 -8
- langchain/evaluation/qa/eval_chain.py +7 -6
- langchain/evaluation/regex_match/base.py +3 -3
- langchain/evaluation/schema.py +6 -5
- langchain/evaluation/scoring/eval_chain.py +9 -9
- langchain/evaluation/string_distance/base.py +23 -23
- langchain/hub.py +2 -1
- langchain/indexes/_sql_record_manager.py +8 -7
- langchain/indexes/vectorstore.py +11 -11
- langchain/llms/__init__.py +3 -3
- langchain/memory/buffer.py +13 -13
- langchain/memory/buffer_window.py +5 -5
- langchain/memory/chat_memory.py +5 -5
- langchain/memory/combined.py +10 -10
- langchain/memory/entity.py +8 -7
- langchain/memory/readonly.py +4 -4
- langchain/memory/simple.py +5 -5
- langchain/memory/summary.py +8 -8
- langchain/memory/summary_buffer.py +11 -11
- langchain/memory/token_buffer.py +5 -5
- langchain/memory/utils.py +2 -2
- langchain/memory/vectorstore.py +15 -14
- langchain/memory/vectorstore_token_buffer_memory.py +7 -7
- langchain/model_laboratory.py +4 -3
- langchain/output_parsers/combining.py +5 -5
- langchain/output_parsers/datetime.py +1 -2
- langchain/output_parsers/enum.py +4 -5
- langchain/output_parsers/pandas_dataframe.py +5 -5
- langchain/output_parsers/regex.py +4 -4
- langchain/output_parsers/regex_dict.py +4 -4
- langchain/output_parsers/retry.py +2 -2
- langchain/output_parsers/structured.py +5 -5
- langchain/output_parsers/yaml.py +3 -3
- langchain/pydantic_v1/__init__.py +1 -6
- langchain/pydantic_v1/dataclasses.py +1 -5
- langchain/pydantic_v1/main.py +1 -5
- langchain/retrievers/contextual_compression.py +3 -3
- langchain/retrievers/document_compressors/base.py +3 -2
- langchain/retrievers/document_compressors/chain_extract.py +4 -3
- langchain/retrievers/document_compressors/chain_filter.py +3 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
- langchain/retrievers/document_compressors/cross_encoder.py +1 -2
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
- langchain/retrievers/ensemble.py +15 -19
- langchain/retrievers/merger_retriever.py +7 -12
- langchain/retrievers/multi_query.py +14 -13
- langchain/retrievers/multi_vector.py +4 -4
- langchain/retrievers/parent_document_retriever.py +9 -8
- langchain/retrievers/re_phraser.py +2 -3
- langchain/retrievers/self_query/base.py +13 -12
- langchain/retrievers/time_weighted_retriever.py +14 -14
- langchain/runnables/openai_functions.py +4 -3
- langchain/smith/evaluation/config.py +7 -6
- langchain/smith/evaluation/progress.py +3 -2
- langchain/smith/evaluation/runner_utils.py +66 -69
- langchain/smith/evaluation/string_run_evaluator.py +38 -31
- langchain/storage/encoder_backed.py +7 -11
- langchain/storage/file_system.py +5 -4
- {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/METADATA +3 -3
- {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/RECORD +169 -169
- {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/WHEEL +1 -1
- langchain-0.3.25.dist-info/entry_points.txt +4 -0
- langchain-0.3.23.dist-info/entry_points.txt +0 -5
- {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/licenses/LICENSE +0 -0
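Nearly all of the changes listed above are small, mechanical typing updates: the deprecated typing.Dict, typing.List, typing.Tuple, and typing.Type aliases are replaced with the builtin generics standardized in PEP 585, and the typing imports are trimmed accordingly. A minimal before/after sketch of the pattern (the summarize function below is illustrative, not taken from the package):

# Illustrative sketch of the PEP 585 cleanup applied across these files;
# summarize is a made-up function, not langchain code.
from typing import Any, Optional

# 0.3.23 style:
#     from typing import Dict, List, Optional
#     def summarize(inputs: Dict[str, Any]) -> Optional[List[str]]: ...

# 0.3.25 style -- builtin generics, subscriptable since Python 3.9:
def summarize(inputs: dict[str, Any]) -> Optional[list[str]]:
    return list(inputs) or None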
langchain/indexes/vectorstore.py
CHANGED
@@ -1,6 +1,6 @@
 """Vectorstore stubs for the indexing api."""
 
-from typing import Any, Dict, List, Optional, Type
+from typing import Any, Optional
 
 from langchain_core.document_loaders import BaseLoader
 from langchain_core.documents import Document
@@ -33,7 +33,7 @@ class VectorStoreIndexWrapper(BaseModel):
         self,
         question: str,
         llm: Optional[BaseLanguageModel] = None,
-        retriever_kwargs: Optional[Dict[str, Any]] = None,
+        retriever_kwargs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> str:
         """Query the vectorstore using the provided LLM.
@@ -65,7 +65,7 @@ class VectorStoreIndexWrapper(BaseModel):
         self,
         question: str,
         llm: Optional[BaseLanguageModel] = None,
-        retriever_kwargs: Optional[Dict[str, Any]] = None,
+        retriever_kwargs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> str:
         """Asynchronously query the vectorstore using the provided LLM.
@@ -97,7 +97,7 @@ class VectorStoreIndexWrapper(BaseModel):
         self,
         question: str,
         llm: Optional[BaseLanguageModel] = None,
-        retriever_kwargs: Optional[Dict[str, Any]] = None,
+        retriever_kwargs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> dict:
         """Query the vectorstore and retrieve the answer along with sources.
@@ -129,7 +129,7 @@ class VectorStoreIndexWrapper(BaseModel):
         self,
         question: str,
         llm: Optional[BaseLanguageModel] = None,
-        retriever_kwargs: Optional[Dict[str, Any]] = None,
+        retriever_kwargs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> dict:
         """Asynchronously query the vectorstore and retrieve the answer and sources.
@@ -158,7 +158,7 @@ class VectorStoreIndexWrapper(BaseModel):
         return await chain.ainvoke({chain.question_key: question})
 
 
-def _get_in_memory_vectorstore() -> Type[VectorStore]:
+def _get_in_memory_vectorstore() -> type[VectorStore]:
     """Get the InMemoryVectorStore."""
     import warnings
 
@@ -179,7 +179,7 @@ def _get_in_memory_vectorstore() -> Type[VectorStore]:
 class VectorstoreIndexCreator(BaseModel):
     """Logic for creating indexes."""
 
-    vectorstore_cls: Type[VectorStore] = Field(
+    vectorstore_cls: type[VectorStore] = Field(
         default_factory=_get_in_memory_vectorstore
     )
     embedding: Embeddings
@@ -191,7 +191,7 @@ class VectorstoreIndexCreator(BaseModel):
         extra="forbid",
     )
 
-    def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
+    def from_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper:
         """Create a vectorstore index from a list of loaders.
 
         Args:
@@ -205,7 +205,7 @@ class VectorstoreIndexCreator(BaseModel):
             docs.extend(loader.load())
         return self.from_documents(docs)
 
-    async def afrom_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
+    async def afrom_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper:
         """Asynchronously create a vectorstore index from a list of loaders.
 
         Args:
@@ -220,7 +220,7 @@ class VectorstoreIndexCreator(BaseModel):
             docs.append(doc)
         return await self.afrom_documents(docs)
 
-    def from_documents(self, documents: List[Document]) -> VectorStoreIndexWrapper:
+    def from_documents(self, documents: list[Document]) -> VectorStoreIndexWrapper:
         """Create a vectorstore index from a list of documents.
 
         Args:
@@ -236,7 +236,7 @@ class VectorstoreIndexCreator(BaseModel):
         return VectorStoreIndexWrapper(vectorstore=vectorstore)
 
     async def afrom_documents(
-        self, documents: List[Document]
+        self, documents: list[Document]
     ) -> VectorStoreIndexWrapper:
         """Asynchronously create a vectorstore index from a list of documents.
 
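The vectorstore.py changes are annotation-only; runtime behavior and call patterns are unchanged. A hedged usage sketch of the touched entry points (TextLoader and FakeEmbeddings are stand-in components from langchain_community, not part of this diff):

# Hedged usage sketch; the loader and embeddings are illustrative stand-ins.
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import FakeEmbeddings

creator = VectorstoreIndexCreator(embedding=FakeEmbeddings(size=32))
index = creator.from_loaders([TextLoader("notes.txt")])

# retriever_kwargs is now annotated Optional[dict[str, Any]] instead of
# Optional[Dict[str, Any]]; callers pass the same plain dict as before, e.g.
#     index.query(question, llm=llm, retriever_kwargs={"search_kwargs": {"k": 4}})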
langchain/llms/__init__.py
CHANGED
@@ -19,7 +19,7 @@ access to the large language model (**LLM**) APIs and services.
 """  # noqa: E501
 
 import warnings
-from typing import Any, Callable, Dict, Type
+from typing import Any, Callable
 
 from langchain_core._api import LangChainDeprecationWarning
 from langchain_core.language_models.llms import BaseLLM
@@ -557,7 +557,7 @@ def __getattr__(name: str) -> Any:
 
     if name == "type_to_cls_dict":
         # for backwards compatibility
-        type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
+        type_to_cls_dict: dict[str, type[BaseLLM]] = {
            k: v() for k, v in get_type_to_cls_dict().items()
        }
        return type_to_cls_dict
@@ -650,7 +650,7 @@ __all__ = [
 ]
 
 
-def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
+def get_type_to_cls_dict() -> dict[str, Callable[[], type[BaseLLM]]]:
     return {
         "ai21": _import_ai21,
         "aleph_alpha": _import_aleph_alpha,
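In llms/__init__.py only the annotations change: get_type_to_cls_dict still maps provider names to zero-argument callables so each provider module is imported only on first use. A self-contained sketch of that lazy-registry pattern (BaseProvider and the registry below are illustrative placeholders, not langchain's actual classes):

# Self-contained sketch of the lazy-import registry pattern; BaseProvider
# and ProviderA are placeholders for BaseLLM and a concrete integration.
from typing import Callable


class BaseProvider: ...


def _import_provider_a() -> type[BaseProvider]:
    # Real code performs a deferred import here so heavy dependencies
    # load only when the provider is first requested.
    class ProviderA(BaseProvider): ...

    return ProviderA


REGISTRY: dict[str, Callable[[], type[BaseProvider]]] = {
    "provider_a": _import_provider_a,
}

llm_cls = REGISTRY["provider_a"]()  # resolves the class on demand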
langchain/memory/buffer.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.messages import BaseMessage, get_buffer_string
@@ -43,7 +43,7 @@ class ConversationBufferMemory(BaseChatMemory):
             else await self.abuffer_as_str()
         )
 
-    def _buffer_as_str(self, messages: List[BaseMessage]) -> str:
+    def _buffer_as_str(self, messages: list[BaseMessage]) -> str:
         return get_buffer_string(
             messages,
             human_prefix=self.human_prefix,
@@ -61,27 +61,27 @@ class ConversationBufferMemory(BaseChatMemory):
         return self._buffer_as_str(messages)
 
     @property
-    def buffer_as_messages(self) -> List[BaseMessage]:
+    def buffer_as_messages(self) -> list[BaseMessage]:
         """Exposes the buffer as a list of messages in case return_messages is False."""
         return self.chat_memory.messages
 
-    async def abuffer_as_messages(self) -> List[BaseMessage]:
+    async def abuffer_as_messages(self) -> list[BaseMessage]:
         """Exposes the buffer as a list of messages in case return_messages is False."""
         return await self.chat_memory.aget_messages()
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer."""
         return {self.memory_key: self.buffer}
 
-    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return key-value pairs given the text input to the chain."""
         buffer = await self.abuffer()
         return {self.memory_key: buffer}
@@ -117,7 +117,7 @@ class ConversationStringBufferMemory(BaseMemory):
     memory_key: str = "history"  #: :meta private:
 
     @pre_init
-    def validate_chains(cls, values: Dict) -> Dict:
+    def validate_chains(cls, values: dict) -> dict:
         """Validate that return messages is not True."""
         if values.get("return_messages", False):
             raise ValueError(
@@ -126,21 +126,21 @@ class ConversationStringBufferMemory(BaseMemory):
         return values
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
         """Return history buffer."""
         return {self.memory_key: self.buffer}
 
-    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
         """Return history buffer."""
         return self.load_memory_variables(inputs)
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this conversation to buffer."""
         if self.input_key is None:
             prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
@@ -157,7 +157,7 @@ class ConversationStringBufferMemory(BaseMemory):
         self.buffer += "\n" + "\n".join([human, ai])
 
     async def asave_context(
-        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+        self, inputs: dict[str, Any], outputs: dict[str, str]
     ) -> None:
         """Save context from this conversation to buffer."""
         return self.save_context(inputs, outputs)
langchain/memory/buffer_window.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Union
+from typing import Any, Union
 
 from langchain_core._api import deprecated
 from langchain_core.messages import BaseMessage, get_buffer_string
@@ -28,7 +28,7 @@ class ConversationBufferWindowMemory(BaseChatMemory):
     """Number of messages to store in buffer."""
 
     @property
-    def buffer(self) -> Union[str, List[BaseMessage]]:
+    def buffer(self) -> Union[str, list[BaseMessage]]:
         """String buffer of memory."""
         return self.buffer_as_messages if self.return_messages else self.buffer_as_str
 
@@ -43,18 +43,18 @@ class ConversationBufferWindowMemory(BaseChatMemory):
         )
 
     @property
-    def buffer_as_messages(self) -> List[BaseMessage]:
+    def buffer_as_messages(self) -> list[BaseMessage]:
         """Exposes the buffer as a list of messages in case return_messages is True."""
         return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else []
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer."""
         return {self.memory_key: self.buffer}
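ConversationBufferWindowMemory retains only the last k exchanges, and since each exchange is a human/AI message pair, the window is the slice messages[-k * 2:]. A standalone check of that slicing:

# Standalone sketch of the k-window slicing used by buffer_as_messages:
# k exchanges == the trailing k * 2 messages (one human + one AI each).
messages = ["human-1", "ai-1", "human-2", "ai-2", "human-3", "ai-3"]
k = 2

window = messages[-k * 2 :] if k > 0 else []
assert window == ["human-2", "ai-2", "human-3", "ai-3"]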
langchain/memory/chat_memory.py
CHANGED
@@ -1,6 +1,6 @@
 import warnings
 from abc import ABC
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.chat_history import (
@@ -41,8 +41,8 @@ class BaseChatMemory(BaseMemory, ABC):
     return_messages: bool = False
 
     def _get_input_output(
-        self, inputs: Dict[str, Any], outputs: Dict[str, str]
-    ) -> Tuple[str, str]:
+        self, inputs: dict[str, Any], outputs: dict[str, str]
+    ) -> tuple[str, str]:
         if self.input_key is None:
             prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
         else:
@@ -67,7 +67,7 @@ class BaseChatMemory(BaseMemory, ABC):
             output_key = self.output_key
         return inputs[prompt_input_key], outputs[output_key]
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this conversation to buffer."""
         input_str, output_str = self._get_input_output(inputs, outputs)
         self.chat_memory.add_messages(
@@ -78,7 +78,7 @@ class BaseChatMemory(BaseMemory, ABC):
         )
 
     async def asave_context(
-        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+        self, inputs: dict[str, Any], outputs: dict[str, str]
     ) -> None:
         """Save context from this conversation to buffer."""
         input_str, output_str = self._get_input_output(inputs, outputs)
langchain/memory/combined.py
CHANGED
@@ -1,5 +1,5 @@
 import warnings
-from typing import Any, Dict, List, Set
+from typing import Any
 
 from langchain_core.memory import BaseMemory
 from pydantic import field_validator
@@ -10,15 +10,15 @@ from langchain.memory.chat_memory import BaseChatMemory
 class CombinedMemory(BaseMemory):
     """Combining multiple memories' data together."""
 
-    memories: List[BaseMemory]
+    memories: list[BaseMemory]
     """For tracking all the memories that should be accessed."""
 
     @field_validator("memories")
     @classmethod
     def check_repeated_memory_variable(
-        cls, value: List[BaseMemory]
-    ) -> List[BaseMemory]:
-        all_variables: Set[str] = set()
+        cls, value: list[BaseMemory]
+    ) -> list[BaseMemory]:
+        all_variables: set[str] = set()
         for val in value:
             overlap = all_variables.intersection(val.memory_variables)
             if overlap:
@@ -32,7 +32,7 @@ class CombinedMemory(BaseMemory):
 
     @field_validator("memories")
     @classmethod
-    def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]:
+    def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
         """Check that if memories are of type BaseChatMemory that input keys exist."""
         for val in value:
             if isinstance(val, BaseChatMemory):
@@ -45,7 +45,7 @@ class CombinedMemory(BaseMemory):
         return value
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """All the memory variables that this instance provides."""
         """Collected from the all the linked memories."""
 
@@ -56,9 +56,9 @@ class CombinedMemory(BaseMemory):
 
         return memory_variables
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
         """Load all vars from sub-memories."""
-        memory_data: Dict[str, Any] = {}
+        memory_data: dict[str, Any] = {}
 
         # Collect vars from all sub-memories
         for memory in self.memories:
@@ -72,7 +72,7 @@ class CombinedMemory(BaseMemory):
 
         return memory_data
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this session for every memory."""
         # Save context for all sub-memories
         for memory in self.memories:
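CombinedMemory's check_repeated_memory_variable validator rejects sub-memories whose variable names collide, since a collision would let one memory silently shadow another in load_memory_variables. The core of that check as a standalone sketch:

# Standalone sketch of the duplicate-variable check shown above.
def check_no_repeats(memory_variable_sets: list[list[str]]) -> None:
    all_variables: set[str] = set()
    for variables in memory_variable_sets:
        overlap = all_variables.intersection(variables)
        if overlap:
            raise ValueError(f"Repeated memory variables found: {overlap}")
        all_variables |= set(variables)


check_no_repeats([["history"], ["entities"]])   # passes
# check_no_repeats([["history"], ["history"]])  # raises ValueError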
langchain/memory/entity.py
CHANGED
@@ -2,8 +2,9 @@
 
 import logging
 from abc import ABC, abstractmethod
+from collections.abc import Iterable
 from itertools import islice
-from typing import Any, Dict, Iterable, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
@@ -70,7 +71,7 @@ class BaseEntityStore(BaseModel, ABC):
 class InMemoryEntityStore(BaseEntityStore):
     """In-memory Entity store."""
 
-    store: Dict[str, Optional[str]] = {}
+    store: dict[str, Optional[str]] = {}
 
     def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
         return self.store.get(key, default)
@@ -403,7 +404,7 @@ class ConversationEntityMemory(BaseChatMemory):
 
     # Cache of recently detected entity names, if any
     # It is updated when load_memory_variables is called:
-    entity_cache: List[str] = []
+    entity_cache: list[str] = []
 
     # Number of recent message pairs to consider when updating entities:
     k: int = 3
@@ -414,19 +415,19 @@ class ConversationEntityMemory(BaseChatMemory):
     entity_store: BaseEntityStore = Field(default_factory=InMemoryEntityStore)
 
     @property
-    def buffer(self) -> List[BaseMessage]:
+    def buffer(self) -> list[BaseMessage]:
         """Access chat memory messages."""
         return self.chat_memory.messages
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return ["entities", self.chat_history_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """
         Returns chat history and all generated entities with summaries if available,
         and updates or clears the recent entity cache.
@@ -491,7 +492,7 @@ class ConversationEntityMemory(BaseChatMemory):
             "entities": entity_summaries,
         }
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """
         Save context from this conversation history to the entity store.
 
langchain/memory/readonly.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List
+from typing import Any
 
 from langchain_core.memory import BaseMemory
 
@@ -9,15 +9,15 @@ class ReadOnlySharedMemory(BaseMemory):
     memory: BaseMemory
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Return memory variables."""
         return self.memory.memory_variables
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
         """Load memory variables from memory."""
         return self.memory.load_memory_variables(inputs)
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Nothing should be saved or changed"""
         pass
 
langchain/memory/simple.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List
+from typing import Any
 
 from langchain_core.memory import BaseMemory
 
@@ -8,16 +8,16 @@ class SimpleMemory(BaseMemory):
     ever change between prompts.
     """
 
-    memories: Dict[str, Any] = dict()
+    memories: dict[str, Any] = dict()
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         return list(self.memories.keys())
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
         return self.memories
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Nothing should be saved or changed, my memory is set in stone."""
         pass
 
langchain/memory/summary.py
CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Any, Dict, List, Type
+from typing import Any
 
 from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache as BaseCache  # For model_rebuild
@@ -32,10 +32,10 @@ class SummarizerMixin(BaseModel):
     ai_prefix: str = "AI"
     llm: BaseLanguageModel
     prompt: BasePromptTemplate = SUMMARY_PROMPT
-    summary_message_cls: Type[BaseMessage] = SystemMessage
+    summary_message_cls: type[BaseMessage] = SystemMessage
 
     def predict_new_summary(
-        self, messages: List[BaseMessage], existing_summary: str
+        self, messages: list[BaseMessage], existing_summary: str
     ) -> str:
         new_lines = get_buffer_string(
             messages,
@@ -47,7 +47,7 @@ class SummarizerMixin(BaseModel):
         return chain.predict(summary=existing_summary, new_lines=new_lines)
 
     async def apredict_new_summary(
-        self, messages: List[BaseMessage], existing_summary: str
+        self, messages: list[BaseMessage], existing_summary: str
     ) -> str:
         new_lines = get_buffer_string(
             messages,
@@ -95,14 +95,14 @@ class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
         return obj
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer."""
         if self.return_messages:
             buffer: Any = [self.summary_message_cls(content=self.buffer)]
@@ -111,7 +111,7 @@ class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
         return {self.memory_key: buffer}
 
     @pre_init
-    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
+    def validate_prompt_input_variables(cls, values: dict) -> dict:
         """Validate that prompt input variables are consistent."""
         prompt_variables = values["prompt"].input_variables
         expected_keys = {"summary", "new_lines"}
@@ -122,7 +122,7 @@ class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
         )
         return values
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this conversation to buffer."""
         super().save_context(inputs, outputs)
         self.buffer = self.predict_new_summary(
langchain/memory/summary_buffer.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Union
+from typing import Any, Union
 
 from langchain_core._api import deprecated
 from langchain_core.messages import BaseMessage, get_buffer_string
@@ -29,28 +29,28 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
     memory_key: str = "history"
 
     @property
-    def buffer(self) -> Union[str, List[BaseMessage]]:
+    def buffer(self) -> Union[str, list[BaseMessage]]:
         """String buffer of memory."""
         return self.load_memory_variables({})[self.memory_key]
 
-    async def abuffer(self) -> Union[str, List[BaseMessage]]:
+    async def abuffer(self) -> Union[str, list[BaseMessage]]:
         """Async memory buffer."""
         memory_variables = await self.aload_memory_variables({})
         return memory_variables[self.memory_key]
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer."""
         buffer = self.chat_memory.messages
         if self.moving_summary_buffer != "":
-            first_messages: List[BaseMessage] = [
+            first_messages: list[BaseMessage] = [
                 self.summary_message_cls(content=self.moving_summary_buffer)
             ]
             buffer = first_messages + buffer
@@ -62,11 +62,11 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         )
         return {self.memory_key: final_buffer}
 
-    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Asynchronously return key-value pairs given the text input to the chain."""
         buffer = await self.chat_memory.aget_messages()
         if self.moving_summary_buffer != "":
-            first_messages: List[BaseMessage] = [
+            first_messages: list[BaseMessage] = [
                 self.summary_message_cls(content=self.moving_summary_buffer)
             ]
             buffer = first_messages + buffer
@@ -79,7 +79,7 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         return {self.memory_key: final_buffer}
 
     @pre_init
-    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
+    def validate_prompt_input_variables(cls, values: dict) -> dict:
         """Validate that prompt input variables are consistent."""
         prompt_variables = values["prompt"].input_variables
         expected_keys = {"summary", "new_lines"}
@@ -90,13 +90,13 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         )
         return values
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this conversation to buffer."""
         super().save_context(inputs, outputs)
         self.prune()
 
     async def asave_context(
-        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+        self, inputs: dict[str, Any], outputs: dict[str, str]
     ) -> None:
         """Asynchronously save context from this conversation to buffer."""
         await super().asave_context(inputs, outputs)
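When ConversationSummaryBufferMemory holds a running summary, load_memory_variables prepends it to the live messages as a single summary_message_cls instance (SystemMessage by default). A sketch of that assembly step:

# Sketch of the buffer assembly in load_memory_variables: a non-empty
# running summary is prepended as one summary message.
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage

moving_summary_buffer = "The human asked about shipping times."
buffer: list[BaseMessage] = [HumanMessage("And returns?"), AIMessage("Within 30 days.")]

if moving_summary_buffer != "":
    first_messages: list[BaseMessage] = [SystemMessage(content=moving_summary_buffer)]
    buffer = first_messages + buffer

assert isinstance(buffer[0], SystemMessage)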
langchain/memory/token_buffer.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List
+from typing import Any
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
@@ -43,23 +43,23 @@ class ConversationTokenBufferMemory(BaseChatMemory):
     )
 
     @property
-    def buffer_as_messages(self) -> List[BaseMessage]:
+    def buffer_as_messages(self) -> list[BaseMessage]:
         """Exposes the buffer as a list of messages in case return_messages is True."""
         return self.chat_memory.messages
 
     @property
-    def memory_variables(self) -> List[str]:
+    def memory_variables(self) -> list[str]:
         """Will always return list of memory variables.
 
         :meta private:
         """
         return [self.memory_key]
 
-    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
         """Return history buffer."""
         return {self.memory_key: self.buffer}
 
-    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
         """Save context from this conversation to buffer. Pruned."""
         super().save_context(inputs, outputs)
         # Prune buffer if it exceeds max token limit
langchain/memory/utils.py
CHANGED
@@ -1,7 +1,7 @@
-from typing import Any, Dict, List
+from typing import Any
 
 
-def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str:
+def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
     """
     Get the prompt input key.
 