langchain 0.3.22__py3-none-any.whl → 0.3.24__py3-none-any.whl
This diff shows the changes between two publicly released package versions, as published to their respective public registries, and is provided for informational purposes only.
- langchain/_api/module_import.py +3 -3
- langchain/agents/agent.py +104 -109
- langchain/agents/agent_iterator.py +11 -15
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
- langchain/agents/chat/base.py +7 -6
- langchain/agents/chat/output_parser.py +2 -1
- langchain/agents/conversational/base.py +5 -4
- langchain/agents/conversational_chat/base.py +9 -8
- langchain/agents/format_scratchpad/log.py +1 -3
- langchain/agents/format_scratchpad/log_to_messages.py +3 -5
- langchain/agents/format_scratchpad/openai_functions.py +4 -4
- langchain/agents/format_scratchpad/tools.py +3 -3
- langchain/agents/format_scratchpad/xml.py +1 -3
- langchain/agents/initialize.py +2 -1
- langchain/agents/json_chat/base.py +3 -2
- langchain/agents/loading.py +5 -5
- langchain/agents/mrkl/base.py +6 -5
- langchain/agents/openai_assistant/base.py +13 -17
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
- langchain/agents/openai_functions_agent/base.py +13 -12
- langchain/agents/openai_functions_multi_agent/base.py +15 -14
- langchain/agents/openai_tools/base.py +2 -1
- langchain/agents/output_parsers/openai_functions.py +2 -2
- langchain/agents/output_parsers/openai_tools.py +6 -6
- langchain/agents/output_parsers/react_json_single_input.py +2 -1
- langchain/agents/output_parsers/self_ask.py +2 -1
- langchain/agents/output_parsers/tools.py +7 -7
- langchain/agents/react/agent.py +3 -2
- langchain/agents/react/base.py +4 -3
- langchain/agents/schema.py +3 -3
- langchain/agents/self_ask_with_search/base.py +2 -1
- langchain/agents/structured_chat/base.py +9 -8
- langchain/agents/structured_chat/output_parser.py +2 -1
- langchain/agents/tool_calling_agent/base.py +3 -2
- langchain/agents/tools.py +4 -4
- langchain/agents/types.py +3 -3
- langchain/agents/utils.py +1 -1
- langchain/agents/xml/base.py +7 -6
- langchain/callbacks/streaming_aiter.py +3 -2
- langchain/callbacks/streaming_aiter_final_only.py +3 -3
- langchain/callbacks/streaming_stdout_final_only.py +3 -3
- langchain/chains/api/base.py +11 -12
- langchain/chains/base.py +47 -50
- langchain/chains/combine_documents/base.py +23 -23
- langchain/chains/combine_documents/map_reduce.py +12 -12
- langchain/chains/combine_documents/map_rerank.py +16 -15
- langchain/chains/combine_documents/reduce.py +17 -17
- langchain/chains/combine_documents/refine.py +12 -12
- langchain/chains/combine_documents/stuff.py +10 -10
- langchain/chains/constitutional_ai/base.py +9 -9
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +30 -30
- langchain/chains/elasticsearch_database/base.py +13 -13
- langchain/chains/example_generator.py +1 -3
- langchain/chains/flare/base.py +13 -12
- langchain/chains/flare/prompts.py +2 -4
- langchain/chains/hyde/base.py +8 -8
- langchain/chains/llm.py +31 -30
- langchain/chains/llm_checker/base.py +6 -6
- langchain/chains/llm_math/base.py +10 -10
- langchain/chains/llm_summarization_checker/base.py +6 -6
- langchain/chains/loading.py +12 -14
- langchain/chains/mapreduce.py +7 -6
- langchain/chains/moderation.py +8 -8
- langchain/chains/natbot/base.py +6 -6
- langchain/chains/openai_functions/base.py +8 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
- langchain/chains/openai_functions/extraction.py +3 -3
- langchain/chains/openai_functions/openapi.py +12 -12
- langchain/chains/openai_functions/qa_with_structure.py +4 -4
- langchain/chains/openai_functions/utils.py +2 -2
- langchain/chains/openai_tools/extraction.py +2 -2
- langchain/chains/prompt_selector.py +3 -3
- langchain/chains/qa_generation/base.py +5 -5
- langchain/chains/qa_with_sources/base.py +21 -21
- langchain/chains/qa_with_sources/loading.py +2 -1
- langchain/chains/qa_with_sources/retrieval.py +6 -6
- langchain/chains/qa_with_sources/vector_db.py +8 -8
- langchain/chains/query_constructor/base.py +4 -3
- langchain/chains/query_constructor/parser.py +5 -4
- langchain/chains/question_answering/chain.py +3 -2
- langchain/chains/retrieval.py +2 -2
- langchain/chains/retrieval_qa/base.py +16 -16
- langchain/chains/router/base.py +12 -11
- langchain/chains/router/embedding_router.py +12 -11
- langchain/chains/router/llm_router.py +12 -12
- langchain/chains/router/multi_prompt.py +3 -3
- langchain/chains/router/multi_retrieval_qa.py +5 -4
- langchain/chains/sequential.py +18 -18
- langchain/chains/sql_database/query.py +4 -4
- langchain/chains/structured_output/base.py +14 -13
- langchain/chains/summarize/chain.py +4 -3
- langchain/chains/transform.py +12 -11
- langchain/chat_models/base.py +34 -31
- langchain/embeddings/__init__.py +1 -1
- langchain/embeddings/base.py +4 -4
- langchain/embeddings/cache.py +19 -18
- langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
- langchain/evaluation/comparison/eval_chain.py +10 -10
- langchain/evaluation/criteria/eval_chain.py +11 -10
- langchain/evaluation/embedding_distance/base.py +21 -21
- langchain/evaluation/exact_match/base.py +3 -3
- langchain/evaluation/loading.py +7 -8
- langchain/evaluation/qa/eval_chain.py +7 -6
- langchain/evaluation/regex_match/base.py +3 -3
- langchain/evaluation/schema.py +6 -5
- langchain/evaluation/scoring/eval_chain.py +9 -9
- langchain/evaluation/string_distance/base.py +23 -23
- langchain/hub.py +2 -1
- langchain/indexes/_sql_record_manager.py +8 -7
- langchain/indexes/vectorstore.py +11 -11
- langchain/llms/__init__.py +3 -3
- langchain/memory/buffer.py +13 -13
- langchain/memory/buffer_window.py +5 -5
- langchain/memory/chat_memory.py +5 -5
- langchain/memory/combined.py +10 -10
- langchain/memory/entity.py +8 -7
- langchain/memory/readonly.py +4 -4
- langchain/memory/simple.py +5 -5
- langchain/memory/summary.py +8 -8
- langchain/memory/summary_buffer.py +11 -11
- langchain/memory/token_buffer.py +5 -5
- langchain/memory/utils.py +2 -2
- langchain/memory/vectorstore.py +15 -14
- langchain/memory/vectorstore_token_buffer_memory.py +7 -7
- langchain/model_laboratory.py +4 -3
- langchain/output_parsers/combining.py +5 -5
- langchain/output_parsers/datetime.py +1 -2
- langchain/output_parsers/enum.py +4 -5
- langchain/output_parsers/pandas_dataframe.py +5 -5
- langchain/output_parsers/regex.py +4 -4
- langchain/output_parsers/regex_dict.py +4 -4
- langchain/output_parsers/retry.py +2 -2
- langchain/output_parsers/structured.py +5 -5
- langchain/output_parsers/yaml.py +3 -3
- langchain/pydantic_v1/__init__.py +1 -6
- langchain/pydantic_v1/dataclasses.py +1 -5
- langchain/pydantic_v1/main.py +1 -5
- langchain/retrievers/contextual_compression.py +3 -3
- langchain/retrievers/document_compressors/base.py +3 -2
- langchain/retrievers/document_compressors/chain_extract.py +4 -3
- langchain/retrievers/document_compressors/chain_filter.py +3 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
- langchain/retrievers/document_compressors/cross_encoder.py +1 -2
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
- langchain/retrievers/ensemble.py +15 -19
- langchain/retrievers/merger_retriever.py +7 -12
- langchain/retrievers/multi_query.py +14 -13
- langchain/retrievers/multi_vector.py +4 -4
- langchain/retrievers/parent_document_retriever.py +9 -8
- langchain/retrievers/re_phraser.py +2 -3
- langchain/retrievers/self_query/base.py +13 -12
- langchain/retrievers/time_weighted_retriever.py +14 -14
- langchain/runnables/openai_functions.py +4 -3
- langchain/smith/evaluation/config.py +7 -6
- langchain/smith/evaluation/progress.py +3 -2
- langchain/smith/evaluation/runner_utils.py +58 -61
- langchain/smith/evaluation/string_run_evaluator.py +29 -29
- langchain/storage/encoder_backed.py +7 -11
- langchain/storage/file_system.py +5 -4
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/METADATA +5 -3
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/RECORD +169 -169
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/WHEEL +1 -1
- langchain-0.3.24.dist-info/entry_points.txt +4 -0
- langchain-0.3.22.dist-info/entry_points.txt +0 -5
- {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/licenses/LICENSE +0 -0
langchain/chains/combine_documents/map_rerank.py

@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast
+from collections.abc import Sequence
+from typing import Any, Optional, Union, cast
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import Callbacks
@@ -79,7 +80,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
     """Key in output of llm_chain to rank on."""
     answer_key: str
     """Key in output of llm_chain to return as answer."""
-    metadata_keys: Optional[List[str]] = None
+    metadata_keys: Optional[list[str]] = None
     """Additional metadata from the chosen document to return."""
     return_intermediate_steps: bool = False
     """Return intermediate steps.
@@ -92,19 +93,19 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
 
     def get_output_schema(
         self, config: Optional[RunnableConfig] = None
-    ) -> Type[BaseModel]:
-        schema: Dict[str, Any] = {
+    ) -> type[BaseModel]:
+        schema: dict[str, Any] = {
             self.output_key: (str, None),
         }
         if self.return_intermediate_steps:
-            schema["intermediate_steps"] = (List[str], None)
+            schema["intermediate_steps"] = (list[str], None)
         if self.metadata_keys:
             schema.update({key: (Any, None) for key in self.metadata_keys})
 
         return create_model("MapRerankOutput", **schema)
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Expect input key.
 
         :meta private:
@@ -140,7 +141,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
 
     @model_validator(mode="before")
     @classmethod
-    def get_default_document_variable_name(cls, values: Dict) -> Any:
+    def get_default_document_variable_name(cls, values: dict) -> Any:
         """Get default document variable name, if not provided."""
         if "llm_chain" not in values:
             raise ValueError("llm_chain must be provided")
@@ -163,8 +164,8 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
         return values
 
     def combine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Combine documents in a map rerank manner.
 
         Combine by mapping first chain over all documents, then reranking the results.
@@ -187,8 +188,8 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
         return self._process_results(docs, results)
 
     async def acombine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Combine documents in a map rerank manner.
 
         Combine by mapping first chain over all documents, then reranking the results.
@@ -212,10 +213,10 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
 
     def _process_results(
         self,
-        docs: List[Document],
-        results: Sequence[Union[str, List[str], Dict[str, str]]],
-    ) -> Tuple[str, dict]:
-        typed_results = cast(List[dict], results)
+        docs: list[Document],
+        results: Sequence[Union[str, list[str], dict[str, str]]],
+    ) -> tuple[str, dict]:
+        typed_results = cast(list[dict], results)
         sorted_res = sorted(
             zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key])
         )
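The hunks above show the pattern that repeats through every file in this diff: typing.List, Dict, Tuple, and Type annotations are replaced by the builtin generics standardized in PEP 585, which Python 3.9+ accepts at runtime. A minimal before/after sketch, independent of langchain:

from typing import Optional

# 0.3.22 style (removed):
#   from typing import Dict, List, Tuple
#   def combine(docs: List[str]) -> Tuple[str, Dict[str, int]]: ...

# 0.3.24 style: builtin generics, fewer typing imports.
def combine(docs: list[str]) -> tuple[str, dict[str, int]]:
    return "\n".join(docs), {"count": len(docs)}

meta: Optional[dict[str, int]] = None
text, meta = combine(["a", "b"])
assert meta == {"count": 2}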
langchain/chains/combine_documents/reduce.py

@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Any, Callable, List, Optional, Protocol, Tuple
+from typing import Any, Callable, Optional, Protocol
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import Callbacks
@@ -15,20 +15,20 @@ from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
 class CombineDocsProtocol(Protocol):
     """Interface for the combine_docs method."""
 
-    def __call__(self, docs: List[Document], **kwargs: Any) -> str:
+    def __call__(self, docs: list[Document], **kwargs: Any) -> str:
         """Interface for the combine_docs method."""
 
 
 class AsyncCombineDocsProtocol(Protocol):
     """Interface for the combine_docs method."""
 
-    async def __call__(self, docs: List[Document], **kwargs: Any) -> str:
+    async def __call__(self, docs: list[Document], **kwargs: Any) -> str:
         """Async interface for the combine_docs method."""
 
 
 def split_list_of_docs(
-    docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
-) -> List[List[Document]]:
+    docs: list[Document], length_func: Callable, token_max: int, **kwargs: Any
+) -> list[list[Document]]:
     """Split Documents into subsets that each meet a cumulative length constraint.
 
     Args:
@@ -59,7 +59,7 @@ def split_list_of_docs(
 
 
 def collapse_docs(
-    docs: List[Document],
+    docs: list[Document],
     combine_document_func: CombineDocsProtocol,
     **kwargs: Any,
 ) -> Document:
@@ -91,7 +91,7 @@ def collapse_docs(
 
 
 async def acollapse_docs(
-    docs: List[Document],
+    docs: list[Document],
     combine_document_func: AsyncCombineDocsProtocol,
     **kwargs: Any,
 ) -> Document:
@@ -229,11 +229,11 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
 
     def combine_docs(
         self,
-        docs: List[Document],
+        docs: list[Document],
         token_max: Optional[int] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Tuple[str, dict]:
+    ) -> tuple[str, dict]:
         """Combine multiple documents recursively.
 
         Args:
@@ -258,11 +258,11 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
 
     async def acombine_docs(
         self,
-        docs: List[Document],
+        docs: list[Document],
         token_max: Optional[int] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Tuple[str, dict]:
+    ) -> tuple[str, dict]:
         """Async combine multiple documents recursively.
 
         Args:
@@ -287,16 +287,16 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
 
     def _collapse(
         self,
-        docs: List[Document],
+        docs: list[Document],
         token_max: Optional[int] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Tuple[List[Document], dict]:
+    ) -> tuple[list[Document], dict]:
         result_docs = docs
         length_func = self.combine_documents_chain.prompt_length
         num_tokens = length_func(result_docs, **kwargs)
 
-        def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
+        def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str:
             return self._collapse_chain.run(
                 input_documents=docs, callbacks=callbacks, **kwargs
             )
@@ -322,16 +322,16 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
 
     async def _acollapse(
         self,
-        docs: List[Document],
+        docs: list[Document],
         token_max: Optional[int] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
-    ) -> Tuple[List[Document], dict]:
+    ) -> tuple[list[Document], dict]:
         result_docs = docs
         length_func = self.combine_documents_chain.prompt_length
         num_tokens = length_func(result_docs, **kwargs)
 
-        async def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
+        async def _collapse_docs_func(docs: list[Document], **kwargs: Any) -> str:
             return await self._collapse_chain.arun(
                 input_documents=docs, callbacks=callbacks, **kwargs
             )
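As a usage sketch for the split_list_of_docs signature retyped above; the character-count length_func here is a toy stand-in for the chain's real prompt_length method:

from langchain_core.documents import Document

from langchain.chains.combine_documents.reduce import split_list_of_docs

def char_length(docs: list[Document], **kwargs) -> int:
    # Stand-in for combine_documents_chain.prompt_length.
    return sum(len(d.page_content) for d in docs)

docs = [Document(page_content="x" * 40) for _ in range(5)]
# Packs documents into consecutive subsets whose length stays <= token_max.
subsets = split_list_of_docs(docs, char_length, token_max=100)
assert all(char_length(subset) <= 100 for subset in subsets)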
langchain/chains/combine_documents/refine.py

@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Any, Dict, List, Tuple
+from typing import Any
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import Callbacks
@@ -98,7 +98,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
     """Return the results of the refine steps in the output."""
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Expect input key.
 
         :meta private:
@@ -115,7 +115,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
 
     @model_validator(mode="before")
     @classmethod
-    def get_return_intermediate_steps(cls, values: Dict) -> Any:
+    def get_return_intermediate_steps(cls, values: dict) -> Any:
         """For backwards compatibility."""
         if "return_refine_steps" in values:
             values["return_intermediate_steps"] = values["return_refine_steps"]
@@ -124,7 +124,7 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
 
     @model_validator(mode="before")
     @classmethod
-    def get_default_document_variable_name(cls, values: Dict) -> Any:
+    def get_default_document_variable_name(cls, values: dict) -> Any:
         """Get default document variable name, if not provided."""
         if "initial_llm_chain" not in values:
             raise ValueError("initial_llm_chain must be provided")
@@ -147,8 +147,8 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
         return values
 
     def combine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Combine by mapping first chain over all, then stuffing into final chain.
 
         Args:
@@ -172,8 +172,8 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
         return self._construct_result(refine_steps, res)
 
     async def acombine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Async combine by mapping a first chain over all, then stuffing
         into a final chain.
 
@@ -197,22 +197,22 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
             refine_steps.append(res)
         return self._construct_result(refine_steps, res)
 
-    def _construct_result(self, refine_steps: List[str], res: str) -> Tuple[str, dict]:
+    def _construct_result(self, refine_steps: list[str], res: str) -> tuple[str, dict]:
         if self.return_intermediate_steps:
             extra_return_dict = {"intermediate_steps": refine_steps}
         else:
             extra_return_dict = {}
         return res, extra_return_dict
 
-    def _construct_refine_inputs(self, doc: Document, res: str) -> Dict[str, Any]:
+    def _construct_refine_inputs(self, doc: Document, res: str) -> dict[str, Any]:
         return {
             self.document_variable_name: format_document(doc, self.document_prompt),
             self.initial_response_name: res,
         }
 
     def _construct_initial_inputs(
-        self, docs: List[Document], **kwargs: Any
-    ) -> Dict[str, Any]:
+        self, docs: list[Document], **kwargs: Any
+    ) -> dict[str, Any]:
         base_info = {"page_content": docs[0].page_content}
         base_info.update(docs[0].metadata)
         document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
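For orientation, the refine pattern behind the combine_docs and _construct_result signatures above, reduced to a plain-Python sketch (a simplification: the real chain builds prompts with document_prompt and runs initial_llm_chain and refine_llm_chain):

from collections.abc import Callable

def refine(
    docs: list[str],
    initial: Callable[[str], str],
    refine_step: Callable[[str, str], str],
) -> tuple[str, dict]:
    # The first document seeds the answer; each later document refines it.
    res = initial(docs[0])
    steps = [res]
    for doc in docs[1:]:
        res = refine_step(doc, res)
        steps.append(res)
    # Mirrors _construct_result with return_intermediate_steps=True.
    return res, {"intermediate_steps": steps}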
langchain/chains/combine_documents/stuff.py

@@ -1,6 +1,6 @@
 """Chain that combines documents by stuffing into context."""
 
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import Callbacks
@@ -29,7 +29,7 @@ def create_stuff_documents_chain(
     document_prompt: Optional[BasePromptTemplate] = None,
     document_separator: str = DEFAULT_DOCUMENT_SEPARATOR,
     document_variable_name: str = DOCUMENTS_KEY,
-) -> Runnable[Dict[str, Any], Any]:
+) -> Runnable[dict[str, Any], Any]:
     """Create a chain for passing a list of Documents to a model.
 
     Args:
@@ -163,7 +163,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
 
     @model_validator(mode="before")
     @classmethod
-    def get_default_document_variable_name(cls, values: Dict) -> Any:
+    def get_default_document_variable_name(cls, values: dict) -> Any:
         """Get default document variable name, if not provided.
 
         If only one variable is present in the llm_chain.prompt,
@@ -188,13 +188,13 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return values
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         extra_keys = [
             k for k in self.llm_chain.input_keys if k != self.document_variable_name
         ]
         return super().input_keys + extra_keys
 
-    def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
+    def _get_inputs(self, docs: list[Document], **kwargs: Any) -> dict:
         """Construct inputs from kwargs and docs.
 
         Format and then join all the documents together into one input with name
@@ -220,7 +220,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         inputs[self.document_variable_name] = self.document_separator.join(doc_strings)
         return inputs
 
-    def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
+    def prompt_length(self, docs: list[Document], **kwargs: Any) -> Optional[int]:
         """Return the prompt length given the documents passed in.
 
         This can be used by a caller to determine whether passing in a list
@@ -241,8 +241,8 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return self.llm_chain._get_num_tokens(prompt)
 
     def combine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Stuff all documents into one prompt and pass to LLM.
 
         Args:
@@ -259,8 +259,8 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
 
     async def acombine_docs(
-        self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
-    ) -> Tuple[str, dict]:
+        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+    ) -> tuple[str, dict]:
         """Async stuff all documents into one prompt and pass to LLM.
 
         Args:
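create_stuff_documents_chain, whose return annotation changes above, is the maintained replacement for the deprecated StuffDocumentsChain. A minimal sketch with a fake chat model standing in for a real one:

from langchain_core.documents import Document
from langchain_core.language_models import FakeListChatModel
from langchain_core.prompts import ChatPromptTemplate

from langchain.chains.combine_documents import create_stuff_documents_chain

prompt = ChatPromptTemplate.from_messages(
    [("human", "Summarize this context:\n\n{context}")]
)
llm = FakeListChatModel(responses=["A summary."])
chain = create_stuff_documents_chain(llm, prompt)  # Runnable[dict[str, Any], Any]

docs = [
    Document(page_content="LangChain ships document chains."),
    Document(page_content="Stuffing concatenates all documents into one prompt."),
]
result = chain.invoke({"context": docs})  # -> "A summary."

The input dict key must match document_variable_name, which defaults to "context" (DOCUMENTS_KEY).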
langchain/chains/constitutional_ai/base.py

@@ -1,6 +1,6 @@
 """Chain for applying constitutional principles to the outputs of another chain."""
 
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
@@ -190,15 +190,15 @@ class ConstitutionalChain(Chain):
     """  # noqa: E501
 
     chain: LLMChain
-    constitutional_principles: List[ConstitutionalPrinciple]
+    constitutional_principles: list[ConstitutionalPrinciple]
     critique_chain: LLMChain
     revision_chain: LLMChain
     return_intermediate_steps: bool = False
 
     @classmethod
     def get_principles(
-        cls, names: Optional[List[str]] = None
-    ) -> List[ConstitutionalPrinciple]:
+        cls, names: Optional[list[str]] = None
+    ) -> list[ConstitutionalPrinciple]:
         if names is None:
             return list(PRINCIPLES.values())
         else:
@@ -224,12 +224,12 @@ class ConstitutionalChain(Chain):
         )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Input keys."""
         return self.chain.input_keys
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Output keys."""
         if self.return_intermediate_steps:
             return ["output", "critiques_and_revisions", "initial_output"]
@@ -237,9 +237,9 @@ class ConstitutionalChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         response = self.chain.run(
             **inputs,
@@ -305,7 +305,7 @@ class ConstitutionalChain(Chain):
                 color="yellow",
             )
 
-        final_output: Dict[str, Any] = {"output": response}
+        final_output: dict[str, Any] = {"output": response}
         if self.return_intermediate_steps:
             final_output["initial_output"] = initial_response
             final_output["critiques_and_revisions"] = critiques_and_revisions
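The retyped get_principles classmethod above can be exercised directly; with names=None it returns every built-in principle:

from langchain.chains.constitutional_ai.base import ConstitutionalChain

# names=None returns list(PRINCIPLES.values()) per the hunk above.
principles = ConstitutionalChain.get_principles()
assert principles and all(isinstance(p.name, str) for p in principles)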
langchain/chains/conversation/base.py

@@ -1,7 +1,5 @@
 """Chain that carries on a conversation and calls an LLM."""
 
-from typing import List
-
 from langchain_core._api import deprecated
 from langchain_core.memory import BaseMemory
 from langchain_core.prompts import BasePromptTemplate
@@ -21,7 +19,7 @@ from langchain.memory.buffer import ConversationBufferMemory
     ),
     removal="1.0",
 )
-class ConversationChain(LLMChain):  # type: ignore[override, override]
+class ConversationChain(LLMChain):
     """Chain to have a conversation and load context from memory.
 
     This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
@@ -121,7 +119,7 @@ class ConversationChain(LLMChain): # type: ignore[override, override]
         return False
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Use this since so some prompt vars come from history."""
         return [self.input_key]
 
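The docstring context above names RunnableWithMessageHistory as the replacement for the deprecated ConversationChain. A minimal sketch with an in-memory history store and a fake chat model (swap any real chat model in for llm):

from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.language_models import FakeListChatModel
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory

store: dict[str, InMemoryChatMessageHistory] = {}

def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    # One mutable history object per session id.
    return store.setdefault(session_id, InMemoryChatMessageHistory())

llm = FakeListChatModel(responses=["Hello!"])
prompt = ChatPromptTemplate.from_messages(
    [MessagesPlaceholder("history"), ("human", "{input}")]
)
chat = RunnableWithMessageHistory(
    prompt | llm,
    get_session_history,
    input_messages_key="input",
    history_messages_key="history",
)
chat.invoke({"input": "hi"}, config={"configurable": {"session_id": "demo"}})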
langchain/chains/conversational_retrieval/base.py

@@ -6,7 +6,7 @@ import inspect
 import warnings
 from abc import abstractmethod
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
+from typing import Any, Callable, Optional, Union
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import (
@@ -32,13 +32,13 @@ from langchain.chains.question_answering import load_qa_chain
 
 # Depending on the memory type and configuration, the chat history format may differ.
 # This needs to be consolidated.
-CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage]
+CHAT_TURN_TYPE = Union[tuple[str, str], BaseMessage]
 
 
 _ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}
 
 
-def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
+def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
     buffer = ""
     for dialogue_turn in chat_history:
         if isinstance(dialogue_turn, BaseMessage):
@@ -64,7 +64,7 @@ class InputType(BaseModel):
 
     question: str
     """The question to answer."""
-    chat_history: List[CHAT_TURN_TYPE] = Field(default_factory=list)
+    chat_history: list[CHAT_TURN_TYPE] = Field(default_factory=list)
     """The chat history to use for retrieval."""
 
 
@@ -89,7 +89,7 @@ class BaseConversationalRetrievalChain(Chain):
     """Return the retrieved source documents as part of the final result."""
     return_generated_question: bool = False
     """Return the generated question as part of the final result."""
-    get_chat_history: Optional[Callable[[List[CHAT_TURN_TYPE]], str]] = None
+    get_chat_history: Optional[Callable[[list[CHAT_TURN_TYPE]], str]] = None
     """An optional function to get a string of the chat history.
     If None is provided, will use a default."""
     response_if_no_docs_found: Optional[str] = None
@@ -103,17 +103,17 @@ class BaseConversationalRetrievalChain(Chain):
     )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Input keys."""
         return ["question", "chat_history"]
 
     def get_input_schema(
         self, config: Optional[RunnableConfig] = None
-    ) -> Type[BaseModel]:
+    ) -> type[BaseModel]:
         return InputType
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return the output keys.
 
         :meta private:
@@ -129,17 +129,17 @@ class BaseConversationalRetrievalChain(Chain):
     def _get_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: CallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         question = inputs["question"]
         get_chat_history = self.get_chat_history or _get_chat_history
@@ -159,7 +159,7 @@ class BaseConversationalRetrievalChain(Chain):
             docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
         else:
             docs = self._get_docs(new_question, inputs)  # type: ignore[call-arg]
-        output: Dict[str, Any] = {}
+        output: dict[str, Any] = {}
         if self.response_if_no_docs_found is not None and len(docs) == 0:
             output[self.output_key] = self.response_if_no_docs_found
         else:
@@ -182,17 +182,17 @@ class BaseConversationalRetrievalChain(Chain):
     async def _aget_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: AsyncCallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
 
     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         question = inputs["question"]
         get_chat_history = self.get_chat_history or _get_chat_history
@@ -212,7 +212,7 @@ class BaseConversationalRetrievalChain(Chain):
         else:
             docs = await self._aget_docs(new_question, inputs)  # type: ignore[call-arg]
 
-        output: Dict[str, Any] = {}
+        output: dict[str, Any] = {}
         if self.response_if_no_docs_found is not None and len(docs) == 0:
             output[self.output_key] = self.response_if_no_docs_found
         else:
@@ -368,7 +368,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     """If set, enforces that the documents returned are less than this limit.
    This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain."""
 
-    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
+    def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
         num_docs = len(docs)
 
         if self.max_tokens_limit and isinstance(
@@ -388,10 +388,10 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     def _get_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: CallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
         docs = self.retriever.invoke(
             question, config={"callbacks": run_manager.get_child()}
@@ -401,10 +401,10 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     async def _aget_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: AsyncCallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
         docs = await self.retriever.ainvoke(
             question, config={"callbacks": run_manager.get_child()}
@@ -420,7 +420,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
         chain_type: str = "stuff",
         verbose: bool = False,
         condense_question_llm: Optional[BaseLanguageModel] = None,
-        combine_docs_chain_kwargs: Optional[Dict] = None,
+        combine_docs_chain_kwargs: Optional[dict] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> BaseConversationalRetrievalChain:
@@ -485,7 +485,7 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
 
     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: Dict) -> Any:
+    def raise_deprecation(cls, values: dict) -> Any:
         warnings.warn(
             "`ChatVectorDBChain` is deprecated - "
             "please use `from langchain.chains import ConversationalRetrievalChain`"
@@ -495,10 +495,10 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
     def _get_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: CallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
         vectordbkwargs = inputs.get("vectordbkwargs", {})
         full_kwargs = {**self.search_kwargs, **vectordbkwargs}
@@ -509,10 +509,10 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
     async def _aget_docs(
         self,
         question: str,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         *,
         run_manager: AsyncCallbackManagerForChainRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         """Get docs."""
         raise NotImplementedError("ChatVectorDBChain does not support async")
 
@@ -523,7 +523,7 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
         vectorstore: VectorStore,
         condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
         chain_type: str = "stuff",
-        combine_docs_chain_kwargs: Optional[Dict] = None,
+        combine_docs_chain_kwargs: Optional[dict] = None,
         callbacks: Callbacks = None,
         **kwargs: Any,
     ) -> BaseConversationalRetrievalChain:
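CHAT_TURN_TYPE above documents the two shapes a chat-history entry may take. A sketch of a custom formatter that fits the retyped get_chat_history slot, Callable[[list[CHAT_TURN_TYPE]], str] (format_history is an illustrative name, not part of the package):

from typing import Union

from langchain_core.messages import BaseMessage, HumanMessage

ChatTurn = Union[tuple[str, str], BaseMessage]  # mirrors CHAT_TURN_TYPE

def format_history(chat_history: list[ChatTurn]) -> str:
    lines = []
    for turn in chat_history:
        if isinstance(turn, BaseMessage):
            role = "Human" if isinstance(turn, HumanMessage) else "Assistant"
            lines.append(f"{role}: {turn.content}")
        else:
            human, ai = turn  # a (human, ai) string tuple
            lines.append(f"Human: {human}")
            lines.append(f"Assistant: {ai}")
    return "\n".join(lines)

# Usable as ConversationalRetrievalChain(..., get_chat_history=format_history).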