langchain 0.3.23__py3-none-any.whl → 0.3.24__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- langchain/_api/module_import.py +3 -3
- langchain/agents/agent.py +104 -109
- langchain/agents/agent_iterator.py +11 -15
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
- langchain/agents/chat/base.py +7 -6
- langchain/agents/chat/output_parser.py +2 -1
- langchain/agents/conversational/base.py +5 -4
- langchain/agents/conversational_chat/base.py +9 -8
- langchain/agents/format_scratchpad/log.py +1 -3
- langchain/agents/format_scratchpad/log_to_messages.py +3 -5
- langchain/agents/format_scratchpad/openai_functions.py +4 -4
- langchain/agents/format_scratchpad/tools.py +3 -3
- langchain/agents/format_scratchpad/xml.py +1 -3
- langchain/agents/initialize.py +2 -1
- langchain/agents/json_chat/base.py +3 -2
- langchain/agents/loading.py +5 -5
- langchain/agents/mrkl/base.py +6 -5
- langchain/agents/openai_assistant/base.py +13 -17
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
- langchain/agents/openai_functions_agent/base.py +13 -12
- langchain/agents/openai_functions_multi_agent/base.py +15 -14
- langchain/agents/openai_tools/base.py +2 -1
- langchain/agents/output_parsers/openai_functions.py +2 -2
- langchain/agents/output_parsers/openai_tools.py +6 -6
- langchain/agents/output_parsers/react_json_single_input.py +2 -1
- langchain/agents/output_parsers/self_ask.py +2 -1
- langchain/agents/output_parsers/tools.py +7 -7
- langchain/agents/react/agent.py +3 -2
- langchain/agents/react/base.py +4 -3
- langchain/agents/schema.py +3 -3
- langchain/agents/self_ask_with_search/base.py +2 -1
- langchain/agents/structured_chat/base.py +9 -8
- langchain/agents/structured_chat/output_parser.py +2 -1
- langchain/agents/tool_calling_agent/base.py +3 -2
- langchain/agents/tools.py +4 -4
- langchain/agents/types.py +3 -3
- langchain/agents/utils.py +1 -1
- langchain/agents/xml/base.py +7 -6
- langchain/callbacks/streaming_aiter.py +3 -2
- langchain/callbacks/streaming_aiter_final_only.py +3 -3
- langchain/callbacks/streaming_stdout_final_only.py +3 -3
- langchain/chains/api/base.py +11 -12
- langchain/chains/base.py +47 -50
- langchain/chains/combine_documents/base.py +23 -23
- langchain/chains/combine_documents/map_reduce.py +12 -12
- langchain/chains/combine_documents/map_rerank.py +16 -15
- langchain/chains/combine_documents/reduce.py +17 -17
- langchain/chains/combine_documents/refine.py +12 -12
- langchain/chains/combine_documents/stuff.py +10 -10
- langchain/chains/constitutional_ai/base.py +9 -9
- langchain/chains/conversation/base.py +2 -4
- langchain/chains/conversational_retrieval/base.py +30 -30
- langchain/chains/elasticsearch_database/base.py +13 -13
- langchain/chains/example_generator.py +1 -3
- langchain/chains/flare/base.py +13 -12
- langchain/chains/flare/prompts.py +2 -4
- langchain/chains/hyde/base.py +8 -8
- langchain/chains/llm.py +31 -30
- langchain/chains/llm_checker/base.py +6 -6
- langchain/chains/llm_math/base.py +10 -10
- langchain/chains/llm_summarization_checker/base.py +6 -6
- langchain/chains/loading.py +12 -14
- langchain/chains/mapreduce.py +7 -6
- langchain/chains/moderation.py +8 -8
- langchain/chains/natbot/base.py +6 -6
- langchain/chains/openai_functions/base.py +8 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
- langchain/chains/openai_functions/extraction.py +3 -3
- langchain/chains/openai_functions/openapi.py +12 -12
- langchain/chains/openai_functions/qa_with_structure.py +4 -4
- langchain/chains/openai_functions/utils.py +2 -2
- langchain/chains/openai_tools/extraction.py +2 -2
- langchain/chains/prompt_selector.py +3 -3
- langchain/chains/qa_generation/base.py +5 -5
- langchain/chains/qa_with_sources/base.py +21 -21
- langchain/chains/qa_with_sources/loading.py +2 -1
- langchain/chains/qa_with_sources/retrieval.py +6 -6
- langchain/chains/qa_with_sources/vector_db.py +8 -8
- langchain/chains/query_constructor/base.py +4 -3
- langchain/chains/query_constructor/parser.py +5 -4
- langchain/chains/question_answering/chain.py +3 -2
- langchain/chains/retrieval.py +2 -2
- langchain/chains/retrieval_qa/base.py +16 -16
- langchain/chains/router/base.py +12 -11
- langchain/chains/router/embedding_router.py +12 -11
- langchain/chains/router/llm_router.py +12 -12
- langchain/chains/router/multi_prompt.py +3 -3
- langchain/chains/router/multi_retrieval_qa.py +5 -4
- langchain/chains/sequential.py +18 -18
- langchain/chains/sql_database/query.py +4 -4
- langchain/chains/structured_output/base.py +14 -13
- langchain/chains/summarize/chain.py +4 -3
- langchain/chains/transform.py +12 -11
- langchain/chat_models/base.py +27 -31
- langchain/embeddings/__init__.py +1 -1
- langchain/embeddings/base.py +4 -4
- langchain/embeddings/cache.py +19 -18
- langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
- langchain/evaluation/comparison/eval_chain.py +10 -10
- langchain/evaluation/criteria/eval_chain.py +11 -10
- langchain/evaluation/embedding_distance/base.py +21 -21
- langchain/evaluation/exact_match/base.py +3 -3
- langchain/evaluation/loading.py +7 -8
- langchain/evaluation/qa/eval_chain.py +7 -6
- langchain/evaluation/regex_match/base.py +3 -3
- langchain/evaluation/schema.py +6 -5
- langchain/evaluation/scoring/eval_chain.py +9 -9
- langchain/evaluation/string_distance/base.py +23 -23
- langchain/hub.py +2 -1
- langchain/indexes/_sql_record_manager.py +8 -7
- langchain/indexes/vectorstore.py +11 -11
- langchain/llms/__init__.py +3 -3
- langchain/memory/buffer.py +13 -13
- langchain/memory/buffer_window.py +5 -5
- langchain/memory/chat_memory.py +5 -5
- langchain/memory/combined.py +10 -10
- langchain/memory/entity.py +8 -7
- langchain/memory/readonly.py +4 -4
- langchain/memory/simple.py +5 -5
- langchain/memory/summary.py +8 -8
- langchain/memory/summary_buffer.py +11 -11
- langchain/memory/token_buffer.py +5 -5
- langchain/memory/utils.py +2 -2
- langchain/memory/vectorstore.py +15 -14
- langchain/memory/vectorstore_token_buffer_memory.py +7 -7
- langchain/model_laboratory.py +4 -3
- langchain/output_parsers/combining.py +5 -5
- langchain/output_parsers/datetime.py +1 -2
- langchain/output_parsers/enum.py +4 -5
- langchain/output_parsers/pandas_dataframe.py +5 -5
- langchain/output_parsers/regex.py +4 -4
- langchain/output_parsers/regex_dict.py +4 -4
- langchain/output_parsers/retry.py +2 -2
- langchain/output_parsers/structured.py +5 -5
- langchain/output_parsers/yaml.py +3 -3
- langchain/pydantic_v1/__init__.py +1 -6
- langchain/pydantic_v1/dataclasses.py +1 -5
- langchain/pydantic_v1/main.py +1 -5
- langchain/retrievers/contextual_compression.py +3 -3
- langchain/retrievers/document_compressors/base.py +3 -2
- langchain/retrievers/document_compressors/chain_extract.py +4 -3
- langchain/retrievers/document_compressors/chain_filter.py +3 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
- langchain/retrievers/document_compressors/cross_encoder.py +1 -2
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
- langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
- langchain/retrievers/ensemble.py +15 -19
- langchain/retrievers/merger_retriever.py +7 -12
- langchain/retrievers/multi_query.py +14 -13
- langchain/retrievers/multi_vector.py +4 -4
- langchain/retrievers/parent_document_retriever.py +9 -8
- langchain/retrievers/re_phraser.py +2 -3
- langchain/retrievers/self_query/base.py +13 -12
- langchain/retrievers/time_weighted_retriever.py +14 -14
- langchain/runnables/openai_functions.py +4 -3
- langchain/smith/evaluation/config.py +7 -6
- langchain/smith/evaluation/progress.py +3 -2
- langchain/smith/evaluation/runner_utils.py +58 -61
- langchain/smith/evaluation/string_run_evaluator.py +29 -29
- langchain/storage/encoder_backed.py +7 -11
- langchain/storage/file_system.py +5 -4
- {langchain-0.3.23.dist-info → langchain-0.3.24.dist-info}/METADATA +2 -2
- {langchain-0.3.23.dist-info → langchain-0.3.24.dist-info}/RECORD +169 -169
- {langchain-0.3.23.dist-info → langchain-0.3.24.dist-info}/WHEEL +1 -1
- langchain-0.3.24.dist-info/entry_points.txt +4 -0
- langchain-0.3.23.dist-info/entry_points.txt +0 -5
- {langchain-0.3.23.dist-info → langchain-0.3.24.dist-info}/licenses/LICENSE +0 -0
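The "+x -y" counts above can be reproduced locally. A minimal sketch (not the registry's own tooling), assuming both wheels have already been downloaded, e.g. with "pip download langchain==0.3.23 --no-deps" and the same for 0.3.24:

import difflib
import zipfile

OLD = "langchain-0.3.23-py3-none-any.whl"
NEW = "langchain-0.3.24-py3-none-any.whl"

with zipfile.ZipFile(OLD) as old, zipfile.ZipFile(NEW) as new:
    # Wheels are plain zip archives; compare the files present in both.
    for name in sorted(set(old.namelist()) & set(new.namelist())):
        a = old.read(name).decode("utf-8", errors="replace").splitlines()
        b = new.read(name).decode("utf-8", errors="replace").splitlines()
        diff = list(difflib.unified_diff(a, b, lineterm=""))
        if diff:
            # Skip the two "---"/"+++" header lines, then count changes.
            added = sum(1 for line in diff[2:] if line.startswith("+"))
            removed = sum(1 for line in diff[2:] if line.startswith("-"))
            print(f"- {name} +{added} -{removed}")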
langchain/chains/loading.py
CHANGED

@@ -39,16 +39,14 @@ try:
     from langchain_community.llms.loading import load_llm, load_llm_from_config
 except ImportError:
 
-    def load_llm(*args: Any, **kwargs: Any) -> None:
+    def load_llm(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm functionality you must install the "
             "langchain_community package. "
             "You can install it with `pip install langchain_community`"
         )
 
-    def load_llm_from_config(
-        *args: Any, **kwargs: Any
-    ) -> None:
+    def load_llm_from_config(*args: Any, **kwargs: Any) -> None:
         raise ImportError(
             "To use this load_llm_from_config functionality you must install the "
             "langchain_community package. "

@@ -95,9 +93,9 @@ def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder:
     else:
         raise ValueError("`embeddings` must be present.")
     return HypotheticalDocumentEmbedder(
-        llm_chain=llm_chain,
+        llm_chain=llm_chain,
         base_embeddings=embeddings,
-        **config,
+        **config,
     )
 
 
@@ -160,7 +158,7 @@ def _load_map_reduce_documents_chain(
     )
 
 
-def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
+def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
     combine_documents_chain = None
     collapse_documents_chain = None

@@ -213,7 +211,7 @@ def _load_reduce_documents_chain(config: dict, **kwargs: Any) -> ReduceDocumentsChain:
             config.pop("collapse_document_chain_path"), **kwargs
         )
 
-    return ReduceDocumentsChain(
+    return ReduceDocumentsChain(
         combine_documents_chain=combine_documents_chain,
         collapse_documents_chain=collapse_documents_chain,
         **config,

@@ -245,7 +243,7 @@ def _load_llm_bash_chain(config: dict, **kwargs: Any) -> Any:
     elif "prompt_path" in config:
         prompt = load_prompt(config.pop("prompt_path"))
     if llm_chain:
-        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
+        return LLMBashChain(llm_chain=llm_chain, prompt=prompt, **config)
     else:
         return LLMBashChain(llm=llm, prompt=prompt, **config)

@@ -347,7 +345,7 @@ def _load_pal_chain(config: dict, **kwargs: Any) -> Any:
         llm_chain = load_chain(config.pop("llm_chain_path"), **kwargs)
     else:
         raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
-    return PALChain(llm_chain=llm_chain, **config)
+    return PALChain(llm_chain=llm_chain, **config)
 
 
 def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:

@@ -410,7 +408,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
     if "llm_chain" in config:
         llm_chain_config = config.pop("llm_chain")
         chain = load_chain_from_config(llm_chain_config, **kwargs)
-        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
+        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
     if "llm" in config:
         llm_config = config.pop("llm")
         llm = load_llm_from_config(llm_config, **kwargs)

@@ -563,8 +561,8 @@ def _load_graph_cypher_chain(config: dict, **kwargs: Any) -> GraphCypherQAChain:
     )
     return GraphCypherQAChain(
         graph=graph,
-        cypher_generation_chain=cypher_generation_chain,
-        qa_chain=qa_chain,
+        cypher_generation_chain=cypher_generation_chain,
+        qa_chain=qa_chain,
         **config,
     )

@@ -702,7 +700,7 @@ def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
         with open(file_path) as f:
             config = json.load(f)
     elif file_path.suffix.endswith((".yaml", ".yml")):
-        with open(file_path
+        with open(file_path) as f:
             config = yaml.safe_load(f)
     else:
         raise ValueError("File type must be json or yaml")
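The first hunk above is a pure formatting change to langchain's optional-dependency fallback: when langchain_community is absent, stubs with the same names as the real functions are defined so the failure surfaces at call time with an actionable message rather than at import time. That pattern, extracted as a standalone sketch:

from typing import Any

try:
    from langchain_community.llms.loading import load_llm
except ImportError:

    def load_llm(*args: Any, **kwargs: Any) -> None:
        # Stub with the same name as the real function: importing this
        # module always succeeds, but calling load_llm without the optional
        # dependency raises a clear, actionable error.
        raise ImportError(
            "To use this load_llm functionality you must install the "
            "langchain_community package. "
            "You can install it with `pip install langchain_community`"
        )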
langchain/chains/mapreduce.py
CHANGED

@@ -6,7 +6,8 @@ then combines the results with another one.
 
 from __future__ import annotations
 
-from typing import Any, Dict, List, Mapping, Optional
+from collections.abc import Mapping
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks

@@ -84,7 +85,7 @@ class MapReduceChain(Chain):
     )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect input key.
 
         :meta private:

@@ -92,7 +93,7 @@ class MapReduceChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return output key.
 
         :meta private:

@@ -101,15 +102,15 @@ class MapReduceChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         # Split the larger text into smaller chunks.
         doc_text = inputs.pop(self.input_key)
         texts = self.text_splitter.split_text(doc_text)
         docs = [Document(page_content=text) for text in texts]
-        _inputs: Dict[str, Any] = {
+        _inputs: dict[str, Any] = {
             **inputs,
             self.combine_documents_chain.input_key: docs,
         }
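Nearly every remaining hunk in this release applies the same mechanical modernization visible here: deprecated typing aliases (List, Dict, Tuple, Type) become builtin generics per PEP 585, and abstract container types move to collections.abc. A standalone illustration with invented names; both styles behave identically at runtime on Python 3.9+, which is the floor langchain 0.3 supports:

from collections.abc import Mapping  # previously: from typing import Mapping
from typing import Any, Optional

def input_keys() -> list[str]:  # previously: -> List[str]
    return ["input"]

def call(
    inputs: dict[str, str],  # previously: Dict[str, str]
    config: Optional[Mapping[str, Any]] = None,
) -> dict[str, str]:
    # Builtin generics are valid annotations from Python 3.9 onward,
    # so the typing.List/Dict/Tuple imports can simply be dropped.
    return dict(inputs)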
langchain/chains/moderation.py
CHANGED

@@ -1,6 +1,6 @@
 """Pass input through a moderation endpoint."""
 
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,

@@ -42,7 +42,7 @@ class OpenAIModerationChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def validate_environment(cls, values: Dict) -> Any:
+    def validate_environment(cls, values: dict) -> Any:
         """Validate that api key and python package exists in environment."""
         openai_api_key = get_from_dict_or_env(
             values, "openai_api_key", "OPENAI_API_KEY"

@@ -78,7 +78,7 @@ class OpenAIModerationChain(Chain):
         return values
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect input key.
 
         :meta private:

@@ -86,7 +86,7 @@ class OpenAIModerationChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return output key.
 
         :meta private:

@@ -108,9 +108,9 @@ class OpenAIModerationChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         text = inputs[self.input_key]
         if self.openai_pre_1_0:
             results = self.client.create(text)

@@ -122,9 +122,9 @@ class OpenAIModerationChain(Chain):
 
     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if self.openai_pre_1_0:
             return await super()._acall(inputs, run_manager=run_manager)
         text = inputs[self.input_key]
langchain/chains/natbot/base.py
CHANGED

@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache as BaseCache

@@ -68,7 +68,7 @@ class NatBotChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: Dict) -> Any:
+    def raise_deprecation(cls, values: dict) -> Any:
         if "llm" in values:
             warnings.warn(
                 "Directly instantiating an NatBotChain with an llm is deprecated. "

@@ -97,7 +97,7 @@ class NatBotChain(Chain):
         return cls(llm_chain=llm_chain, objective=objective, **kwargs)
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect url and browser content.
 
         :meta private:

@@ -105,7 +105,7 @@ class NatBotChain(Chain):
         return [self.input_url_key, self.input_browser_content_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return command.
 
         :meta private:

@@ -114,9 +114,9 @@ class NatBotChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         url = inputs[self.input_url_key]
         browser_content = inputs[self.input_browser_content_key]
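Both OpenAIModerationChain and NatBotChain hang their setup logic on Pydantic v2 "before"-mode model validators, which is what the values: Dict -> values: dict hunks touch. A minimal standalone sketch of that API (the class and field here are invented):

from typing import Any

from pydantic import BaseModel, model_validator

class ExampleChain(BaseModel):  # invented stand-in for the chains above
    objective: str = ""

    @model_validator(mode="before")
    @classmethod
    def raise_deprecation(cls, values: dict) -> Any:
        # Runs before field validation and receives the raw input mapping,
        # so deprecated constructor arguments can be rejected or rewritten.
        if "llm" in values:
            raise ValueError("Pass llm_chain instead of llm.")
        return values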
langchain/chains/openai_functions/base.py
CHANGED

@@ -1,12 +1,10 @@
 """Methods for creating chains that use OpenAI function-calling APIs."""
 
+from collections.abc import Sequence
 from typing import (
     Any,
     Callable,
-    Dict,
     Optional,
-    Sequence,
-    Type,
     Union,
 )
 

@@ -45,7 +43,7 @@ __all__ = [
 
 @deprecated(since="0.1.1", removal="1.0", alternative="create_openai_fn_runnable")
 def create_openai_fn_chain(
-    functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
+    functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]],
     llm: BaseLanguageModel,
     prompt: BasePromptTemplate,
     *,

@@ -53,7 +51,7 @@ def create_openai_fn_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:
     """[Legacy] Create an LLM chain that uses OpenAI functions.
 
     Args:

@@ -128,12 +126,12 @@ def create_openai_fn_chain(
         raise ValueError("Need to pass in at least one function. Received zero.")
     openai_functions = [convert_to_openai_function(f) for f in functions]
     output_parser = output_parser or get_openai_output_parser(functions)
-    llm_kwargs: Dict[str, Any] = {
+    llm_kwargs: dict[str, Any] = {
         "functions": openai_functions,
     }
     if len(openai_functions) == 1 and enforce_single_function_usage:
         llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]}
-    llm_chain = LLMChain(
+    llm_chain = LLMChain(
         llm=llm,
         prompt=prompt,
         output_parser=output_parser,

@@ -148,14 +146,14 @@ def create_openai_fn_chain(
     since="0.1.1", removal="1.0", alternative="ChatOpenAI.with_structured_output"
 )
 def create_structured_output_chain(
-    output_schema: Union[Dict[str, Any], Type[BaseModel]],
+    output_schema: Union[dict[str, Any], type[BaseModel]],
     llm: BaseLanguageModel,
     prompt: BasePromptTemplate,
     *,
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:
     """[Legacy] Create an LLMChain that uses an OpenAI function to get a structured output.
 
     Args:

@@ -218,7 +216,7 @@ def create_structured_output_chain(
     class _OutputFormatter(BaseModel):
         """Output formatter. Should always be used to format your response to the user."""  # noqa: E501
 
-        output: output_schema  # type: ignore
+        output: output_schema  # type: ignore[valid-type]
 
     function = _OutputFormatter
     output_parser = output_parser or PydanticAttrOutputFunctionsParser(
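The last hunk narrows a blanket # type: ignore to # type: ignore[valid-type]. The ignore is needed because _OutputFormatter uses a runtime class object as an annotation, a trick static checkers cannot verify but Pydantic resolves fine. A sketch of the pattern (Person is an invented schema):

from pydantic import BaseModel

class Person(BaseModel):  # invented example schema
    name: str
    age: int

output_schema = Person  # in create_structured_output_chain this is a parameter

class _OutputFormatter(BaseModel):
    """Wraps the caller-supplied schema in a single `output` field."""

    output: output_schema  # type: ignore[valid-type]

formatted = _OutputFormatter(output=Person(name="Ada", age=36))
print(formatted.output.name)  # -> "Ada"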
langchain/chains/openai_functions/citation_fuzzy_match.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Iterator, List
+from collections.abc import Iterator
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseChatModel, BaseLanguageModel

@@ -21,7 +21,7 @@ class FactWithEvidence(BaseModel):
     """
 
     fact: str = Field(..., description="Body of the sentence, as part of a response")
-    substring_quote: List[str] = Field(
+    substring_quote: list[str] = Field(
         ...,
         description=(
             "Each source should be a direct quote from the context, "

@@ -54,7 +54,7 @@ class QuestionAnswer(BaseModel):
     each sentence contains a body and a list of sources."""
 
     question: str = Field(..., description="Question that was asked")
-    answer: List[FactWithEvidence] = Field(
+    answer: list[FactWithEvidence] = Field(
         ...,
         description=(
             "Body of the answer, each fact should be "

@@ -148,7 +148,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             )
         ),
     ]
-    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
+    prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
 
     chain = LLMChain(
         llm=llm,
langchain/chains/openai_functions/extraction.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel

@@ -83,7 +83,7 @@ def create_extraction_chain(
     schema: dict,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    tags: Optional[List[str]] = None,
+    tags: Optional[list[str]] = None,
     verbose: bool = False,
 ) -> Chain:
     """Creates a chain that extracts information from a passage.

@@ -170,7 +170,7 @@ def create_extraction_chain_pydantic(
     """
 
     class PydanticSchema(BaseModel):
-        info: List[pydantic_schema]
+        info: list[pydantic_schema]
 
     if hasattr(pydantic_schema, "model_json_schema"):
         openai_schema = pydantic_schema.model_json_schema()
langchain/chains/openai_functions/openapi.py
CHANGED

@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import re
 from collections import defaultdict
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union
 
 import requests
 from langchain_core._api import deprecated

@@ -70,14 +70,14 @@ def _format_url(url: str, path_params: dict) -> str:
     return url.format(**new_params)
 
 
-def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict:
+def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -> dict:
     properties = {}
     required = []
     for p in params:
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema
+            media_type_schema = list(p.content.values())[0].media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description

@@ -89,7 +89,7 @@ def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec) -> dict:
 
 def openapi_spec_to_openai_fn(
     spec: OpenAPISpec,
-) -> Tuple[List[Dict[str, Any]], Callable]:
+) -> tuple[list[dict[str, Any]], Callable]:
     """Convert a valid OpenAPI spec to the JSON Schema format expected for OpenAI
     functions.
 

@@ -208,18 +208,18 @@ class SimpleRequestChain(Chain):
     """Key to use for the input of the request."""
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         return [self.output_key]
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Run the logic of this chain and return the output."""
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         name = inputs[self.input_key].pop("name")

@@ -257,10 +257,10 @@ def get_openapi_chain(
     llm: Optional[BaseLanguageModel] = None,
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
-    llm_chain_kwargs: Optional[Dict] = None,
+    llm_chain_kwargs: Optional[dict] = None,
     verbose: bool = False,
-    headers: Optional[Dict] = None,
-    params: Optional[Dict] = None,
+    headers: Optional[dict] = None,
+    params: Optional[dict] = None,
     **kwargs: Any,
 ) -> SequentialChain:
     """Create a chain for querying an API from a OpenAPI spec.

@@ -363,7 +363,7 @@ def get_openapi_chain(
         OpenAPISpec.from_text,
     ):
         try:
-            spec = conversion(spec)
+            spec = conversion(spec)
             break
         except ImportError as e:
             raise e
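For orientation, a hedged usage sketch for openapi_spec_to_openai_fn, whose return annotation changed above. It assumes langchain_community is installed (OpenAPISpec lives there), and the spec URL is a placeholder:

from langchain_community.utilities.openapi import OpenAPISpec

from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn

spec = OpenAPISpec.from_url("https://example.com/openapi.json")  # placeholder URL
functions, call_api_fn = openapi_spec_to_openai_fn(spec)
# `functions` holds the OpenAI function definitions derived from the spec;
# `call_api_fn` executes the HTTP request for a chosen function.
print([f["name"] for f in functions])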
langchain/chains/openai_functions/qa_with_structure.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, Type, Union, cast
+from typing import Any, Optional, Union, cast
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel

@@ -21,7 +21,7 @@ class AnswerWithSources(BaseModel):
     """An answer to the question, with sources."""
 
     answer: str = Field(..., description="Answer to the question that was asked")
-    sources: List[str] = Field(
+    sources: list[str] = Field(
         ..., description="List of sources used to answer the question"
     )
 

@@ -37,7 +37,7 @@ class AnswerWithSources(BaseModel):
 )
 def create_qa_with_structure_chain(
     llm: BaseLanguageModel,
-    schema: Union[dict, Type[BaseModel]],
+    schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
     verbose: bool = False,

@@ -96,7 +96,7 @@ def create_qa_with_structure_chain(
         HumanMessagePromptTemplate.from_template("Question: {question}"),
         HumanMessage(content="Tips: Make sure to answer in the correct format"),
     ]
-    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
+    prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
 
     chain = LLMChain(
         llm=llm,
langchain/chains/openai_functions/utils.py
CHANGED

@@ -1,7 +1,7 @@
-from typing import Any, Dict
+from typing import Any
 
 
-def _resolve_schema_references(schema: Any, definitions: Dict[str, Any]) -> Any:
+def _resolve_schema_references(schema: Any, definitions: dict[str, Any]) -> Any:
     """
     Resolve the $ref keys in a JSON schema object using the provided definitions.
     """
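What the private helper above does, in miniature: replace {"$ref": "#/definitions/X"} nodes with the referenced definition. A toy re-implementation for illustration, not the library code:

from typing import Any

def resolve_refs(schema: Any, definitions: dict[str, Any]) -> Any:
    # Recursively walk dicts and lists, splicing in referenced definitions.
    if isinstance(schema, dict):
        if "$ref" in schema:
            name = schema["$ref"].split("/")[-1]
            return resolve_refs(definitions[name], definitions)
        return {k: resolve_refs(v, definitions) for k, v in schema.items()}
    if isinstance(schema, list):
        return [resolve_refs(v, definitions) for v in schema]
    return schema

defs = {"Name": {"type": "string"}}
print(resolve_refs({"properties": {"n": {"$ref": "#/definitions/Name"}}}, defs))
# -> {'properties': {'n': {'type': 'string'}}}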
langchain/chains/openai_tools/extraction.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import List, Type, Union
+from typing import Union
 
 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel

@@ -51,7 +51,7 @@ If a property is not present and is not required in the function parameters, do
     ),
 )
 def create_extraction_chain_pydantic(
-    pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
+    pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
     llm: BaseLanguageModel,
     system_message: str = _EXTRACTION_TEMPLATE,
 ) -> Runnable:
langchain/chains/prompt_selector.py
CHANGED

@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Callable, List, Tuple
+from typing import Callable
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.chat_models import BaseChatModel

@@ -21,8 +21,8 @@ class ConditionalPromptSelector(BasePromptSelector):
 
     default_prompt: BasePromptTemplate
     """Default prompt to use if no conditionals match."""
-    conditionals: List[
-        Tuple[Callable[[BaseLanguageModel], bool], BasePromptTemplate]
+    conditionals: list[
+        tuple[Callable[[BaseLanguageModel], bool], BasePromptTemplate]
     ] = Field(default_factory=list)
     """List of conditionals and prompts to use if the conditionals match."""
 
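The conditionals field whose annotation changed above pairs a predicate over the model with a prompt. A usage sketch; is_chat_model is the predicate this module ships (its BaseChatModel import is visible in the hunk), while the prompts here are invented:

from langchain_core.prompts import PromptTemplate

from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model

default_prompt = PromptTemplate.from_template("Summarize: {text}")
chat_prompt = PromptTemplate.from_template("You are concise. Summarize: {text}")

selector = ConditionalPromptSelector(
    default_prompt=default_prompt,
    conditionals=[(is_chat_model, chat_prompt)],
)
# selector.get_prompt(llm) walks the conditionals in order and returns the
# first prompt whose predicate matches, falling back to default_prompt.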
langchain/chains/qa_generation/base.py
CHANGED

@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import json
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun

@@ -103,18 +103,18 @@ class QAGenerationChain(Chain):
         raise NotImplementedError
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         return [self.output_key]
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, List]:
+    ) -> dict[str, list]:
         docs = self.text_splitter.create_documents([inputs[self.input_key]])
         results = self.llm_chain.generate(
             [{"text": d.page_content} for d in docs], run_manager=run_manager