langchain 0.2.16__py3-none-any.whl → 0.3.0.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +21 -17
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +10 -7
- langchain/agents/chat/base.py +1 -1
- langchain/agents/conversational/base.py +1 -1
- langchain/agents/conversational_chat/base.py +1 -1
- langchain/agents/mrkl/base.py +1 -1
- langchain/agents/openai_assistant/base.py +8 -7
- langchain/agents/openai_functions_agent/base.py +6 -5
- langchain/agents/openai_functions_multi_agent/base.py +6 -5
- langchain/agents/react/base.py +1 -1
- langchain/agents/self_ask_with_search/base.py +1 -1
- langchain/agents/structured_chat/base.py +1 -1
- langchain/agents/structured_chat/output_parser.py +1 -1
- langchain/chains/api/base.py +13 -11
- langchain/chains/base.py +13 -5
- langchain/chains/combine_documents/base.py +1 -1
- langchain/chains/combine_documents/map_reduce.py +14 -10
- langchain/chains/combine_documents/map_rerank.py +17 -14
- langchain/chains/combine_documents/reduce.py +5 -3
- langchain/chains/combine_documents/refine.py +11 -8
- langchain/chains/combine_documents/stuff.py +8 -6
- langchain/chains/constitutional_ai/models.py +1 -1
- langchain/chains/conversation/base.py +13 -11
- langchain/chains/conversational_retrieval/base.py +9 -7
- langchain/chains/elasticsearch_database/base.py +10 -8
- langchain/chains/flare/base.py +1 -1
- langchain/chains/hyde/base.py +5 -3
- langchain/chains/llm.py +5 -4
- langchain/chains/llm_checker/base.py +8 -6
- langchain/chains/llm_math/base.py +8 -6
- langchain/chains/llm_summarization_checker/base.py +8 -6
- langchain/chains/mapreduce.py +5 -3
- langchain/chains/moderation.py +4 -3
- langchain/chains/natbot/base.py +8 -6
- langchain/chains/openai_functions/base.py +1 -1
- langchain/chains/openai_functions/citation_fuzzy_match.py +1 -1
- langchain/chains/openai_functions/extraction.py +1 -1
- langchain/chains/openai_functions/qa_with_structure.py +1 -1
- langchain/chains/openai_tools/extraction.py +1 -1
- langchain/chains/prompt_selector.py +1 -1
- langchain/chains/qa_generation/base.py +1 -1
- langchain/chains/qa_with_sources/base.py +8 -6
- langchain/chains/qa_with_sources/retrieval.py +1 -1
- langchain/chains/qa_with_sources/vector_db.py +4 -3
- langchain/chains/query_constructor/schema.py +5 -4
- langchain/chains/retrieval_qa/base.py +12 -9
- langchain/chains/router/base.py +5 -3
- langchain/chains/router/embedding_router.py +5 -3
- langchain/chains/router/llm_router.py +6 -5
- langchain/chains/sequential.py +17 -13
- langchain/chains/structured_output/base.py +1 -1
- langchain/chains/transform.py +1 -1
- langchain/chat_models/base.py +1 -1
- langchain/evaluation/agents/trajectory_eval_chain.py +4 -3
- langchain/evaluation/comparison/eval_chain.py +4 -3
- langchain/evaluation/criteria/eval_chain.py +4 -3
- langchain/evaluation/embedding_distance/base.py +4 -3
- langchain/evaluation/qa/eval_chain.py +7 -4
- langchain/evaluation/qa/generate_chain.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +4 -3
- langchain/evaluation/string_distance/base.py +1 -1
- langchain/indexes/vectorstore.py +9 -7
- langchain/memory/chat_memory.py +1 -1
- langchain/memory/combined.py +1 -1
- langchain/memory/entity.py +4 -3
- langchain/memory/summary.py +1 -1
- langchain/memory/vectorstore.py +1 -1
- langchain/memory/vectorstore_token_buffer_memory.py +1 -1
- langchain/output_parsers/fix.py +3 -2
- langchain/output_parsers/pandas_dataframe.py +1 -1
- langchain/output_parsers/retry.py +4 -3
- langchain/output_parsers/structured.py +1 -1
- langchain/output_parsers/yaml.py +1 -1
- langchain/retrievers/contextual_compression.py +4 -2
- langchain/retrievers/document_compressors/base.py +4 -2
- langchain/retrievers/document_compressors/chain_extract.py +4 -2
- langchain/retrievers/document_compressors/chain_filter.py +4 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +8 -6
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +5 -3
- langchain/retrievers/document_compressors/embeddings_filter.py +4 -3
- langchain/retrievers/document_compressors/listwise_rerank.py +4 -3
- langchain/retrievers/ensemble.py +4 -3
- langchain/retrievers/multi_vector.py +5 -4
- langchain/retrievers/self_query/base.py +8 -6
- langchain/retrievers/time_weighted_retriever.py +4 -3
- langchain/smith/evaluation/config.py +7 -5
- {langchain-0.2.16.dist-info → langchain-0.3.0.dev1.dist-info}/METADATA +4 -4
- {langchain-0.2.16.dist-info → langchain-0.3.0.dev1.dist-info}/RECORD +91 -91
- {langchain-0.2.16.dist-info → langchain-0.3.0.dev1.dist-info}/LICENSE +0 -0
- {langchain-0.2.16.dist-info → langchain-0.3.0.dev1.dist-info}/WHEEL +0 -0
- {langchain-0.2.16.dist-info → langchain-0.3.0.dev1.dist-info}/entry_points.txt +0 -0
@@ -9,6 +9,7 @@ from langchain_core.callbacks import (
 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.vectorstores import VectorStore
+from pydantic import ConfigDict

 from langchain.chains.router.base import RouterChain

@@ -19,9 +20,10 @@ class EmbeddingRouterChain(RouterChain):
     vectorstore: VectorStore
     routing_keys: List[str] = ["query"]

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

     @property
     def input_keys(self) -> List[str]:
langchain/chains/router/llm_router.py
CHANGED
@@ -13,8 +13,9 @@ from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import root_validator
 from langchain_core.utils.json import parse_and_check_json_markdown
+from pydantic import model_validator
+from typing_extensions import Self

 from langchain.chains import LLMChain
 from langchain.chains.router.base import RouterChain
@@ -100,9 +101,9 @@ class LLMRouterChain(RouterChain):
     llm_chain: LLMChain
     """LLM chain used to perform routing"""

-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_prompt(cls, values: dict) -> dict:
-        prompt = values["llm_chain"].prompt
+    @model_validator(mode="after")
+    def validate_prompt(self) -> Self:
+        prompt = self.llm_chain.prompt
         if prompt.output_parser is None:
             raise ValueError(
                 "LLMRouterChain requires base llm_chain prompt to have an output"
@@ -110,7 +111,7 @@ class LLMRouterChain(RouterChain):
                 " 'destination' and 'next_inputs'. Received a prompt with no output"
                 " parser."
             )
-        return values
+        return self

     @property
     def input_keys(self) -> List[str]:
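Note: these two hunks show the migration pattern that repeats through the rest of this diff. The pydantic-v1 style "class Config" block becomes a pydantic-v2 "model_config = ConfigDict(...)" assignment, and root_validator methods become model_validator methods. The sketch below is illustrative only (the RoutingConfig model is hypothetical, not part of langchain) and assumes pydantic >= 2:

    from pydantic import BaseModel, ConfigDict, model_validator
    from typing_extensions import Self


    class RoutingConfig(BaseModel):
        # v2 replacement for the old nested "class Config" with
        # arbitrary_types_allowed / extra attributes.
        model_config = ConfigDict(
            arbitrary_types_allowed=True,
            extra="forbid",
        )

        destination: str
        next_inputs: dict = {}

        @model_validator(mode="after")
        def check_destination(self) -> Self:
            # mode="after" runs on the constructed instance, so fields are read
            # from self and the validator returns self (not a values dict).
            if not self.destination:
                raise ValueError("destination must be non-empty")
            return self

This is why the LLMRouterChain validator above switches from a classmethod over a values dict to an instance method that returns self.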
langchain/chains/sequential.py
CHANGED
@@ -6,8 +6,9 @@ from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
 )
-from langchain_core.pydantic_v1 import root_validator
 from langchain_core.utils.input import get_color_mapping
+from pydantic import ConfigDict, model_validator
+from typing_extensions import Self

 from langchain.chains.base import Chain

@@ -20,9 +21,10 @@ class SequentialChain(Chain):
     output_variables: List[str]  #: :meta private:
     return_all: bool = False

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

     @property
     def input_keys(self) -> List[str]:
@@ -40,8 +42,9 @@ class SequentialChain(Chain):
         """
         return self.output_variables

-    @root_validator(pre=True)
-    def validate_chains(cls, values: Dict) -> Dict:
+    @model_validator(mode="before")
+    @classmethod
+    def validate_chains(cls, values: Dict) -> Any:
         """Validate that the correct inputs exist for all chains."""
         chains = values["chains"]
         input_variables = values["input_variables"]
@@ -129,9 +132,10 @@ class SimpleSequentialChain(Chain):
     input_key: str = "input"  #: :meta private:
     output_key: str = "output"  #: :meta private:

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

     @property
     def input_keys(self) -> List[str]:
@@ -149,10 +153,10 @@ class SimpleSequentialChain(Chain):
         """
         return [self.output_key]

-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_chains(cls, values: Dict) -> Dict:
+    @model_validator(mode="after")
+    def validate_chains(self) -> Self:
         """Validate that chains are all single input/output."""
-        for chain in values["chains"]:
+        for chain in self.chains:
             if len(chain.input_keys) != 1:
                 raise ValueError(
                     "Chains used in SimplePipeline should all have one input, got "
@@ -163,7 +167,7 @@ class SimpleSequentialChain(Chain):
                     "Chains used in SimplePipeline should all have one output, got "
                     f"{chain} with {len(chain.output_keys)} outputs."
                 )
-        return values
+        return self

     def _call(
         self,
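Within this file, SequentialChain.validate_chains uses the other validator flavor: with mode="before" the validator stays a classmethod and receives the raw input dict, mirroring the old root_validator(pre=True). A minimal sketch with a hypothetical Pipeline model (not from the package):

    from typing import Any, Dict, List

    from pydantic import BaseModel, model_validator


    class Pipeline(BaseModel):
        steps: List[str]
        input_variables: List[str]

        @model_validator(mode="before")
        @classmethod
        def validate_steps(cls, values: Dict) -> Any:
            # Runs before field validation on the raw keyword arguments, so the
            # signature keeps cls and values just like the old pre=True validator.
            if not values.get("steps"):
                raise ValueError("at least one step is required")
            return values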
langchain/chains/structured_output/base.py
CHANGED
@@ -18,13 +18,13 @@ from langchain_core.output_parsers.openai_tools import (
     PydanticToolsParser,
 )
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import Runnable
 from langchain_core.utils.function_calling import (
     convert_to_openai_function,
     convert_to_openai_tool,
 )
 from langchain_core.utils.pydantic import is_basemodel_subclass
+from pydantic import BaseModel


 @deprecated(
langchain/chains/transform.py
CHANGED
langchain/chat_models/base.py
CHANGED
@@ -30,11 +30,11 @@ from langchain_core.language_models.chat_models import (
     generate_from_stream,
 )
 from langchain_core.messages import AnyMessage, BaseMessage
-from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import Runnable, RunnableConfig
 from langchain_core.runnables.schema import StreamEvent
 from langchain_core.tools import BaseTool
 from langchain_core.tracers import RunLog, RunLogPatch
+from pydantic import BaseModel
 from typing_extensions import TypeAlias

 __all__ = [
langchain/evaluation/agents/trajectory_eval_chain.py
CHANGED
@@ -28,8 +28,8 @@ from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.output_parsers import BaseOutputParser
-from langchain_core.pydantic_v1 import Field
 from langchain_core.tools import BaseTool
+from pydantic import ConfigDict, Field

 from langchain.chains.llm import LLMChain
 from langchain.evaluation.agents.trajectory_eval_prompt import (
@@ -156,8 +156,9 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
     return_reasoning: bool = False  # :meta private:
     """DEPRECATED. Reasoning always returned."""

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @property
     def requires_reference(self) -> bool:
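The extra="ignore" setting carried over here preserves the old behaviour of silently dropping unknown constructor arguments rather than raising. Illustrative sketch only (EvalSettings and legacy_flag are made up, not langchain APIs):

    from pydantic import BaseModel, ConfigDict


    class EvalSettings(BaseModel):
        model_config = ConfigDict(extra="ignore")

        criteria: str = "correctness"


    # The unknown legacy_flag argument is dropped instead of raising a ValidationError.
    settings = EvalSettings(criteria="relevance", legacy_flag=True)
    assert not hasattr(settings, "legacy_flag")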
langchain/evaluation/comparison/eval_chain.py
CHANGED
@@ -10,7 +10,7 @@ from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
-from langchain_core.pydantic_v1 import Field
+from pydantic import ConfigDict, Field

 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 from langchain.chains.llm import LLMChain
@@ -191,8 +191,9 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
     def is_lc_serializable(cls) -> bool:
         return False

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @property
     def requires_reference(self) -> bool:
langchain/evaluation/criteria/eval_chain.py
CHANGED
@@ -8,7 +8,7 @@ from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import Field
+from pydantic import ConfigDict, Field

 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 from langchain.chains.llm import LLMChain
@@ -236,8 +236,9 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     def is_lc_serializable(cls) -> bool:
         return False

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @property
     def requires_reference(self) -> bool:
langchain/evaluation/embedding_distance/base.py
CHANGED
@@ -10,8 +10,8 @@ from langchain_core.callbacks.manager import (
     Callbacks,
 )
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import Field
 from langchain_core.utils import pre_init
+from pydantic import ConfigDict, Field

 from langchain.chains.base import Chain
 from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
@@ -113,8 +113,9 @@ class _EmbeddingDistanceChainMixin(Chain):
         )
         return values

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     @property
     def output_keys(self) -> List[str]:
langchain/evaluation/qa/eval_chain.py
CHANGED
@@ -9,6 +9,7 @@ from typing import Any, List, Optional, Sequence, Tuple
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import PromptTemplate
+from pydantic import ConfigDict

 from langchain.chains.llm import LLMChain
 from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT
@@ -72,8 +73,9 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):

     output_key: str = "results"  #: :meta private:

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @classmethod
     def is_lc_serializable(cls) -> bool:
@@ -220,8 +222,9 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
         """Whether the chain requires an input string."""
         return True

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @classmethod
     def _validate_input_vars(cls, prompt: PromptTemplate) -> None:
langchain/evaluation/qa/generate_chain.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any

 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseLLMOutputParser
-from langchain_core.pydantic_v1 import Field
+from pydantic import Field

 from langchain.chains.llm import LLMChain
 from langchain.evaluation.qa.generate_prompt import PROMPT
langchain/evaluation/scoring/eval_chain.py
CHANGED
@@ -10,7 +10,7 @@ from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
-from langchain_core.pydantic_v1 import Field
+from pydantic import ConfigDict, Field

 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 from langchain.chains.llm import LLMChain
@@ -179,8 +179,9 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     criterion_name: str
     """The name of the criterion being evaluated."""

-    class Config:
-        extra = "ignore"
+    model_config = ConfigDict(
+        extra="ignore",
+    )

     @classmethod
     def is_lc_serializable(cls) -> bool:
langchain/evaluation/string_distance/base.py
CHANGED
@@ -8,8 +8,8 @@ from langchain_core.callbacks.manager import (
     CallbackManagerForChainRun,
     Callbacks,
 )
-from langchain_core.pydantic_v1 import Field
 from langchain_core.utils import pre_init
+from pydantic import Field

 from langchain.chains.base import Chain
 from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
langchain/indexes/vectorstore.py
CHANGED
@@ -4,9 +4,9 @@ from langchain_core.document_loaders import BaseLoader
 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models import BaseLanguageModel
-from langchain_core.pydantic_v1 import BaseModel, Field
 from langchain_core.vectorstores import VectorStore
 from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
+from pydantic import BaseModel, ConfigDict, Field

 from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
 from langchain.chains.retrieval_qa.base import RetrievalQA
@@ -21,9 +21,10 @@ class VectorStoreIndexWrapper(BaseModel):

     vectorstore: VectorStore

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

     def query(
         self,
@@ -142,9 +143,10 @@ class VectorstoreIndexCreator(BaseModel):
     text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
     vectorstore_kwargs: dict = Field(default_factory=dict)

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

     def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
         """Create a vectorstore index from loaders."""
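Both index models above hold a plain VectorStore instance, a type pydantic v2 cannot build a schema for on its own; arbitrary_types_allowed=True opts into a simple isinstance check instead. A small stand-alone sketch (FakeStore is a stand-in, not a langchain class):

    from pydantic import BaseModel, ConfigDict


    class FakeStore:
        """Stand-in for an arbitrary, non-pydantic type such as a vector store."""


    class StoreWrapper(BaseModel):
        # Without arbitrary_types_allowed=True, declaring a FakeStore field would
        # fail at class-definition time because no core schema can be generated.
        model_config = ConfigDict(arbitrary_types_allowed=True)

        store: FakeStore


    wrapper = StoreWrapper(store=FakeStore())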
langchain/memory/chat_memory.py
CHANGED
@@ -8,7 +8,7 @@ from langchain_core.chat_history import (
 )
 from langchain_core.memory import BaseMemory
 from langchain_core.messages import AIMessage, HumanMessage
-from langchain_core.pydantic_v1 import Field
+from pydantic import Field

 from langchain.memory.utils import get_prompt_input_key

langchain/memory/combined.py
CHANGED
langchain/memory/entity.py
CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Dict, Iterable, List, Optional
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field

 from langchain.chains.llm import LLMChain
 from langchain.memory.chat_memory import BaseChatMemory
@@ -245,8 +245,9 @@ class SQLiteEntityStore(BaseEntityStore):
     table_name: str = "memory_store"
     conn: Any = None

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     def __init__(
         self,
langchain/memory/summary.py
CHANGED
@@ -7,8 +7,8 @@ from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.utils import pre_init
+from pydantic import BaseModel

 from langchain.chains.llm import LLMChain
 from langchain.memory.chat_memory import BaseChatMemory
langchain/memory/vectorstore.py
CHANGED
@@ -3,8 +3,8 @@
 from typing import Any, Dict, List, Optional, Sequence, Union

 from langchain_core.documents import Document
-from langchain_core.pydantic_v1 import Field
 from langchain_core.vectorstores import VectorStoreRetriever
+from pydantic import Field

 from langchain.memory.chat_memory import BaseMemory
 from langchain.memory.utils import get_prompt_input_key
langchain/memory/vectorstore_token_buffer_memory.py
CHANGED
@@ -13,8 +13,8 @@ from typing import Any, Dict, List

 from langchain_core.messages import BaseMessage
 from langchain_core.prompts.chat import SystemMessagePromptTemplate
-from langchain_core.pydantic_v1 import Field, PrivateAttr
 from langchain_core.vectorstores import VectorStoreRetriever
+from pydantic import Field, PrivateAttr

 from langchain.memory import ConversationTokenBufferMemory, VectorStoreRetrieverMemory
 from langchain.memory.chat_memory import BaseChatMemory
langchain/output_parsers/fix.py
CHANGED
@@ -1,11 +1,12 @@
 from __future__ import annotations

-from typing import Any, TypeVar, Union
+from typing import Annotated, Any, TypeVar, Union

 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompts import BasePromptTemplate
 from langchain_core.runnables import Runnable, RunnableSerializable
+from pydantic import SkipValidation
 from typing_extensions import TypedDict

 from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
@@ -26,7 +27,7 @@ class OutputFixingParser(BaseOutputParser[T]):
     def is_lc_serializable(cls) -> bool:
         return True

-    parser: BaseOutputParser[T]
+    parser: Annotated[BaseOutputParser[T], SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
     retry_chain: Union[
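The parser field above is wrapped in Annotated[..., SkipValidation()] so that pydantic v2 stores whatever parser is supplied as-is instead of trying to validate it against the generic field type. Illustrative sketch with simplified types (the Wrapper model is hypothetical):

    from typing import Annotated, List

    from pydantic import BaseModel, SkipValidation


    class Wrapper(BaseModel):
        # SkipValidation keeps the annotation for type checkers but tells pydantic
        # not to validate or coerce the value at runtime.
        items: Annotated[List[int], SkipValidation()]


    # Accepted without error: validation is skipped for this field.
    w = Wrapper(items=["not", "checked"])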
langchain/output_parsers/pandas_dataframe.py
CHANGED
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Tuple, Union

 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers.base import BaseOutputParser
-from langchain_core.pydantic_v1 import validator
+from pydantic import validator

 from langchain.output_parsers.format_instructions import (
     PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS,
langchain/output_parsers/retry.py
CHANGED
@@ -8,7 +8,8 @@ from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompt_values import PromptValue
 from langchain_core.prompts import BasePromptTemplate, PromptTemplate
 from langchain_core.runnables import RunnableSerializable
-from typing_extensions import TypedDict
+from pydantic import SkipValidation
+from typing_extensions import Annotated, TypedDict

 NAIVE_COMPLETION_RETRY = """Prompt:
 {prompt}
@@ -53,7 +54,7 @@ class RetryOutputParser(BaseOutputParser[T]):
     LLM, and telling it the completion did not satisfy criteria in the prompt.
     """

-    parser: BaseOutputParser[T]
+    parser: Annotated[BaseOutputParser[T], SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
     retry_chain: Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any]
@@ -183,7 +184,7 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
     LLM, which in theory should give it more information on how to fix it.
     """

-    parser: BaseOutputParser[T]
+    parser: Annotated[BaseOutputParser[T], SkipValidation()]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
     retry_chain: Union[
langchain/output_parsers/structured.py
CHANGED
@@ -4,7 +4,7 @@ from typing import Any, Dict, List

 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.output_parsers.json import parse_and_check_json_markdown
-from langchain_core.pydantic_v1 import BaseModel
+from pydantic import BaseModel

 from langchain.output_parsers.format_instructions import (
     STRUCTURED_FORMAT_INSTRUCTIONS,
langchain/output_parsers/yaml.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Type, TypeVar
 import yaml
 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import BaseOutputParser
-from langchain_core.pydantic_v1 import BaseModel, ValidationError
+from pydantic import BaseModel, ValidationError

 from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS

langchain/retrievers/contextual_compression.py
CHANGED
@@ -6,6 +6,7 @@ from langchain_core.callbacks import (
 )
 from langchain_core.documents import Document
 from langchain_core.retrievers import BaseRetriever, RetrieverLike
+from pydantic import ConfigDict

 from langchain.retrievers.document_compressors.base import (
     BaseDocumentCompressor,
@@ -21,8 +22,9 @@ class ContextualCompressionRetriever(BaseRetriever):
     base_retriever: RetrieverLike
     """Base Retriever to use for getting relevant documents."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     def _get_relevant_documents(
         self,
langchain/retrievers/document_compressors/base.py
CHANGED
@@ -7,6 +7,7 @@ from langchain_core.documents import (
     BaseDocumentTransformer,
     Document,
 )
+from pydantic import ConfigDict


 class DocumentCompressorPipeline(BaseDocumentCompressor):
@@ -15,8 +16,9 @@ class DocumentCompressorPipeline(BaseDocumentCompressor):
     transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
     """List of document filters that are chained together and run in sequence."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     def compress_documents(
         self,
langchain/retrievers/document_compressors/chain_extract.py
CHANGED
@@ -11,6 +11,7 @@ from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompts import PromptTemplate
 from langchain_core.runnables import Runnable
+from pydantic import ConfigDict

 from langchain.chains.llm import LLMChain
 from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
@@ -56,8 +57,9 @@ class LLMChainExtractor(BaseDocumentCompressor):
     get_input: Callable[[str, Document], dict] = default_get_input
     """Callable for constructing the chain input from the query and a Document."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     def compress_documents(
         self,
langchain/retrievers/document_compressors/chain_filter.py
CHANGED
@@ -9,6 +9,7 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts import BasePromptTemplate, PromptTemplate
 from langchain_core.runnables import Runnable
 from langchain_core.runnables.config import RunnableConfig
+from pydantic import ConfigDict

 from langchain.chains import LLMChain
 from langchain.output_parsers.boolean import BooleanOutputParser
@@ -41,8 +42,9 @@ class LLMChainFilter(BaseDocumentCompressor):
     get_input: Callable[[str, Document], dict] = default_get_input
     """Callable for constructing the chain input from the query and a Document."""

-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )

     def compress_documents(
         self,
langchain/retrievers/document_compressors/cohere_rerank.py
CHANGED
@@ -6,8 +6,8 @@ from typing import Any, Dict, List, Optional, Sequence, Union
 from langchain_core._api.deprecation import deprecated
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.documents import Document
-from langchain_core.pydantic_v1 import root_validator
 from langchain_core.utils import get_from_dict_or_env
+from pydantic import ConfigDict, model_validator

 from langchain.retrievers.document_compressors.base import BaseDocumentCompressor

@@ -30,12 +30,14 @@ class CohereRerank(BaseDocumentCompressor):
     user_agent: str = "langchain"
     """Identifier for the application making the request."""

-    class Config:
-        arbitrary_types_allowed = True
-        extra = "forbid"
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+        extra="forbid",
+    )

-    @root_validator(pre=True)
-    def validate_environment(cls, values: Dict) -> Dict:
+    @model_validator(mode="before")
+    @classmethod
+    def validate_environment(cls, values: Dict) -> Any:
         """Validate that api key and python package exists in environment."""
         if not values.get("client"):
             try:
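This last hunk converts validate_environment to the mode="before" form; the same shape is commonly used to pull missing settings from the environment before field validation runs. A hedged sketch, with a hypothetical model and environment variable name (neither comes from the package):

    import os
    from typing import Any, Dict

    from pydantic import BaseModel, model_validator


    class ClientSettings(BaseModel):
        api_key: str = ""

        @model_validator(mode="before")
        @classmethod
        def validate_environment(cls, values: Dict) -> Any:
            # Runs on the raw input dict, so it can inject a value pulled from the
            # environment when the caller did not pass one explicitly.
            if not values.get("api_key"):
                values["api_key"] = os.environ.get("EXAMPLE_API_KEY", "")
            return values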
|