langchain 0.2.15__py3-none-any.whl → 0.3.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic. See the registry's advisory page for more details.

Files changed (92)
  1. langchain/agents/agent.py +21 -17
  2. langchain/agents/agent_toolkits/vectorstore/toolkit.py +10 -7
  3. langchain/agents/chat/base.py +1 -1
  4. langchain/agents/conversational/base.py +1 -1
  5. langchain/agents/conversational_chat/base.py +1 -1
  6. langchain/agents/mrkl/base.py +1 -1
  7. langchain/agents/openai_assistant/base.py +8 -7
  8. langchain/agents/openai_functions_agent/base.py +6 -5
  9. langchain/agents/openai_functions_multi_agent/base.py +6 -5
  10. langchain/agents/openai_tools/base.py +8 -3
  11. langchain/agents/react/base.py +1 -1
  12. langchain/agents/self_ask_with_search/base.py +1 -1
  13. langchain/agents/structured_chat/base.py +1 -1
  14. langchain/agents/structured_chat/output_parser.py +1 -1
  15. langchain/chains/api/base.py +13 -11
  16. langchain/chains/base.py +13 -5
  17. langchain/chains/combine_documents/base.py +1 -1
  18. langchain/chains/combine_documents/map_reduce.py +14 -10
  19. langchain/chains/combine_documents/map_rerank.py +17 -14
  20. langchain/chains/combine_documents/reduce.py +5 -3
  21. langchain/chains/combine_documents/refine.py +11 -8
  22. langchain/chains/combine_documents/stuff.py +8 -6
  23. langchain/chains/constitutional_ai/models.py +1 -1
  24. langchain/chains/conversation/base.py +13 -11
  25. langchain/chains/conversational_retrieval/base.py +9 -7
  26. langchain/chains/elasticsearch_database/base.py +10 -8
  27. langchain/chains/flare/base.py +5 -2
  28. langchain/chains/hyde/base.py +5 -3
  29. langchain/chains/llm.py +5 -4
  30. langchain/chains/llm_checker/base.py +8 -6
  31. langchain/chains/llm_math/base.py +8 -6
  32. langchain/chains/llm_summarization_checker/base.py +8 -6
  33. langchain/chains/mapreduce.py +5 -3
  34. langchain/chains/moderation.py +12 -10
  35. langchain/chains/natbot/base.py +8 -6
  36. langchain/chains/openai_functions/base.py +1 -1
  37. langchain/chains/openai_functions/citation_fuzzy_match.py +1 -1
  38. langchain/chains/openai_functions/extraction.py +1 -1
  39. langchain/chains/openai_functions/qa_with_structure.py +1 -1
  40. langchain/chains/openai_tools/extraction.py +1 -1
  41. langchain/chains/prompt_selector.py +1 -1
  42. langchain/chains/qa_generation/base.py +1 -1
  43. langchain/chains/qa_with_sources/base.py +8 -6
  44. langchain/chains/qa_with_sources/retrieval.py +1 -1
  45. langchain/chains/qa_with_sources/vector_db.py +4 -3
  46. langchain/chains/query_constructor/schema.py +5 -4
  47. langchain/chains/retrieval_qa/base.py +12 -9
  48. langchain/chains/router/base.py +5 -3
  49. langchain/chains/router/embedding_router.py +5 -3
  50. langchain/chains/router/llm_router.py +6 -5
  51. langchain/chains/sequential.py +17 -13
  52. langchain/chains/structured_output/base.py +1 -1
  53. langchain/chains/transform.py +1 -1
  54. langchain/chat_models/base.py +1 -1
  55. langchain/evaluation/agents/trajectory_eval_chain.py +4 -3
  56. langchain/evaluation/comparison/eval_chain.py +4 -3
  57. langchain/evaluation/criteria/eval_chain.py +4 -3
  58. langchain/evaluation/embedding_distance/base.py +4 -3
  59. langchain/evaluation/qa/eval_chain.py +7 -4
  60. langchain/evaluation/qa/generate_chain.py +1 -1
  61. langchain/evaluation/scoring/eval_chain.py +4 -3
  62. langchain/evaluation/string_distance/base.py +1 -1
  63. langchain/indexes/vectorstore.py +9 -7
  64. langchain/memory/chat_memory.py +1 -1
  65. langchain/memory/combined.py +1 -1
  66. langchain/memory/entity.py +4 -3
  67. langchain/memory/summary.py +1 -1
  68. langchain/memory/vectorstore.py +1 -1
  69. langchain/memory/vectorstore_token_buffer_memory.py +1 -1
  70. langchain/output_parsers/fix.py +3 -2
  71. langchain/output_parsers/pandas_dataframe.py +1 -1
  72. langchain/output_parsers/retry.py +4 -3
  73. langchain/output_parsers/structured.py +1 -1
  74. langchain/output_parsers/yaml.py +1 -1
  75. langchain/retrievers/contextual_compression.py +4 -2
  76. langchain/retrievers/document_compressors/base.py +4 -2
  77. langchain/retrievers/document_compressors/chain_extract.py +4 -2
  78. langchain/retrievers/document_compressors/chain_filter.py +4 -2
  79. langchain/retrievers/document_compressors/cohere_rerank.py +8 -6
  80. langchain/retrievers/document_compressors/cross_encoder_rerank.py +5 -3
  81. langchain/retrievers/document_compressors/embeddings_filter.py +4 -3
  82. langchain/retrievers/document_compressors/listwise_rerank.py +4 -3
  83. langchain/retrievers/ensemble.py +4 -3
  84. langchain/retrievers/multi_vector.py +5 -4
  85. langchain/retrievers/self_query/base.py +19 -8
  86. langchain/retrievers/time_weighted_retriever.py +4 -3
  87. langchain/smith/evaluation/config.py +7 -5
  88. {langchain-0.2.15.dist-info → langchain-0.3.0.dev1.dist-info}/METADATA +4 -4
  89. {langchain-0.2.15.dist-info → langchain-0.3.0.dev1.dist-info}/RECORD +92 -92
  90. {langchain-0.2.15.dist-info → langchain-0.3.0.dev1.dist-info}/LICENSE +0 -0
  91. {langchain-0.2.15.dist-info → langchain-0.3.0.dev1.dist-info}/WHEEL +0 -0
  92. {langchain-0.2.15.dist-info → langchain-0.3.0.dev1.dist-info}/entry_points.txt +0 -0
@@ -1,4 +1,4 @@
1
- from langchain_core.pydantic_v1 import BaseModel
1
+ from pydantic import BaseModel, ConfigDict
2
2
 
3
3
 
4
4
  class AttributeInfo(BaseModel):
@@ -8,6 +8,7 @@ class AttributeInfo(BaseModel):
8
8
  description: str
9
9
  type: str
10
10
 
11
- class Config:
12
- arbitrary_types_allowed = True
13
- frozen = True
11
+ model_config = ConfigDict(
12
+ arbitrary_types_allowed=True,
13
+ frozen=True,
14
+ )
@@ -16,9 +16,9 @@ from langchain_core.callbacks import (
16
16
  from langchain_core.documents import Document
17
17
  from langchain_core.language_models import BaseLanguageModel
18
18
  from langchain_core.prompts import PromptTemplate
19
- from langchain_core.pydantic_v1 import Field, root_validator
20
19
  from langchain_core.retrievers import BaseRetriever
21
20
  from langchain_core.vectorstores import VectorStore
21
+ from pydantic import ConfigDict, Field, model_validator
22
22
 
23
23
  from langchain.chains.base import Chain
24
24
  from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -47,10 +47,11 @@ class BaseRetrievalQA(Chain):
47
47
  return_source_documents: bool = False
48
48
  """Return the source documents or not."""
49
49
 
50
- class Config:
51
- allow_population_by_field_name = True
52
- arbitrary_types_allowed = True
53
- extra = "forbid"
50
+ model_config = ConfigDict(
51
+ populate_by_name=True,
52
+ arbitrary_types_allowed=True,
53
+ extra="forbid",
54
+ )
54
55
 
55
56
  @property
56
57
  def input_keys(self) -> List[str]:
@@ -309,16 +310,18 @@ class VectorDBQA(BaseRetrievalQA):
309
310
  search_kwargs: Dict[str, Any] = Field(default_factory=dict)
310
311
  """Extra search args."""
311
312
 
312
- @root_validator(pre=True)
313
- def raise_deprecation(cls, values: Dict) -> Dict:
313
+ @model_validator(mode="before")
314
+ @classmethod
315
+ def raise_deprecation(cls, values: Dict) -> Any:
314
316
  warnings.warn(
315
317
  "`VectorDBQA` is deprecated - "
316
318
  "please use `from langchain.chains import RetrievalQA`"
317
319
  )
318
320
  return values
319
321
 
320
- @root_validator(pre=True)
321
- def validate_search_type(cls, values: Dict) -> Dict:
322
+ @model_validator(mode="before")
323
+ @classmethod
324
+ def validate_search_type(cls, values: Dict) -> Any:
322
325
  """Validate search type."""
323
326
  if "search_type" in values:
324
327
  search_type = values["search_type"]
@@ -10,6 +10,7 @@ from langchain_core.callbacks import (
10
10
  CallbackManagerForChainRun,
11
11
  Callbacks,
12
12
  )
13
+ from pydantic import ConfigDict
13
14
 
14
15
  from langchain.chains.base import Chain
15
16
 
@@ -60,9 +61,10 @@ class MultiRouteChain(Chain):
60
61
  """If True, use default_chain when an invalid destination name is provided.
61
62
  Defaults to False."""
62
63
 
63
- class Config:
64
- arbitrary_types_allowed = True
65
- extra = "forbid"
64
+ model_config = ConfigDict(
65
+ arbitrary_types_allowed=True,
66
+ extra="forbid",
67
+ )
66
68
 
67
69
  @property
68
70
  def input_keys(self) -> List[str]:
@@ -9,6 +9,7 @@ from langchain_core.callbacks import (
9
9
  from langchain_core.documents import Document
10
10
  from langchain_core.embeddings import Embeddings
11
11
  from langchain_core.vectorstores import VectorStore
12
+ from pydantic import ConfigDict
12
13
 
13
14
  from langchain.chains.router.base import RouterChain
14
15
 
@@ -19,9 +20,10 @@ class EmbeddingRouterChain(RouterChain):
19
20
  vectorstore: VectorStore
20
21
  routing_keys: List[str] = ["query"]
21
22
 
22
- class Config:
23
- arbitrary_types_allowed = True
24
- extra = "forbid"
23
+ model_config = ConfigDict(
24
+ arbitrary_types_allowed=True,
25
+ extra="forbid",
26
+ )
25
27
 
26
28
  @property
27
29
  def input_keys(self) -> List[str]:
@@ -13,8 +13,9 @@ from langchain_core.exceptions import OutputParserException
13
13
  from langchain_core.language_models import BaseLanguageModel
14
14
  from langchain_core.output_parsers import BaseOutputParser
15
15
  from langchain_core.prompts import BasePromptTemplate
16
- from langchain_core.pydantic_v1 import root_validator
17
16
  from langchain_core.utils.json import parse_and_check_json_markdown
17
+ from pydantic import model_validator
18
+ from typing_extensions import Self
18
19
 
19
20
  from langchain.chains import LLMChain
20
21
  from langchain.chains.router.base import RouterChain
@@ -100,9 +101,9 @@ class LLMRouterChain(RouterChain):
100
101
  llm_chain: LLMChain
101
102
  """LLM chain used to perform routing"""
102
103
 
103
- @root_validator(pre=False, skip_on_failure=True)
104
- def validate_prompt(cls, values: dict) -> dict:
105
- prompt = values["llm_chain"].prompt
104
+ @model_validator(mode="after")
105
+ def validate_prompt(self) -> Self:
106
+ prompt = self.llm_chain.prompt
106
107
  if prompt.output_parser is None:
107
108
  raise ValueError(
108
109
  "LLMRouterChain requires base llm_chain prompt to have an output"
@@ -110,7 +111,7 @@ class LLMRouterChain(RouterChain):
110
111
  " 'destination' and 'next_inputs'. Received a prompt with no output"
111
112
  " parser."
112
113
  )
113
- return values
114
+ return self
114
115
 
115
116
  @property
116
117
  def input_keys(self) -> List[str]:
@@ -6,8 +6,9 @@ from langchain_core.callbacks import (
6
6
  AsyncCallbackManagerForChainRun,
7
7
  CallbackManagerForChainRun,
8
8
  )
9
- from langchain_core.pydantic_v1 import root_validator
10
9
  from langchain_core.utils.input import get_color_mapping
10
+ from pydantic import ConfigDict, model_validator
11
+ from typing_extensions import Self
11
12
 
12
13
  from langchain.chains.base import Chain
13
14
 
@@ -20,9 +21,10 @@ class SequentialChain(Chain):
20
21
  output_variables: List[str] #: :meta private:
21
22
  return_all: bool = False
22
23
 
23
- class Config:
24
- arbitrary_types_allowed = True
25
- extra = "forbid"
24
+ model_config = ConfigDict(
25
+ arbitrary_types_allowed=True,
26
+ extra="forbid",
27
+ )
26
28
 
27
29
  @property
28
30
  def input_keys(self) -> List[str]:
@@ -40,8 +42,9 @@ class SequentialChain(Chain):
40
42
  """
41
43
  return self.output_variables
42
44
 
43
- @root_validator(pre=True)
44
- def validate_chains(cls, values: Dict) -> Dict:
45
+ @model_validator(mode="before")
46
+ @classmethod
47
+ def validate_chains(cls, values: Dict) -> Any:
45
48
  """Validate that the correct inputs exist for all chains."""
46
49
  chains = values["chains"]
47
50
  input_variables = values["input_variables"]
@@ -129,9 +132,10 @@ class SimpleSequentialChain(Chain):
129
132
  input_key: str = "input" #: :meta private:
130
133
  output_key: str = "output" #: :meta private:
131
134
 
132
- class Config:
133
- arbitrary_types_allowed = True
134
- extra = "forbid"
135
+ model_config = ConfigDict(
136
+ arbitrary_types_allowed=True,
137
+ extra="forbid",
138
+ )
135
139
 
136
140
  @property
137
141
  def input_keys(self) -> List[str]:
@@ -149,10 +153,10 @@ class SimpleSequentialChain(Chain):
149
153
  """
150
154
  return [self.output_key]
151
155
 
152
- @root_validator(pre=False, skip_on_failure=True)
153
- def validate_chains(cls, values: Dict) -> Dict:
156
+ @model_validator(mode="after")
157
+ def validate_chains(self) -> Self:
154
158
  """Validate that chains are all single input/output."""
155
- for chain in values["chains"]:
159
+ for chain in self.chains:
156
160
  if len(chain.input_keys) != 1:
157
161
  raise ValueError(
158
162
  "Chains used in SimplePipeline should all have one input, got "
@@ -163,7 +167,7 @@ class SimpleSequentialChain(Chain):
163
167
  "Chains used in SimplePipeline should all have one output, got "
164
168
  f"{chain} with {len(chain.output_keys)} outputs."
165
169
  )
166
- return values
170
+ return self
167
171
 
168
172
  def _call(
169
173
  self,
@@ -18,13 +18,13 @@ from langchain_core.output_parsers.openai_tools import (
18
18
  PydanticToolsParser,
19
19
  )
20
20
  from langchain_core.prompts import BasePromptTemplate
21
- from langchain_core.pydantic_v1 import BaseModel
22
21
  from langchain_core.runnables import Runnable
23
22
  from langchain_core.utils.function_calling import (
24
23
  convert_to_openai_function,
25
24
  convert_to_openai_tool,
26
25
  )
27
26
  from langchain_core.utils.pydantic import is_basemodel_subclass
27
+ from pydantic import BaseModel
28
28
 
29
29
 
30
30
  @deprecated(
@@ -8,7 +8,7 @@ from langchain_core.callbacks import (
8
8
  AsyncCallbackManagerForChainRun,
9
9
  CallbackManagerForChainRun,
10
10
  )
11
- from langchain_core.pydantic_v1 import Field
11
+ from pydantic import Field
12
12
 
13
13
  from langchain.chains.base import Chain
14
14
 
@@ -30,11 +30,11 @@ from langchain_core.language_models.chat_models import (
30
30
  generate_from_stream,
31
31
  )
32
32
  from langchain_core.messages import AnyMessage, BaseMessage
33
- from langchain_core.pydantic_v1 import BaseModel
34
33
  from langchain_core.runnables import Runnable, RunnableConfig
35
34
  from langchain_core.runnables.schema import StreamEvent
36
35
  from langchain_core.tools import BaseTool
37
36
  from langchain_core.tracers import RunLog, RunLogPatch
37
+ from pydantic import BaseModel
38
38
  from typing_extensions import TypeAlias
39
39
 
40
40
  __all__ = [
@@ -28,8 +28,8 @@ from langchain_core.exceptions import OutputParserException
28
28
  from langchain_core.language_models import BaseLanguageModel
29
29
  from langchain_core.language_models.chat_models import BaseChatModel
30
30
  from langchain_core.output_parsers import BaseOutputParser
31
- from langchain_core.pydantic_v1 import Field
32
31
  from langchain_core.tools import BaseTool
32
+ from pydantic import ConfigDict, Field
33
33
 
34
34
  from langchain.chains.llm import LLMChain
35
35
  from langchain.evaluation.agents.trajectory_eval_prompt import (
@@ -156,8 +156,9 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
156
156
  return_reasoning: bool = False # :meta private:
157
157
  """DEPRECATED. Reasoning always returned."""
158
158
 
159
- class Config:
160
- extra = "ignore"
159
+ model_config = ConfigDict(
160
+ extra="ignore",
161
+ )
161
162
 
162
163
  @property
163
164
  def requires_reference(self) -> bool:
@@ -10,7 +10,7 @@ from langchain_core.callbacks.manager import Callbacks
10
10
  from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.output_parsers import BaseOutputParser
12
12
  from langchain_core.prompts.prompt import PromptTemplate
13
- from langchain_core.pydantic_v1 import Field
13
+ from pydantic import ConfigDict, Field
14
14
 
15
15
  from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
16
16
  from langchain.chains.llm import LLMChain
@@ -191,8 +191,9 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
191
191
  def is_lc_serializable(cls) -> bool:
192
192
  return False
193
193
 
194
- class Config:
195
- extra = "ignore"
194
+ model_config = ConfigDict(
195
+ extra="ignore",
196
+ )
196
197
 
197
198
  @property
198
199
  def requires_reference(self) -> bool:
@@ -8,7 +8,7 @@ from langchain_core.callbacks.manager import Callbacks
8
8
  from langchain_core.language_models import BaseLanguageModel
9
9
  from langchain_core.output_parsers import BaseOutputParser
10
10
  from langchain_core.prompts import BasePromptTemplate
11
- from langchain_core.pydantic_v1 import Field
11
+ from pydantic import ConfigDict, Field
12
12
 
13
13
  from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
14
14
  from langchain.chains.llm import LLMChain
@@ -236,8 +236,9 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
236
236
  def is_lc_serializable(cls) -> bool:
237
237
  return False
238
238
 
239
- class Config:
240
- extra = "ignore"
239
+ model_config = ConfigDict(
240
+ extra="ignore",
241
+ )
241
242
 
242
243
  @property
243
244
  def requires_reference(self) -> bool:
@@ -10,8 +10,8 @@ from langchain_core.callbacks.manager import (
10
10
  Callbacks,
11
11
  )
12
12
  from langchain_core.embeddings import Embeddings
13
- from langchain_core.pydantic_v1 import Field
14
13
  from langchain_core.utils import pre_init
14
+ from pydantic import ConfigDict, Field
15
15
 
16
16
  from langchain.chains.base import Chain
17
17
  from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
@@ -113,8 +113,9 @@ class _EmbeddingDistanceChainMixin(Chain):
113
113
  )
114
114
  return values
115
115
 
116
- class Config:
117
- arbitrary_types_allowed: bool = True
116
+ model_config = ConfigDict(
117
+ arbitrary_types_allowed=True,
118
+ )
118
119
 
119
120
  @property
120
121
  def output_keys(self) -> List[str]:
@@ -9,6 +9,7 @@ from typing import Any, List, Optional, Sequence, Tuple
9
9
  from langchain_core.callbacks.manager import Callbacks
10
10
  from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.prompts import PromptTemplate
12
+ from pydantic import ConfigDict
12
13
 
13
14
  from langchain.chains.llm import LLMChain
14
15
  from langchain.evaluation.qa.eval_prompt import CONTEXT_PROMPT, COT_PROMPT, PROMPT
@@ -72,8 +73,9 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
72
73
 
73
74
  output_key: str = "results" #: :meta private:
74
75
 
75
- class Config:
76
- extra = "ignore"
76
+ model_config = ConfigDict(
77
+ extra="ignore",
78
+ )
77
79
 
78
80
  @classmethod
79
81
  def is_lc_serializable(cls) -> bool:
@@ -220,8 +222,9 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
220
222
  """Whether the chain requires an input string."""
221
223
  return True
222
224
 
223
- class Config:
224
- extra = "ignore"
225
+ model_config = ConfigDict(
226
+ extra="ignore",
227
+ )
225
228
 
226
229
  @classmethod
227
230
  def _validate_input_vars(cls, prompt: PromptTemplate) -> None:
@@ -6,7 +6,7 @@ from typing import Any
6
6
 
7
7
  from langchain_core.language_models import BaseLanguageModel
8
8
  from langchain_core.output_parsers import BaseLLMOutputParser
9
- from langchain_core.pydantic_v1 import Field
9
+ from pydantic import Field
10
10
 
11
11
  from langchain.chains.llm import LLMChain
12
12
  from langchain.evaluation.qa.generate_prompt import PROMPT
@@ -10,7 +10,7 @@ from langchain_core.callbacks.manager import Callbacks
10
10
  from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.output_parsers import BaseOutputParser
12
12
  from langchain_core.prompts.prompt import PromptTemplate
13
- from langchain_core.pydantic_v1 import Field
13
+ from pydantic import ConfigDict, Field
14
14
 
15
15
  from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
16
16
  from langchain.chains.llm import LLMChain
@@ -179,8 +179,9 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
179
179
  criterion_name: str
180
180
  """The name of the criterion being evaluated."""
181
181
 
182
- class Config:
183
- extra = "ignore"
182
+ model_config = ConfigDict(
183
+ extra="ignore",
184
+ )
184
185
 
185
186
  @classmethod
186
187
  def is_lc_serializable(cls) -> bool:
@@ -8,8 +8,8 @@ from langchain_core.callbacks.manager import (
8
8
  CallbackManagerForChainRun,
9
9
  Callbacks,
10
10
  )
11
- from langchain_core.pydantic_v1 import Field
12
11
  from langchain_core.utils import pre_init
12
+ from pydantic import Field
13
13
 
14
14
  from langchain.chains.base import Chain
15
15
  from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
@@ -4,9 +4,9 @@ from langchain_core.document_loaders import BaseLoader
4
4
  from langchain_core.documents import Document
5
5
  from langchain_core.embeddings import Embeddings
6
6
  from langchain_core.language_models import BaseLanguageModel
7
- from langchain_core.pydantic_v1 import BaseModel, Field
8
7
  from langchain_core.vectorstores import VectorStore
9
8
  from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
9
+ from pydantic import BaseModel, ConfigDict, Field
10
10
 
11
11
  from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
12
12
  from langchain.chains.retrieval_qa.base import RetrievalQA
@@ -21,9 +21,10 @@ class VectorStoreIndexWrapper(BaseModel):
21
21
 
22
22
  vectorstore: VectorStore
23
23
 
24
- class Config:
25
- arbitrary_types_allowed = True
26
- extra = "forbid"
24
+ model_config = ConfigDict(
25
+ arbitrary_types_allowed=True,
26
+ extra="forbid",
27
+ )
27
28
 
28
29
  def query(
29
30
  self,
@@ -142,9 +143,10 @@ class VectorstoreIndexCreator(BaseModel):
142
143
  text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
143
144
  vectorstore_kwargs: dict = Field(default_factory=dict)
144
145
 
145
- class Config:
146
- arbitrary_types_allowed = True
147
- extra = "forbid"
146
+ model_config = ConfigDict(
147
+ arbitrary_types_allowed=True,
148
+ extra="forbid",
149
+ )
148
150
 
149
151
  def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
150
152
  """Create a vectorstore index from loaders."""
@@ -8,7 +8,7 @@ from langchain_core.chat_history import (
8
8
  )
9
9
  from langchain_core.memory import BaseMemory
10
10
  from langchain_core.messages import AIMessage, HumanMessage
11
- from langchain_core.pydantic_v1 import Field
11
+ from pydantic import Field
12
12
 
13
13
  from langchain.memory.utils import get_prompt_input_key
14
14
 
@@ -2,7 +2,7 @@ import warnings
2
2
  from typing import Any, Dict, List, Set
3
3
 
4
4
  from langchain_core.memory import BaseMemory
5
- from langchain_core.pydantic_v1 import validator
5
+ from pydantic import validator
6
6
 
7
7
  from langchain.memory.chat_memory import BaseChatMemory
8
8
 
@@ -6,7 +6,7 @@ from typing import Any, Dict, Iterable, List, Optional
6
6
  from langchain_core.language_models import BaseLanguageModel
7
7
  from langchain_core.messages import BaseMessage, get_buffer_string
8
8
  from langchain_core.prompts import BasePromptTemplate
9
- from langchain_core.pydantic_v1 import BaseModel, Field
9
+ from pydantic import BaseModel, ConfigDict, Field
10
10
 
11
11
  from langchain.chains.llm import LLMChain
12
12
  from langchain.memory.chat_memory import BaseChatMemory
@@ -245,8 +245,9 @@ class SQLiteEntityStore(BaseEntityStore):
245
245
  table_name: str = "memory_store"
246
246
  conn: Any = None
247
247
 
248
- class Config:
249
- arbitrary_types_allowed = True
248
+ model_config = ConfigDict(
249
+ arbitrary_types_allowed=True,
250
+ )
250
251
 
251
252
  def __init__(
252
253
  self,
@@ -7,8 +7,8 @@ from langchain_core.chat_history import BaseChatMessageHistory
7
7
  from langchain_core.language_models import BaseLanguageModel
8
8
  from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
9
9
  from langchain_core.prompts import BasePromptTemplate
10
- from langchain_core.pydantic_v1 import BaseModel
11
10
  from langchain_core.utils import pre_init
11
+ from pydantic import BaseModel
12
12
 
13
13
  from langchain.chains.llm import LLMChain
14
14
  from langchain.memory.chat_memory import BaseChatMemory
@@ -3,8 +3,8 @@
3
3
  from typing import Any, Dict, List, Optional, Sequence, Union
4
4
 
5
5
  from langchain_core.documents import Document
6
- from langchain_core.pydantic_v1 import Field
7
6
  from langchain_core.vectorstores import VectorStoreRetriever
7
+ from pydantic import Field
8
8
 
9
9
  from langchain.memory.chat_memory import BaseMemory
10
10
  from langchain.memory.utils import get_prompt_input_key
@@ -13,8 +13,8 @@ from typing import Any, Dict, List
13
13
 
14
14
  from langchain_core.messages import BaseMessage
15
15
  from langchain_core.prompts.chat import SystemMessagePromptTemplate
16
- from langchain_core.pydantic_v1 import Field, PrivateAttr
17
16
  from langchain_core.vectorstores import VectorStoreRetriever
17
+ from pydantic import Field, PrivateAttr
18
18
 
19
19
  from langchain.memory import ConversationTokenBufferMemory, VectorStoreRetrieverMemory
20
20
  from langchain.memory.chat_memory import BaseChatMemory
@@ -1,11 +1,12 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import Any, TypeVar, Union
3
+ from typing import Annotated, Any, TypeVar, Union
4
4
 
5
5
  from langchain_core.exceptions import OutputParserException
6
6
  from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
7
7
  from langchain_core.prompts import BasePromptTemplate
8
8
  from langchain_core.runnables import Runnable, RunnableSerializable
9
+ from pydantic import SkipValidation
9
10
  from typing_extensions import TypedDict
10
11
 
11
12
  from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
@@ -26,7 +27,7 @@ class OutputFixingParser(BaseOutputParser[T]):
26
27
  def is_lc_serializable(cls) -> bool:
27
28
  return True
28
29
 
29
- parser: BaseOutputParser[T]
30
+ parser: Annotated[BaseOutputParser[T], SkipValidation()]
30
31
  """The parser to use to parse the output."""
31
32
  # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
32
33
  retry_chain: Union[
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Tuple, Union
3
3
 
4
4
  from langchain_core.exceptions import OutputParserException
5
5
  from langchain_core.output_parsers.base import BaseOutputParser
6
- from langchain_core.pydantic_v1 import validator
6
+ from pydantic import validator
7
7
 
8
8
  from langchain.output_parsers.format_instructions import (
9
9
  PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS,
@@ -8,7 +8,8 @@ from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
8
8
  from langchain_core.prompt_values import PromptValue
9
9
  from langchain_core.prompts import BasePromptTemplate, PromptTemplate
10
10
  from langchain_core.runnables import RunnableSerializable
11
- from typing_extensions import TypedDict
11
+ from pydantic import SkipValidation
12
+ from typing_extensions import Annotated, TypedDict
12
13
 
13
14
  NAIVE_COMPLETION_RETRY = """Prompt:
14
15
  {prompt}
@@ -53,7 +54,7 @@ class RetryOutputParser(BaseOutputParser[T]):
53
54
  LLM, and telling it the completion did not satisfy criteria in the prompt.
54
55
  """
55
56
 
56
- parser: BaseOutputParser[T]
57
+ parser: Annotated[BaseOutputParser[T], SkipValidation()]
57
58
  """The parser to use to parse the output."""
58
59
  # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
59
60
  retry_chain: Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any]
@@ -183,7 +184,7 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
183
184
  LLM, which in theory should give it more information on how to fix it.
184
185
  """
185
186
 
186
- parser: BaseOutputParser[T]
187
+ parser: Annotated[BaseOutputParser[T], SkipValidation()]
187
188
  """The parser to use to parse the output."""
188
189
  # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
189
190
  retry_chain: Union[
@@ -4,7 +4,7 @@ from typing import Any, Dict, List
4
4
 
5
5
  from langchain_core.output_parsers import BaseOutputParser
6
6
  from langchain_core.output_parsers.json import parse_and_check_json_markdown
7
- from langchain_core.pydantic_v1 import BaseModel
7
+ from pydantic import BaseModel
8
8
 
9
9
  from langchain.output_parsers.format_instructions import (
10
10
  STRUCTURED_FORMAT_INSTRUCTIONS,
@@ -5,7 +5,7 @@ from typing import Type, TypeVar
5
5
  import yaml
6
6
  from langchain_core.exceptions import OutputParserException
7
7
  from langchain_core.output_parsers import BaseOutputParser
8
- from langchain_core.pydantic_v1 import BaseModel, ValidationError
8
+ from pydantic import BaseModel, ValidationError
9
9
 
10
10
  from langchain.output_parsers.format_instructions import YAML_FORMAT_INSTRUCTIONS
11
11
 
@@ -6,6 +6,7 @@ from langchain_core.callbacks import (
6
6
  )
7
7
  from langchain_core.documents import Document
8
8
  from langchain_core.retrievers import BaseRetriever, RetrieverLike
9
+ from pydantic import ConfigDict
9
10
 
10
11
  from langchain.retrievers.document_compressors.base import (
11
12
  BaseDocumentCompressor,
@@ -21,8 +22,9 @@ class ContextualCompressionRetriever(BaseRetriever):
21
22
  base_retriever: RetrieverLike
22
23
  """Base Retriever to use for getting relevant documents."""
23
24
 
24
- class Config:
25
- arbitrary_types_allowed = True
25
+ model_config = ConfigDict(
26
+ arbitrary_types_allowed=True,
27
+ )
26
28
 
27
29
  def _get_relevant_documents(
28
30
  self,