langchain 0.2.16__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic. See the package registry's advisory page for more details.

Files changed (95)
  1. langchain/agents/agent.py +23 -19
  2. langchain/agents/agent_toolkits/vectorstore/toolkit.py +10 -7
  3. langchain/agents/chat/base.py +1 -1
  4. langchain/agents/conversational/base.py +1 -1
  5. langchain/agents/conversational_chat/base.py +1 -1
  6. langchain/agents/mrkl/base.py +1 -1
  7. langchain/agents/openai_assistant/base.py +8 -7
  8. langchain/agents/openai_functions_agent/base.py +6 -5
  9. langchain/agents/openai_functions_multi_agent/base.py +6 -5
  10. langchain/agents/react/base.py +1 -1
  11. langchain/agents/self_ask_with_search/base.py +1 -1
  12. langchain/agents/structured_chat/base.py +1 -1
  13. langchain/agents/structured_chat/output_parser.py +1 -1
  14. langchain/chains/api/base.py +14 -12
  15. langchain/chains/base.py +17 -9
  16. langchain/chains/combine_documents/base.py +1 -1
  17. langchain/chains/combine_documents/map_reduce.py +14 -10
  18. langchain/chains/combine_documents/map_rerank.py +17 -14
  19. langchain/chains/combine_documents/reduce.py +5 -3
  20. langchain/chains/combine_documents/refine.py +11 -8
  21. langchain/chains/combine_documents/stuff.py +8 -6
  22. langchain/chains/constitutional_ai/models.py +1 -1
  23. langchain/chains/conversation/base.py +13 -11
  24. langchain/chains/conversational_retrieval/base.py +10 -8
  25. langchain/chains/elasticsearch_database/base.py +11 -9
  26. langchain/chains/flare/base.py +1 -1
  27. langchain/chains/hyde/base.py +6 -4
  28. langchain/chains/llm.py +7 -7
  29. langchain/chains/llm_checker/base.py +8 -6
  30. langchain/chains/llm_math/base.py +8 -6
  31. langchain/chains/llm_summarization_checker/base.py +8 -6
  32. langchain/chains/mapreduce.py +5 -3
  33. langchain/chains/moderation.py +6 -5
  34. langchain/chains/natbot/base.py +8 -6
  35. langchain/chains/openai_functions/base.py +3 -3
  36. langchain/chains/openai_functions/citation_fuzzy_match.py +1 -1
  37. langchain/chains/openai_functions/extraction.py +8 -4
  38. langchain/chains/openai_functions/qa_with_structure.py +5 -2
  39. langchain/chains/openai_functions/tagging.py +5 -2
  40. langchain/chains/openai_tools/extraction.py +2 -2
  41. langchain/chains/prompt_selector.py +1 -1
  42. langchain/chains/qa_generation/base.py +1 -1
  43. langchain/chains/qa_with_sources/base.py +8 -6
  44. langchain/chains/qa_with_sources/retrieval.py +1 -1
  45. langchain/chains/qa_with_sources/vector_db.py +4 -3
  46. langchain/chains/query_constructor/schema.py +5 -4
  47. langchain/chains/retrieval_qa/base.py +12 -9
  48. langchain/chains/router/base.py +5 -3
  49. langchain/chains/router/embedding_router.py +5 -3
  50. langchain/chains/router/llm_router.py +6 -5
  51. langchain/chains/sequential.py +17 -13
  52. langchain/chains/structured_output/base.py +8 -8
  53. langchain/chains/transform.py +1 -1
  54. langchain/chat_models/base.py +2 -2
  55. langchain/evaluation/agents/trajectory_eval_chain.py +4 -3
  56. langchain/evaluation/comparison/eval_chain.py +4 -3
  57. langchain/evaluation/criteria/eval_chain.py +4 -3
  58. langchain/evaluation/embedding_distance/base.py +4 -3
  59. langchain/evaluation/qa/eval_chain.py +7 -4
  60. langchain/evaluation/qa/generate_chain.py +1 -1
  61. langchain/evaluation/scoring/eval_chain.py +4 -3
  62. langchain/evaluation/string_distance/base.py +1 -1
  63. langchain/indexes/vectorstore.py +9 -7
  64. langchain/memory/chat_memory.py +1 -1
  65. langchain/memory/combined.py +5 -3
  66. langchain/memory/entity.py +4 -3
  67. langchain/memory/summary.py +1 -1
  68. langchain/memory/vectorstore.py +1 -1
  69. langchain/memory/vectorstore_token_buffer_memory.py +1 -1
  70. langchain/output_parsers/fix.py +3 -2
  71. langchain/output_parsers/pandas_dataframe.py +3 -2
  72. langchain/output_parsers/retry.py +4 -3
  73. langchain/output_parsers/structured.py +1 -1
  74. langchain/output_parsers/yaml.py +5 -2
  75. langchain/pydantic_v1/__init__.py +20 -0
  76. langchain/pydantic_v1/dataclasses.py +20 -0
  77. langchain/pydantic_v1/main.py +20 -0
  78. langchain/retrievers/contextual_compression.py +4 -2
  79. langchain/retrievers/document_compressors/base.py +4 -2
  80. langchain/retrievers/document_compressors/chain_extract.py +4 -2
  81. langchain/retrievers/document_compressors/chain_filter.py +4 -2
  82. langchain/retrievers/document_compressors/cohere_rerank.py +8 -6
  83. langchain/retrievers/document_compressors/cross_encoder_rerank.py +5 -3
  84. langchain/retrievers/document_compressors/embeddings_filter.py +5 -4
  85. langchain/retrievers/document_compressors/listwise_rerank.py +4 -3
  86. langchain/retrievers/ensemble.py +18 -14
  87. langchain/retrievers/multi_vector.py +5 -4
  88. langchain/retrievers/self_query/base.py +8 -6
  89. langchain/retrievers/time_weighted_retriever.py +4 -3
  90. langchain/smith/evaluation/config.py +7 -5
  91. {langchain-0.2.16.dist-info → langchain-0.3.0.dist-info}/METADATA +5 -5
  92. {langchain-0.2.16.dist-info → langchain-0.3.0.dist-info}/RECORD +95 -95
  93. {langchain-0.2.16.dist-info → langchain-0.3.0.dist-info}/LICENSE +0 -0
  94. {langchain-0.2.16.dist-info → langchain-0.3.0.dist-info}/WHEEL +0 -0
  95. {langchain-0.2.16.dist-info → langchain-0.3.0.dist-info}/entry_points.txt +0 -0
@@ -6,6 +6,7 @@ from typing import Any, Callable, List, Optional, Protocol, Tuple
6
6
 
7
7
  from langchain_core.callbacks import Callbacks
8
8
  from langchain_core.documents import Document
9
+ from pydantic import ConfigDict
9
10
 
10
11
  from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
11
12
 
@@ -204,9 +205,10 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
204
205
  If None, it will keep trying to collapse documents to fit token_max.
205
206
  Otherwise, after it reaches the max number, it will throw an error"""
206
207
 
207
- class Config:
208
- arbitrary_types_allowed = True
209
- extra = "forbid"
208
+ model_config = ConfigDict(
209
+ arbitrary_types_allowed=True,
210
+ extra="forbid",
211
+ )
210
212
 
211
213
  @property
212
214
  def _collapse_chain(self) -> BaseCombineDocumentsChain:
@@ -8,7 +8,7 @@ from langchain_core.callbacks import Callbacks
8
8
  from langchain_core.documents import Document
9
9
  from langchain_core.prompts import BasePromptTemplate, format_document
10
10
  from langchain_core.prompts.prompt import PromptTemplate
11
- from langchain_core.pydantic_v1 import Field, root_validator
11
+ from pydantic import ConfigDict, Field, model_validator
12
12
 
13
13
  from langchain.chains.combine_documents.base import (
14
14
  BaseCombineDocumentsChain,
@@ -98,20 +98,23 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
98
98
  _output_keys = _output_keys + ["intermediate_steps"]
99
99
  return _output_keys
100
100
 
101
- class Config:
102
- arbitrary_types_allowed = True
103
- extra = "forbid"
101
+ model_config = ConfigDict(
102
+ arbitrary_types_allowed=True,
103
+ extra="forbid",
104
+ )
104
105
 
105
- @root_validator(pre=True)
106
- def get_return_intermediate_steps(cls, values: Dict) -> Dict:
106
+ @model_validator(mode="before")
107
+ @classmethod
108
+ def get_return_intermediate_steps(cls, values: Dict) -> Any:
107
109
  """For backwards compatibility."""
108
110
  if "return_refine_steps" in values:
109
111
  values["return_intermediate_steps"] = values["return_refine_steps"]
110
112
  del values["return_refine_steps"]
111
113
  return values
112
114
 
113
- @root_validator(pre=True)
114
- def get_default_document_variable_name(cls, values: Dict) -> Dict:
115
+ @model_validator(mode="before")
116
+ @classmethod
117
+ def get_default_document_variable_name(cls, values: Dict) -> Any:
115
118
  """Get default document variable name, if not provided."""
116
119
  if "initial_llm_chain" not in values:
117
120
  raise ValueError("initial_llm_chain must be provided")
@@ -8,8 +8,8 @@ from langchain_core.documents import Document
8
8
  from langchain_core.language_models import LanguageModelLike
9
9
  from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
10
10
  from langchain_core.prompts import BasePromptTemplate, format_document
11
- from langchain_core.pydantic_v1 import Field, root_validator
12
11
  from langchain_core.runnables import Runnable, RunnablePassthrough
12
+ from pydantic import ConfigDict, Field, model_validator
13
13
 
14
14
  from langchain.chains.combine_documents.base import (
15
15
  DEFAULT_DOCUMENT_PROMPT,
@@ -156,12 +156,14 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
156
156
  document_separator: str = "\n\n"
157
157
  """The string with which to join the formatted documents"""
158
158
 
159
- class Config:
160
- arbitrary_types_allowed = True
161
- extra = "forbid"
159
+ model_config = ConfigDict(
160
+ arbitrary_types_allowed=True,
161
+ extra="forbid",
162
+ )
162
163
 
163
- @root_validator(pre=True)
164
- def get_default_document_variable_name(cls, values: Dict) -> Dict:
164
+ @model_validator(mode="before")
165
+ @classmethod
166
+ def get_default_document_variable_name(cls, values: Dict) -> Any:
165
167
  """Get default document variable name, if not provided.
166
168
 
167
169
  If only one variable is present in the llm_chain.prompt,
@@ -1,6 +1,6 @@
1
1
  """Models for the Constitutional AI chain."""
2
2
 
3
- from langchain_core.pydantic_v1 import BaseModel
3
+ from pydantic import BaseModel
4
4
 
5
5
 
6
6
  class ConstitutionalPrinciple(BaseModel):
@@ -1,11 +1,12 @@
1
1
  """Chain that carries on a conversation and calls an LLM."""
2
2
 
3
- from typing import Dict, List
3
+ from typing import List
4
4
 
5
5
  from langchain_core._api import deprecated
6
6
  from langchain_core.memory import BaseMemory
7
7
  from langchain_core.prompts import BasePromptTemplate
8
- from langchain_core.pydantic_v1 import Field, root_validator
8
+ from pydantic import ConfigDict, Field, model_validator
9
+ from typing_extensions import Self
9
10
 
10
11
  from langchain.chains.conversation.prompt import PROMPT
11
12
  from langchain.chains.llm import LLMChain
@@ -110,9 +111,10 @@ class ConversationChain(LLMChain):
110
111
  input_key: str = "input" #: :meta private:
111
112
  output_key: str = "response" #: :meta private:
112
113
 
113
- class Config:
114
- arbitrary_types_allowed = True
115
- extra = "forbid"
114
+ model_config = ConfigDict(
115
+ arbitrary_types_allowed=True,
116
+ extra="forbid",
117
+ )
116
118
 
117
119
  @classmethod
118
120
  def is_lc_serializable(cls) -> bool:
@@ -123,17 +125,17 @@ class ConversationChain(LLMChain):
123
125
  """Use this since so some prompt vars come from history."""
124
126
  return [self.input_key]
125
127
 
126
- @root_validator(pre=False, skip_on_failure=True)
127
- def validate_prompt_input_variables(cls, values: Dict) -> Dict:
128
+ @model_validator(mode="after")
129
+ def validate_prompt_input_variables(self) -> Self:
128
130
  """Validate that prompt input variables are consistent."""
129
- memory_keys = values["memory"].memory_variables
130
- input_key = values["input_key"]
131
+ memory_keys = self.memory.memory_variables
132
+ input_key = self.input_key
131
133
  if input_key in memory_keys:
132
134
  raise ValueError(
133
135
  f"The input key {input_key} was also found in the memory keys "
134
136
  f"({memory_keys}) - please provide keys that don't overlap."
135
137
  )
136
- prompt_variables = values["prompt"].input_variables
138
+ prompt_variables = self.prompt.input_variables
137
139
  expected_keys = memory_keys + [input_key]
138
140
  if set(expected_keys) != set(prompt_variables):
139
141
  raise ValueError(
@@ -141,4 +143,4 @@ class ConversationChain(LLMChain):
141
143
  f"{prompt_variables}, but got {memory_keys} as inputs from "
142
144
  f"memory, and {input_key} as the normal input key."
143
145
  )
144
- return values
146
+ return self
@@ -18,10 +18,10 @@ from langchain_core.documents import Document
18
18
  from langchain_core.language_models import BaseLanguageModel
19
19
  from langchain_core.messages import BaseMessage
20
20
  from langchain_core.prompts import BasePromptTemplate
21
- from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
22
21
  from langchain_core.retrievers import BaseRetriever
23
22
  from langchain_core.runnables import RunnableConfig
24
23
  from langchain_core.vectorstores import VectorStore
24
+ from pydantic import BaseModel, ConfigDict, Field, model_validator
25
25
 
26
26
  from langchain.chains.base import Chain
27
27
  from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
@@ -92,14 +92,15 @@ class BaseConversationalRetrievalChain(Chain):
92
92
  get_chat_history: Optional[Callable[[List[CHAT_TURN_TYPE]], str]] = None
93
93
  """An optional function to get a string of the chat history.
94
94
  If None is provided, will use a default."""
95
- response_if_no_docs_found: Optional[str]
95
+ response_if_no_docs_found: Optional[str] = None
96
96
  """If specified, the chain will return a fixed response if no docs
97
97
  are found for the question. """
98
98
 
99
- class Config:
100
- allow_population_by_field_name = True
101
- arbitrary_types_allowed = True
102
- extra = "forbid"
99
+ model_config = ConfigDict(
100
+ populate_by_name=True,
101
+ arbitrary_types_allowed=True,
102
+ extra="forbid",
103
+ )
103
104
 
104
105
  @property
105
106
  def input_keys(self) -> List[str]:
@@ -482,8 +483,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
482
483
  def _chain_type(self) -> str:
483
484
  return "chat-vector-db"
484
485
 
485
- @root_validator(pre=True)
486
- def raise_deprecation(cls, values: Dict) -> Dict:
486
+ @model_validator(mode="before")
487
+ @classmethod
488
+ def raise_deprecation(cls, values: Dict) -> Any:
487
489
  warnings.warn(
488
490
  "`ChatVectorDBChain` is deprecated - "
489
491
  "please use `from langchain.chains import ConversationalRetrievalChain`"
@@ -9,8 +9,9 @@ from langchain_core.language_models import BaseLanguageModel
9
9
  from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
10
10
  from langchain_core.output_parsers.json import SimpleJsonOutputParser
11
11
  from langchain_core.prompts import BasePromptTemplate
12
- from langchain_core.pydantic_v1 import root_validator
13
12
  from langchain_core.runnables import Runnable
13
+ from pydantic import ConfigDict, model_validator
14
+ from typing_extensions import Self
14
15
 
15
16
  from langchain.chains.base import Chain
16
17
  from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT
@@ -39,7 +40,7 @@ class ElasticsearchDatabaseChain(Chain):
39
40
  """Chain for creating the ES query."""
40
41
  answer_chain: Runnable
41
42
  """Chain for answering the user question."""
42
- database: Any
43
+ database: Any = None
43
44
  """Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
44
45
  top_k: int = 10
45
46
  """Number of results to return from the query"""
@@ -51,17 +52,18 @@ class ElasticsearchDatabaseChain(Chain):
51
52
  return_intermediate_steps: bool = False
52
53
  """Whether or not to return the intermediate steps along with the final answer."""
53
54
 
54
- class Config:
55
- arbitrary_types_allowed = True
56
- extra = "forbid"
55
+ model_config = ConfigDict(
56
+ arbitrary_types_allowed=True,
57
+ extra="forbid",
58
+ )
57
59
 
58
- @root_validator(pre=False, skip_on_failure=True)
59
- def validate_indices(cls, values: dict) -> dict:
60
- if values["include_indices"] and values["ignore_indices"]:
60
+ @model_validator(mode="after")
61
+ def validate_indices(self) -> Self:
62
+ if self.include_indices and self.ignore_indices:
61
63
  raise ValueError(
62
64
  "Cannot specify both 'include_indices' and 'ignore_indices'."
63
65
  )
64
- return values
66
+ return self
65
67
 
66
68
  @property
67
69
  def input_keys(self) -> List[str]:
@@ -11,9 +11,9 @@ from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.messages import AIMessage
12
12
  from langchain_core.output_parsers import StrOutputParser
13
13
  from langchain_core.prompts import BasePromptTemplate
14
- from langchain_core.pydantic_v1 import Field
15
14
  from langchain_core.retrievers import BaseRetriever
16
15
  from langchain_core.runnables import Runnable
16
+ from pydantic import Field
17
17
 
18
18
  from langchain.chains.base import Chain
19
19
  from langchain.chains.flare.prompts import (
@@ -14,6 +14,7 @@ from langchain_core.language_models import BaseLanguageModel
14
14
  from langchain_core.output_parsers import StrOutputParser
15
15
  from langchain_core.prompts import BasePromptTemplate
16
16
  from langchain_core.runnables import Runnable
17
+ from pydantic import ConfigDict
17
18
 
18
19
  from langchain.chains.base import Chain
19
20
  from langchain.chains.hyde.prompts import PROMPT_MAP
@@ -29,14 +30,15 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
29
30
  base_embeddings: Embeddings
30
31
  llm_chain: Runnable
31
32
 
32
- class Config:
33
- arbitrary_types_allowed = True
34
- extra = "forbid"
33
+ model_config = ConfigDict(
34
+ arbitrary_types_allowed=True,
35
+ extra="forbid",
36
+ )
35
37
 
36
38
  @property
37
39
  def input_keys(self) -> List[str]:
38
40
  """Input keys for Hyde's LLM chain."""
39
- return self.llm_chain.input_schema.schema()["required"]
41
+ return self.llm_chain.input_schema.model_json_schema()["required"]
40
42
 
41
43
  @property
42
44
  def output_keys(self) -> List[str]:
langchain/chains/llm.py CHANGED
@@ -17,13 +17,11 @@ from langchain_core.language_models import (
17
17
  BaseLanguageModel,
18
18
  LanguageModelInput,
19
19
  )
20
- from langchain_core.load.dump import dumpd
21
20
  from langchain_core.messages import BaseMessage
22
21
  from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
23
22
  from langchain_core.outputs import ChatGeneration, Generation, LLMResult
24
23
  from langchain_core.prompt_values import PromptValue
25
24
  from langchain_core.prompts import BasePromptTemplate, PromptTemplate
26
- from langchain_core.pydantic_v1 import Field
27
25
  from langchain_core.runnables import (
28
26
  Runnable,
29
27
  RunnableBinding,
@@ -32,6 +30,7 @@ from langchain_core.runnables import (
32
30
  )
33
31
  from langchain_core.runnables.configurable import DynamicRunnable
34
32
  from langchain_core.utils.input import get_colored_text
33
+ from pydantic import ConfigDict, Field
35
34
 
36
35
  from langchain.chains.base import Chain
37
36
 
@@ -95,9 +94,10 @@ class LLMChain(Chain):
95
94
  If false, will return a bunch of extra information about the generation."""
96
95
  llm_kwargs: dict = Field(default_factory=dict)
97
96
 
98
- class Config:
99
- arbitrary_types_allowed = True
100
- extra = "forbid"
97
+ model_config = ConfigDict(
98
+ arbitrary_types_allowed=True,
99
+ extra="forbid",
100
+ )
101
101
 
102
102
  @property
103
103
  def input_keys(self) -> List[str]:
@@ -240,7 +240,7 @@ class LLMChain(Chain):
240
240
  callbacks, self.callbacks, self.verbose
241
241
  )
242
242
  run_manager = callback_manager.on_chain_start(
243
- dumpd(self),
243
+ None,
244
244
  {"input_list": input_list},
245
245
  )
246
246
  try:
@@ -260,7 +260,7 @@ class LLMChain(Chain):
260
260
  callbacks, self.callbacks, self.verbose
261
261
  )
262
262
  run_manager = await callback_manager.on_chain_start(
263
- dumpd(self),
263
+ None,
264
264
  {"input_list": input_list},
265
265
  )
266
266
  try:
@@ -9,7 +9,7 @@ from langchain_core._api import deprecated
9
9
  from langchain_core.callbacks import CallbackManagerForChainRun
10
10
  from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.prompts import PromptTemplate
12
- from langchain_core.pydantic_v1 import root_validator
12
+ from pydantic import ConfigDict, model_validator
13
13
 
14
14
  from langchain.chains.base import Chain
15
15
  from langchain.chains.llm import LLMChain
@@ -100,12 +100,14 @@ class LLMCheckerChain(Chain):
100
100
  input_key: str = "query" #: :meta private:
101
101
  output_key: str = "result" #: :meta private:
102
102
 
103
- class Config:
104
- arbitrary_types_allowed = True
105
- extra = "forbid"
103
+ model_config = ConfigDict(
104
+ arbitrary_types_allowed=True,
105
+ extra="forbid",
106
+ )
106
107
 
107
- @root_validator(pre=True)
108
- def raise_deprecation(cls, values: Dict) -> Dict:
108
+ @model_validator(mode="before")
109
+ @classmethod
110
+ def raise_deprecation(cls, values: Dict) -> Any:
109
111
  if "llm" in values:
110
112
  warnings.warn(
111
113
  "Directly instantiating an LLMCheckerChain with an llm is deprecated. "
@@ -14,7 +14,7 @@ from langchain_core.callbacks import (
14
14
  )
15
15
  from langchain_core.language_models import BaseLanguageModel
16
16
  from langchain_core.prompts import BasePromptTemplate
17
- from langchain_core.pydantic_v1 import root_validator
17
+ from pydantic import ConfigDict, model_validator
18
18
 
19
19
  from langchain.chains.base import Chain
20
20
  from langchain.chains.llm import LLMChain
@@ -156,12 +156,14 @@ class LLMMathChain(Chain):
156
156
  input_key: str = "question" #: :meta private:
157
157
  output_key: str = "answer" #: :meta private:
158
158
 
159
- class Config:
160
- arbitrary_types_allowed = True
161
- extra = "forbid"
159
+ model_config = ConfigDict(
160
+ arbitrary_types_allowed=True,
161
+ extra="forbid",
162
+ )
162
163
 
163
- @root_validator(pre=True)
164
- def raise_deprecation(cls, values: Dict) -> Dict:
164
+ @model_validator(mode="before")
165
+ @classmethod
166
+ def raise_deprecation(cls, values: Dict) -> Any:
165
167
  try:
166
168
  import numexpr # noqa: F401
167
169
  except ImportError:
@@ -10,7 +10,7 @@ from langchain_core._api import deprecated
10
10
  from langchain_core.callbacks import CallbackManagerForChainRun
11
11
  from langchain_core.language_models import BaseLanguageModel
12
12
  from langchain_core.prompts.prompt import PromptTemplate
13
- from langchain_core.pydantic_v1 import root_validator
13
+ from pydantic import ConfigDict, model_validator
14
14
 
15
15
  from langchain.chains.base import Chain
16
16
  from langchain.chains.llm import LLMChain
@@ -105,12 +105,14 @@ class LLMSummarizationCheckerChain(Chain):
105
105
  max_checks: int = 2
106
106
  """Maximum number of times to check the assertions. Default to double-checking."""
107
107
 
108
- class Config:
109
- arbitrary_types_allowed = True
110
- extra = "forbid"
108
+ model_config = ConfigDict(
109
+ arbitrary_types_allowed=True,
110
+ extra="forbid",
111
+ )
111
112
 
112
- @root_validator(pre=True)
113
- def raise_deprecation(cls, values: Dict) -> Dict:
113
+ @model_validator(mode="before")
114
+ @classmethod
115
+ def raise_deprecation(cls, values: Dict) -> Any:
114
116
  if "llm" in values:
115
117
  warnings.warn(
116
118
  "Directly instantiating an LLMSummarizationCheckerChain with an llm is "
@@ -14,6 +14,7 @@ from langchain_core.documents import Document
14
14
  from langchain_core.language_models import BaseLanguageModel
15
15
  from langchain_core.prompts import BasePromptTemplate
16
16
  from langchain_text_splitters import TextSplitter
17
+ from pydantic import ConfigDict
17
18
 
18
19
  from langchain.chains import ReduceDocumentsChain
19
20
  from langchain.chains.base import Chain
@@ -77,9 +78,10 @@ class MapReduceChain(Chain):
77
78
  **kwargs,
78
79
  )
79
80
 
80
- class Config:
81
- arbitrary_types_allowed = True
82
- extra = "forbid"
81
+ model_config = ConfigDict(
82
+ arbitrary_types_allowed=True,
83
+ extra="forbid",
84
+ )
83
85
 
84
86
  @property
85
87
  def input_keys(self) -> List[str]:
@@ -6,8 +6,8 @@ from langchain_core.callbacks import (
6
6
  AsyncCallbackManagerForChainRun,
7
7
  CallbackManagerForChainRun,
8
8
  )
9
- from langchain_core.pydantic_v1 import Field, root_validator
10
9
  from langchain_core.utils import check_package_version, get_from_dict_or_env
10
+ from pydantic import Field, model_validator
11
11
 
12
12
  from langchain.chains.base import Chain
13
13
 
@@ -28,8 +28,8 @@ class OpenAIModerationChain(Chain):
28
28
  moderation = OpenAIModerationChain()
29
29
  """
30
30
 
31
- client: Any #: :meta private:
32
- async_client: Any #: :meta private:
31
+ client: Any = None #: :meta private:
32
+ async_client: Any = None #: :meta private:
33
33
  model_name: Optional[str] = None
34
34
  """Moderation model name to use."""
35
35
  error: bool = False
@@ -40,8 +40,9 @@ class OpenAIModerationChain(Chain):
40
40
  openai_organization: Optional[str] = None
41
41
  openai_pre_1_0: bool = Field(default=None)
42
42
 
43
- @root_validator(pre=True)
44
- def validate_environment(cls, values: Dict) -> Dict:
43
+ @model_validator(mode="before")
44
+ @classmethod
45
+ def validate_environment(cls, values: Dict) -> Any:
45
46
  """Validate that api key and python package exists in environment."""
46
47
  openai_api_key = get_from_dict_or_env(
47
48
  values, "openai_api_key", "OPENAI_API_KEY"
@@ -9,8 +9,8 @@ from langchain_core._api import deprecated
9
9
  from langchain_core.callbacks import CallbackManagerForChainRun
10
10
  from langchain_core.language_models import BaseLanguageModel
11
11
  from langchain_core.output_parsers import StrOutputParser
12
- from langchain_core.pydantic_v1 import root_validator
13
12
  from langchain_core.runnables import Runnable
13
+ from pydantic import ConfigDict, model_validator
14
14
 
15
15
  from langchain.chains.base import Chain
16
16
  from langchain.chains.natbot.prompt import PROMPT
@@ -59,12 +59,14 @@ class NatBotChain(Chain):
59
59
  previous_command: str = "" #: :meta private:
60
60
  output_key: str = "command" #: :meta private:
61
61
 
62
- class Config:
63
- arbitrary_types_allowed = True
64
- extra = "forbid"
62
+ model_config = ConfigDict(
63
+ arbitrary_types_allowed=True,
64
+ extra="forbid",
65
+ )
65
66
 
66
- @root_validator(pre=True)
67
- def raise_deprecation(cls, values: Dict) -> Dict:
67
+ @model_validator(mode="before")
68
+ @classmethod
69
+ def raise_deprecation(cls, values: Dict) -> Any:
68
70
  if "llm" in values:
69
71
  warnings.warn(
70
72
  "Directly instantiating an NatBotChain with an llm is deprecated. "
@@ -19,11 +19,11 @@ from langchain_core.output_parsers.openai_functions import (
19
19
  PydanticAttrOutputFunctionsParser,
20
20
  )
21
21
  from langchain_core.prompts import BasePromptTemplate
22
- from langchain_core.pydantic_v1 import BaseModel
23
22
  from langchain_core.utils.function_calling import (
24
23
  PYTHON_TO_JSON_TYPES,
25
24
  convert_to_openai_function,
26
25
  )
26
+ from pydantic import BaseModel
27
27
 
28
28
  from langchain.chains import LLMChain
29
29
  from langchain.chains.structured_output.base import (
@@ -93,7 +93,7 @@ def create_openai_fn_chain(
93
93
  from langchain_community.chat_models import ChatOpenAI
94
94
  from langchain_core.prompts import ChatPromptTemplate
95
95
 
96
- from langchain_core.pydantic_v1 import BaseModel, Field
96
+ from pydantic import BaseModel, Field
97
97
 
98
98
 
99
99
  class RecordPerson(BaseModel):
@@ -183,7 +183,7 @@ def create_structured_output_chain(
183
183
  from langchain_community.chat_models import ChatOpenAI
184
184
  from langchain_core.prompts import ChatPromptTemplate
185
185
 
186
- from langchain_core.pydantic_v1 import BaseModel, Field
186
+ from pydantic import BaseModel, Field
187
187
 
188
188
  class Dog(BaseModel):
189
189
  \"\"\"Identifying information about a dog.\"\"\"
@@ -5,8 +5,8 @@ from langchain_core.language_models import BaseChatModel, BaseLanguageModel
5
5
  from langchain_core.messages import HumanMessage, SystemMessage
6
6
  from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
7
7
  from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
8
- from langchain_core.pydantic_v1 import BaseModel, Field
9
8
  from langchain_core.runnables import Runnable
9
+ from pydantic import BaseModel, Field
10
10
 
11
11
  from langchain.chains.llm import LLMChain
12
12
  from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -7,7 +7,7 @@ from langchain_core.output_parsers.openai_functions import (
7
7
  PydanticAttrOutputFunctionsParser,
8
8
  )
9
9
  from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
10
- from langchain_core.pydantic_v1 import BaseModel
10
+ from pydantic import BaseModel
11
11
 
12
12
  from langchain.chains.base import Chain
13
13
  from langchain.chains.llm import LLMChain
@@ -61,7 +61,7 @@ Passage:
61
61
  removal="1.0",
62
62
  alternative=(
63
63
  """
64
- from langchain_core.pydantic_v1 import BaseModel, Field
64
+ from pydantic import BaseModel, Field
65
65
  from langchain_anthropic import ChatAnthropic
66
66
 
67
67
  class Joke(BaseModel):
@@ -131,7 +131,7 @@ def create_extraction_chain(
131
131
  removal="1.0",
132
132
  alternative=(
133
133
  """
134
- from langchain_core.pydantic_v1 import BaseModel, Field
134
+ from pydantic import BaseModel, Field
135
135
  from langchain_anthropic import ChatAnthropic
136
136
 
137
137
  class Joke(BaseModel):
@@ -172,7 +172,11 @@ def create_extraction_chain_pydantic(
172
172
  class PydanticSchema(BaseModel):
173
173
  info: List[pydantic_schema] # type: ignore
174
174
 
175
- openai_schema = pydantic_schema.schema()
175
+ if hasattr(pydantic_schema, "model_json_schema"):
176
+ openai_schema = pydantic_schema.model_json_schema()
177
+ else:
178
+ openai_schema = pydantic_schema.schema()
179
+
176
180
  openai_schema = _resolve_schema_references(
177
181
  openai_schema, openai_schema.get("definitions", {})
178
182
  )
@@ -10,8 +10,8 @@ from langchain_core.output_parsers.openai_functions import (
10
10
  )
11
11
  from langchain_core.prompts import PromptTemplate
12
12
  from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
13
- from langchain_core.pydantic_v1 import BaseModel, Field
14
13
  from langchain_core.utils.pydantic import is_basemodel_subclass
14
+ from pydantic import BaseModel, Field
15
15
 
16
16
  from langchain.chains.llm import LLMChain
17
17
  from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -72,7 +72,10 @@ def create_qa_with_structure_chain(
72
72
  f"Should be one of `pydantic` or `base`."
73
73
  )
74
74
  if isinstance(schema, type) and is_basemodel_subclass(schema):
75
- schema_dict = cast(dict, schema.schema())
75
+ if hasattr(schema, "model_json_schema"):
76
+ schema_dict = cast(dict, schema.model_json_schema())
77
+ else:
78
+ schema_dict = cast(dict, schema.schema())
76
79
  else:
77
80
  schema_dict = cast(dict, schema)
78
81
  function = {
@@ -130,7 +130,7 @@ def create_tagging_chain_pydantic(
130
130
 
131
131
  .. code-block:: python
132
132
 
133
- from langchain_core.pydantic_v1 import BaseModel, Field
133
+ from pydantic import BaseModel, Field
134
134
  from langchain_anthropic import ChatAnthropic
135
135
 
136
136
  class Joke(BaseModel):
@@ -156,7 +156,10 @@ def create_tagging_chain_pydantic(
156
156
  Returns:
157
157
  Chain (LLMChain) that can be used to extract information from a passage.
158
158
  """
159
- openai_schema = pydantic_schema.schema()
159
+ if hasattr(pydantic_schema, "model_json_schema"):
160
+ openai_schema = pydantic_schema.model_json_schema()
161
+ else:
162
+ openai_schema = pydantic_schema.schema()
160
163
  function = _get_tagging_function(openai_schema)
161
164
  prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
162
165
  output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)