langchain-0.2.12-py3-none-any.whl → langchain-0.2.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. langchain/_api/module_import.py +2 -2
  2. langchain/agents/__init__.py +4 -3
  3. langchain/agents/agent.py +7 -11
  4. langchain/agents/agent_toolkits/__init__.py +1 -1
  5. langchain/agents/agent_toolkits/vectorstore/base.py +114 -2
  6. langchain/agents/agent_toolkits/vectorstore/toolkit.py +2 -7
  7. langchain/agents/agent_types.py +1 -1
  8. langchain/agents/chat/base.py +1 -1
  9. langchain/agents/conversational/base.py +1 -1
  10. langchain/agents/conversational_chat/base.py +1 -1
  11. langchain/agents/initialize.py +2 -2
  12. langchain/agents/json_chat/base.py +1 -1
  13. langchain/agents/loading.py +4 -4
  14. langchain/agents/mrkl/base.py +4 -4
  15. langchain/agents/openai_assistant/base.py +2 -2
  16. langchain/agents/openai_functions_agent/base.py +2 -2
  17. langchain/agents/openai_functions_multi_agent/base.py +2 -2
  18. langchain/agents/react/agent.py +1 -1
  19. langchain/agents/react/base.py +4 -4
  20. langchain/agents/self_ask_with_search/base.py +2 -2
  21. langchain/agents/structured_chat/base.py +3 -2
  22. langchain/agents/tools.py +2 -2
  23. langchain/agents/xml/base.py +2 -2
  24. langchain/chains/__init__.py +1 -0
  25. langchain/chains/api/base.py +121 -1
  26. langchain/chains/base.py +5 -7
  27. langchain/chains/combine_documents/map_reduce.py +2 -4
  28. langchain/chains/combine_documents/map_rerank.py +4 -6
  29. langchain/chains/combine_documents/reduce.py +1 -4
  30. langchain/chains/combine_documents/refine.py +2 -4
  31. langchain/chains/combine_documents/stuff.py +12 -4
  32. langchain/chains/conversation/base.py +2 -4
  33. langchain/chains/conversational_retrieval/base.py +5 -7
  34. langchain/chains/elasticsearch_database/base.py +16 -20
  35. langchain/chains/example_generator.py +3 -4
  36. langchain/chains/flare/base.py +1 -1
  37. langchain/chains/hyde/base.py +1 -4
  38. langchain/chains/llm.py +2 -4
  39. langchain/chains/llm_checker/base.py +12 -4
  40. langchain/chains/llm_math/base.py +2 -4
  41. langchain/chains/llm_summarization_checker/base.py +12 -4
  42. langchain/chains/loading.py +17 -0
  43. langchain/chains/mapreduce.py +12 -4
  44. langchain/chains/natbot/base.py +2 -4
  45. langchain/chains/openai_functions/__init__.py +2 -0
  46. langchain/chains/openai_functions/base.py +2 -2
  47. langchain/chains/openai_functions/citation_fuzzy_match.py +54 -1
  48. langchain/chains/openai_functions/extraction.py +2 -2
  49. langchain/chains/openai_functions/openapi.py +88 -1
  50. langchain/chains/openai_functions/qa_with_structure.py +19 -0
  51. langchain/chains/openai_functions/tagging.py +81 -0
  52. langchain/chains/openai_tools/extraction.py +1 -1
  53. langchain/chains/qa_with_sources/base.py +21 -4
  54. langchain/chains/qa_with_sources/loading.py +16 -0
  55. langchain/chains/query_constructor/base.py +8 -2
  56. langchain/chains/query_constructor/schema.py +0 -2
  57. langchain/chains/question_answering/chain.py +15 -0
  58. langchain/chains/retrieval_qa/base.py +30 -6
  59. langchain/chains/router/base.py +1 -4
  60. langchain/chains/router/embedding_router.py +1 -4
  61. langchain/chains/router/llm_router.py +76 -1
  62. langchain/chains/router/multi_prompt.py +76 -1
  63. langchain/chains/sequential.py +3 -7
  64. langchain/chains/structured_output/base.py +3 -3
  65. langchain/chat_models/base.py +8 -10
  66. langchain/evaluation/agents/trajectory_eval_chain.py +2 -4
  67. langchain/evaluation/comparison/eval_chain.py +2 -4
  68. langchain/evaluation/criteria/eval_chain.py +2 -4
  69. langchain/evaluation/embedding_distance/base.py +0 -2
  70. langchain/evaluation/parsing/json_schema.py +1 -1
  71. langchain/evaluation/qa/eval_chain.py +2 -7
  72. langchain/evaluation/schema.py +8 -8
  73. langchain/evaluation/scoring/eval_chain.py +2 -4
  74. langchain/evaluation/string_distance/base.py +4 -4
  75. langchain/hub.py +60 -26
  76. langchain/indexes/vectorstore.py +3 -7
  77. langchain/memory/entity.py +0 -2
  78. langchain/memory/summary.py +9 -0
  79. langchain/output_parsers/retry.py +1 -1
  80. langchain/retrievers/contextual_compression.py +0 -2
  81. langchain/retrievers/document_compressors/base.py +0 -2
  82. langchain/retrievers/document_compressors/chain_filter.py +1 -1
  83. langchain/retrievers/document_compressors/cohere_rerank.py +3 -5
  84. langchain/retrievers/document_compressors/cross_encoder_rerank.py +1 -4
  85. langchain/retrievers/document_compressors/embeddings_filter.py +0 -2
  86. langchain/retrievers/document_compressors/listwise_rerank.py +1 -1
  87. langchain/retrievers/multi_query.py +4 -2
  88. langchain/retrievers/re_phraser.py +1 -1
  89. langchain/retrievers/self_query/base.py +1 -3
  90. langchain/retrievers/time_weighted_retriever.py +0 -2
  91. langchain/tools/__init__.py +14 -5
  92. langchain/tools/render.py +0 -2
  93. langchain/tools/retriever.py +0 -4
  94. {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/METADATA +2 -2
  95. {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/RECORD +98 -98
  96. {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/LICENSE +0 -0
  97. {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/WHEEL +0 -0
  98. {langchain-0.2.12.dist-info → langchain-0.2.14.dist-info}/entry_points.txt +0 -0
langchain/agents/xml/base.py CHANGED
@@ -8,16 +8,16 @@ from langchain_core.prompts.base import BasePromptTemplate
  from langchain_core.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
  from langchain_core.runnables import Runnable, RunnablePassthrough
  from langchain_core.tools import BaseTool
+ from langchain_core.tools.render import ToolsRenderer, render_text_description

  from langchain.agents.agent import BaseSingleActionAgent
  from langchain.agents.format_scratchpad import format_xml
  from langchain.agents.output_parsers import XMLAgentOutputParser
  from langchain.agents.xml.prompt import agent_instructions
  from langchain.chains.llm import LLMChain
- from langchain.tools.render import ToolsRenderer, render_text_description


- @deprecated("0.1.0", alternative="create_xml_agent", removal="0.3.0")
+ @deprecated("0.1.0", alternative="create_xml_agent", removal="1.0")
  class XMLAgent(BaseSingleActionAgent):
  """Agent that uses XML tags.

langchain/chains/__init__.py CHANGED
@@ -59,6 +59,7 @@ _module_lookup = {
  "OpenAIModerationChain": "langchain.chains.moderation",
  "NatBotChain": "langchain.chains.natbot.base",
  "create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
+ "create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
  "create_extraction_chain": "langchain.chains.openai_functions",
  "create_extraction_chain_pydantic": "langchain.chains.openai_functions",
  "create_qa_with_sources_chain": "langchain.chains.openai_functions",
langchain/chains/api/base.py CHANGED
@@ -5,6 +5,7 @@ from __future__ import annotations
  from typing import Any, Dict, List, Optional, Sequence, Tuple
  from urllib.parse import urlparse

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import (
  AsyncCallbackManagerForChainRun,
  CallbackManagerForChainRun,
@@ -53,6 +54,15 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
  try:
  from langchain_community.utilities.requests import TextRequestsWrapper

+ @deprecated(
+ since="0.2.13",
+ message=(
+ "This class is deprecated and will be removed in langchain 1.0. "
+ "See API reference for replacement: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html" # noqa: E501
+ ),
+ removal="1.0",
+ )
  class APIChain(Chain):
  """Chain that makes API calls and summarizes the responses to answer a question.

@@ -69,7 +79,117 @@ try:
  what network access it has.

  See https://python.langchain.com/docs/security for more information.
- """
+
+ Note: this class is deprecated. See below for a replacement implementation
+ using LangGraph. The benefits of this implementation are:
+
+ - Uses LLM tool calling features to encourage properly-formatted API requests;
+ - Support for both token-by-token and step-by-step streaming;
+ - Support for checkpointing and memory of chat history;
+ - Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
+
+ Install LangGraph with:
+
+ .. code-block:: bash
+
+ pip install -U langgraph
+
+ .. code-block:: python
+
+ from typing import Annotated, Sequence
+ from typing_extensions import TypedDict
+
+ from langchain.chains.api.prompt import API_URL_PROMPT
+ from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
+ from langchain_community.utilities.requests import TextRequestsWrapper
+ from langchain_core.messages import BaseMessage
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_openai import ChatOpenAI
+ from langchain_core.runnables import RunnableConfig
+ from langgraph.graph import END, StateGraph
+ from langgraph.graph.message import add_messages
+ from langgraph.prebuilt.tool_node import ToolNode
+
+ # NOTE: There are inherent risks in giving models discretion
+ # to execute real-world actions. We must "opt-in" to these
+ # risks by setting allow_dangerous_request=True to use these tools.
+ # This can be dangerous for calling unwanted requests. Please make
+ # sure your custom OpenAPI spec (yaml) is safe and that permissions
+ # associated with the tools are narrowly-scoped.
+ ALLOW_DANGEROUS_REQUESTS = True
+
+ # Subset of spec for https://jsonplaceholder.typicode.com
+ api_spec = \"\"\"
+ openapi: 3.0.0
+ info:
+ title: JSONPlaceholder API
+ version: 1.0.0
+ servers:
+ - url: https://jsonplaceholder.typicode.com
+ paths:
+ /posts:
+ get:
+ summary: Get posts
+ parameters: &id001
+ - name: _limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ example: 2
+ description: Limit the number of results
+ \"\"\"
+
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ toolkit = RequestsToolkit(
+ requests_wrapper=TextRequestsWrapper(headers={}), # no auth required
+ allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,
+ )
+ tools = toolkit.get_tools()
+
+ api_request_chain = (
+ API_URL_PROMPT.partial(api_docs=api_spec)
+ | llm.bind_tools(tools, tool_choice="any")
+ )
+
+ class ChainState(TypedDict):
+ \"\"\"LangGraph state.\"\"\"
+
+ messages: Annotated[Sequence[BaseMessage], add_messages]
+
+
+ async def acall_request_chain(state: ChainState, config: RunnableConfig):
+ last_message = state["messages"][-1]
+ response = await api_request_chain.ainvoke(
+ {"question": last_message.content}, config
+ )
+ return {"messages": [response]}
+
+ async def acall_model(state: ChainState, config: RunnableConfig):
+ response = await llm.ainvoke(state["messages"], config)
+ return {"messages": [response]}
+
+ graph_builder = StateGraph(ChainState)
+ graph_builder.add_node("call_tool", acall_request_chain)
+ graph_builder.add_node("execute_tool", ToolNode(tools))
+ graph_builder.add_node("call_model", acall_model)
+ graph_builder.set_entry_point("call_tool")
+ graph_builder.add_edge("call_tool", "execute_tool")
+ graph_builder.add_edge("execute_tool", "call_model")
+ graph_builder.add_edge("call_model", END)
+ chain = graph_builder.compile()
+
+ .. code-block:: python
+
+ example_query = "Fetch the top two posts. What are their titles?"
+
+ events = chain.astream(
+ {"messages": [("user", example_query)]},
+ stream_mode="values",
+ )
+ async for event in events:
+ event["messages"][-1].pretty_print()
+ """ # noqa: E501

  api_request_chain: LLMChain
  api_answer_chain: LLMChain
langchain/chains/base.py CHANGED
@@ -97,8 +97,6 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  """[DEPRECATED] Use `callbacks` instead."""

  class Config:
- """Configuration for this pydantic object."""
-
  arbitrary_types_allowed = True

  def get_input_schema(
@@ -336,7 +334,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  None, self._call, inputs, run_manager.get_sync() if run_manager else None
  )

- @deprecated("0.1.0", alternative="invoke", removal="0.3.0")
+ @deprecated("0.1.0", alternative="invoke", removal="1.0")
  def __call__(
  self,
  inputs: Union[Dict[str, Any], Any],
@@ -387,7 +385,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  include_run_info=include_run_info,
  )

- @deprecated("0.1.0", alternative="ainvoke", removal="0.3.0")
+ @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
  async def acall(
  self,
  inputs: Union[Dict[str, Any], Any],
@@ -546,7 +544,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  )
  return self.output_keys[0]

- @deprecated("0.1.0", alternative="invoke", removal="0.3.0")
+ @deprecated("0.1.0", alternative="invoke", removal="1.0")
  def run(
  self,
  *args: Any,
@@ -617,7 +615,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  f" but not both. Got args: {args} and kwargs: {kwargs}."
  )

- @deprecated("0.1.0", alternative="ainvoke", removal="0.3.0")
+ @deprecated("0.1.0", alternative="ainvoke", removal="1.0")
  async def arun(
  self,
  *args: Any,
@@ -755,7 +753,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  else:
  raise ValueError(f"{save_path} must be json or yaml")

- @deprecated("0.1.0", alternative="batch", removal="0.3.0")
+ @deprecated("0.1.0", alternative="batch", removal="1.0")
  def apply(
  self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
  ) -> List[Dict[str, str]]:
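Every decorator in this file now targets removal in langchain 1.0 rather than 0.3.0; the replacements named are the standard Runnable entry points. A small call-site sketch of the migration (LLMChain and an OpenAI completion model are used purely for concreteness):

    from langchain.chains import LLMChain
    from langchain_core.prompts import PromptTemplate
    from langchain_openai import OpenAI  # illustrative model choice

    llm = OpenAI(temperature=0)
    prompt = PromptTemplate.from_template("Name one synonym for {word}.")
    chain = LLMChain(llm=llm, prompt=prompt)

    # Deprecated entry points (kept until langchain 1.0):
    text = chain.run(word="happy")       # bare output string
    out = chain({"word": "happy"})       # dict of inputs + outputs

    # Runnable replacements:
    out = chain.invoke({"word": "happy"})                      # replaces __call__ / run
    outs = chain.batch([{"word": "happy"}, {"word": "sad"}])   # replaces apply
    # acall / arun map to the async counterpart: await chain.ainvoke({...})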
langchain/chains/combine_documents/map_reduce.py CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Tuple, Type

  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
- from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
+ from langchain_core.pydantic_v1 import BaseModel, root_validator
  from langchain_core.runnables.config import RunnableConfig
  from langchain_core.runnables.utils import create_model

@@ -127,10 +127,8 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
  return _output_keys

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=True)
  def get_reduce_chain(cls, values: Dict) -> Dict:
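The Config edit here recurs in almost every chain touched by this release: the pydantic v1 Extra.forbid enum is swapped for the equivalent string literal (and the Config docstring dropped), so behaviour is unchanged. A standalone illustration of the equivalence, written against pydantic's bundled v1 API (the import path below is the pydantic-2 compatibility shim; on a pydantic 1.x install it would simply be `from pydantic import ...`):

    from pydantic.v1 import BaseModel, Extra, ValidationError


    class WithEnum(BaseModel):
        class Config:
            extra = Extra.forbid


    class WithString(BaseModel):
        class Config:
            extra = "forbid"  # coerced to Extra.forbid internally


    for model in (WithEnum, WithString):
        try:
            model(unexpected_field=1)
        except ValidationError:
            print(f"{model.__name__}: extra field rejected")  # both print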
langchain/chains/combine_documents/map_rerank.py CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast

  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
- from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
+ from langchain_core.pydantic_v1 import BaseModel, root_validator
  from langchain_core.runnables.config import RunnableConfig
  from langchain_core.runnables.utils import create_model

@@ -25,7 +25,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  Example:
  .. code-block:: python

- from langchain.chains import StuffDocumentsChain, LLMChain
+ from langchain.chains import MapRerankDocumentsChain, LLMChain
  from langchain_core.prompts import PromptTemplate
  from langchain_community.llms import OpenAI
  from langchain.output_parsers.regex import RegexParser
@@ -39,7 +39,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  prompt_template = (
  "Use the following context to tell me the chemical formula "
  "for water. Output both your answer and a score of how confident "
- "you are. Context: {content}"
+ "you are. Context: {context}"
  )
  output_parser = RegexParser(
  regex=r"(.*?)\nScore: (.*)",
@@ -75,10 +75,8 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  Intermediate steps include the results of calling llm_chain on each document."""

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  def get_output_schema(
  self, config: Optional[RunnableConfig] = None
langchain/chains/combine_documents/reduce.py CHANGED
@@ -6,7 +6,6 @@ from typing import Any, Callable, List, Optional, Protocol, Tuple

  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
- from langchain_core.pydantic_v1 import Extra

  from langchain.chains.combine_documents.base import BaseCombineDocumentsChain

@@ -206,10 +205,8 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain):
  Otherwise, after it reaches the max number, it will throw an error"""

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @property
  def _collapse_chain(self) -> BaseCombineDocumentsChain:
langchain/chains/combine_documents/refine.py CHANGED
@@ -8,7 +8,7 @@ from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from langchain_core.prompts import BasePromptTemplate, format_document
  from langchain_core.prompts.prompt import PromptTemplate
- from langchain_core.pydantic_v1 import Extra, Field, root_validator
+ from langchain_core.pydantic_v1 import Field, root_validator

  from langchain.chains.combine_documents.base import (
  BaseCombineDocumentsChain,
@@ -99,10 +99,8 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
  return _output_keys

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=True)
  def get_return_intermediate_steps(cls, values: Dict) -> Dict:
langchain/chains/combine_documents/stuff.py CHANGED
@@ -2,12 +2,13 @@

  from typing import Any, Dict, List, Optional, Tuple

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import Callbacks
  from langchain_core.documents import Document
  from langchain_core.language_models import LanguageModelLike
  from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
  from langchain_core.prompts import BasePromptTemplate, format_document
- from langchain_core.pydantic_v1 import Extra, Field, root_validator
+ from langchain_core.pydantic_v1 import Field, root_validator
  from langchain_core.runnables import Runnable, RunnablePassthrough

  from langchain.chains.combine_documents.base import (
@@ -95,6 +96,15 @@ def create_stuff_documents_chain(
  ).with_config(run_name="stuff_documents_chain")


+ @deprecated(
+ since="0.2.13",
+ removal="1.0",
+ message=(
+ "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
+ "instead. See migration guide here: "
+ "https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
+ ),
+ )
  class StuffDocumentsChain(BaseCombineDocumentsChain):
  """Chain that combines documents by stuffing into context.

@@ -147,10 +157,8 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
  """The string with which to join the formatted documents"""

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=True)
  def get_default_document_variable_name(cls, values: Dict) -> Dict:
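The new decorator points StuffDocumentsChain users at the create_stuff_documents_chain constructor defined earlier in this same module. A minimal sketch of the replacement; the model and documents are illustrative, and "context" is the constructor's default document variable:

    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.documents import Document
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_openai import ChatOpenAI

    prompt = ChatPromptTemplate.from_template("Summarise this content: {context}")
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    chain = create_stuff_documents_chain(llm, prompt)

    docs = [
        Document(page_content="Jesse loves red but not yellow."),
        Document(page_content="Jamal loves green but not orange."),
    ]
    print(chain.invoke({"context": docs}))  # single string answer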
langchain/chains/conversation/base.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List
  from langchain_core._api import deprecated
  from langchain_core.memory import BaseMemory
  from langchain_core.prompts import BasePromptTemplate
- from langchain_core.pydantic_v1 import Extra, Field, root_validator
+ from langchain_core.pydantic_v1 import Field, root_validator

  from langchain.chains.conversation.prompt import PROMPT
  from langchain.chains.llm import LLMChain
@@ -111,10 +111,8 @@ class ConversationChain(LLMChain):
  output_key: str = "response" #: :meta private:

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @classmethod
  def is_lc_serializable(cls) -> bool:
langchain/chains/conversational_retrieval/base.py CHANGED
@@ -18,7 +18,7 @@ from langchain_core.documents import Document
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.messages import BaseMessage
  from langchain_core.prompts import BasePromptTemplate
- from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
+ from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
  from langchain_core.retrievers import BaseRetriever
  from langchain_core.runnables import RunnableConfig
  from langchain_core.vectorstores import VectorStore
@@ -97,11 +97,9 @@ class BaseConversationalRetrievalChain(Chain):
  are found for the question. """

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
- arbitrary_types_allowed = True
  allow_population_by_field_name = True
+ arbitrary_types_allowed = True
+ extra = "forbid"

  @property
  def input_keys(self) -> List[str]:
@@ -244,7 +242,7 @@ class BaseConversationalRetrievalChain(Chain):
  "create_history_aware_retriever together with create_retrieval_chain "
  "(see example in docstring)"
  ),
- removal="0.3.0",
+ removal="1.0",
  )
  class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  """Chain for having a conversation based on retrieved documents.
@@ -445,7 +443,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  combine_docs_chain_kwargs: Parameters to pass as kwargs to `load_qa_chain`
  when constructing the combine_docs_chain.
  callbacks: Callbacks to pass to all subchains.
- **kwargs: Additional parameters to pass when initializing
+ kwargs: Additional parameters to pass when initializing
  ConversationalRetrievalChain
  """
  combine_docs_chain_kwargs = combine_docs_chain_kwargs or {}
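The deprecation on ConversationalRetrievalChain (now slated for removal in 1.0) names create_history_aware_retriever plus create_retrieval_chain as the replacement. A rough wiring sketch under stated assumptions: `retriever` is any existing BaseRetriever (for example vectorstore.as_retriever()), and the two prompts and the model are illustrative only.

    from langchain.chains import create_history_aware_retriever, create_retrieval_chain
    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

    rephrase_prompt = ChatPromptTemplate.from_messages([
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
        ("human", "Rewrite the question above as a standalone search query."),
    ])
    qa_prompt = ChatPromptTemplate.from_messages([
        ("system", "Answer using only this context:\n\n{context}"),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ])

    # `retriever` is assumed to exist already, e.g. retriever = vectorstore.as_retriever()
    history_aware_retriever = create_history_aware_retriever(llm, retriever, rephrase_prompt)
    combine_docs_chain = create_stuff_documents_chain(llm, qa_prompt)
    rag_chain = create_retrieval_chain(history_aware_retriever, combine_docs_chain)

    rag_chain.invoke({"input": "What does the document say?", "chat_history": []})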
langchain/chains/elasticsearch_database/base.py CHANGED
@@ -6,14 +6,14 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional

  from langchain_core.callbacks import CallbackManagerForChainRun
  from langchain_core.language_models import BaseLanguageModel
- from langchain_core.output_parsers import BaseLLMOutputParser
+ from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
  from langchain_core.output_parsers.json import SimpleJsonOutputParser
  from langchain_core.prompts import BasePromptTemplate
- from langchain_core.pydantic_v1 import Extra, root_validator
+ from langchain_core.pydantic_v1 import root_validator
+ from langchain_core.runnables import Runnable

  from langchain.chains.base import Chain
  from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT
- from langchain.chains.llm import LLMChain

  if TYPE_CHECKING:
  from elasticsearch import Elasticsearch
@@ -35,9 +35,9 @@ class ElasticsearchDatabaseChain(Chain):
  db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
  """

- query_chain: LLMChain
+ query_chain: Runnable
  """Chain for creating the ES query."""
- answer_chain: LLMChain
+ answer_chain: Runnable
  """Chain for answering the user question."""
  database: Any
  """Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
@@ -52,10 +52,8 @@ class ElasticsearchDatabaseChain(Chain):
  """Whether or not to return the intermediate steps along with the final answer."""

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=False, skip_on_failure=True)
  def validate_indices(cls, values: dict) -> dict:
@@ -137,9 +135,9 @@ class ElasticsearchDatabaseChain(Chain):
  intermediate_steps: List = []
  try:
  intermediate_steps.append(query_inputs) # input: es generation
- es_cmd = self.query_chain.run(
- callbacks=_run_manager.get_child(),
- **query_inputs,
+ es_cmd = self.query_chain.invoke(
+ query_inputs,
+ config={"callbacks": _run_manager.get_child()},
  )

  _run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
@@ -156,9 +154,9 @@ class ElasticsearchDatabaseChain(Chain):
  _run_manager.on_text("\nAnswer:", verbose=self.verbose)
  answer_inputs: dict = {"data": result, "input": input_text}
  intermediate_steps.append(answer_inputs) # input: final answer
- final_result = self.answer_chain.run(
- callbacks=_run_manager.get_child(),
- **answer_inputs,
+ final_result = self.answer_chain.invoke(
+ answer_inputs,
+ config={"callbacks": _run_manager.get_child()},
  )

  intermediate_steps.append(final_result) # output: final answer
@@ -185,7 +183,7 @@ class ElasticsearchDatabaseChain(Chain):
  *,
  query_prompt: Optional[BasePromptTemplate] = None,
  answer_prompt: Optional[BasePromptTemplate] = None,
- query_output_parser: Optional[BaseLLMOutputParser] = None,
+ query_output_parser: Optional[BaseOutputParser] = None,
  **kwargs: Any,
  ) -> ElasticsearchDatabaseChain:
  """Convenience method to construct ElasticsearchDatabaseChain from an LLM.
@@ -197,15 +195,13 @@ class ElasticsearchDatabaseChain(Chain):
  answer_prompt: The prompt to use for answering user question given data.
  query_output_parser: The output parser to use for parsing model-generated
  ES query. Defaults to SimpleJsonOutputParser.
- **kwargs: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
  """
  query_prompt = query_prompt or DSL_PROMPT
  query_output_parser = query_output_parser or SimpleJsonOutputParser()
- query_chain = LLMChain(
- llm=llm, prompt=query_prompt, output_parser=query_output_parser
- )
+ query_chain = query_prompt | llm | query_output_parser
  answer_prompt = answer_prompt or ANSWER_PROMPT
- answer_chain = LLMChain(llm=llm, prompt=answer_prompt)
+ answer_chain = answer_prompt | llm | StrOutputParser()
  return cls(
  query_chain=query_chain,
  answer_chain=answer_chain,
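The from_llm rewrite above replaces LLMChain wrappers with LCEL pipelines: piping a prompt into a model and then into an output parser yields a Runnable, which is why query_chain and answer_chain are now typed as Runnable and invoked with .invoke(inputs, config=...). The same pattern in isolation (the prompts and model are illustrative, not the chain's real Elasticsearch prompts):

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.output_parsers.json import SimpleJsonOutputParser
    from langchain_core.prompts import PromptTemplate
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

    # Replaces LLMChain(llm=llm, prompt=..., output_parser=SimpleJsonOutputParser()):
    query_prompt = PromptTemplate.from_template(
        'Return a JSON object {{"query": ...}} that answers: {input}'
    )
    query_chain = query_prompt | llm | SimpleJsonOutputParser()

    # Replaces LLMChain(llm=llm, prompt=...) read back as plain text:
    answer_prompt = PromptTemplate.from_template("Given this data: {data}\nAnswer: {input}")
    answer_chain = answer_prompt | llm | StrOutputParser()

    data = query_chain.invoke({"input": "all documents tagged 2023"})
    answer = answer_chain.invoke(
        {"data": data, "input": "How many documents are there?"},
        config={"callbacks": []},  # callbacks now travel via config, as in the diff
    )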
langchain/chains/example_generator.py CHANGED
@@ -1,11 +1,10 @@
  from typing import List

  from langchain_core.language_models import BaseLanguageModel
+ from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts.few_shot import FewShotPromptTemplate
  from langchain_core.prompts.prompt import PromptTemplate

- from langchain.chains.llm import LLMChain
-
  TEST_GEN_TEMPLATE_SUFFIX = "Add another example."


@@ -19,5 +18,5 @@ def generate_example(
  input_variables=[],
  example_prompt=prompt_template,
  )
- chain = LLMChain(llm=llm, prompt=prompt)
- return chain.predict()
+ chain = prompt | llm | StrOutputParser()
+ return chain.invoke({})
langchain/chains/flare/base.py CHANGED
@@ -245,7 +245,7 @@ class FlareChain(Chain):
  Args:
  llm: Language model to use.
  max_generation_len: Maximum length of the generated response.
- **kwargs: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.

  Returns:
  FlareChain class with the given language model.
langchain/chains/hyde/base.py CHANGED
@@ -12,7 +12,6 @@ from langchain_core.callbacks import CallbackManagerForChainRun
  from langchain_core.embeddings import Embeddings
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.prompts import BasePromptTemplate
- from langchain_core.pydantic_v1 import Extra

  from langchain.chains.base import Chain
  from langchain.chains.hyde.prompts import PROMPT_MAP
@@ -29,10 +28,8 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
  llm_chain: LLMChain

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @property
  def input_keys(self) -> List[str]:
langchain/chains/llm.py CHANGED
@@ -23,7 +23,7 @@ from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
  from langchain_core.outputs import ChatGeneration, Generation, LLMResult
  from langchain_core.prompt_values import PromptValue
  from langchain_core.prompts import BasePromptTemplate, PromptTemplate
- from langchain_core.pydantic_v1 import Extra, Field
+ from langchain_core.pydantic_v1 import Field
  from langchain_core.runnables import (
  Runnable,
  RunnableBinding,
@@ -96,10 +96,8 @@ class LLMChain(Chain):
  llm_kwargs: dict = Field(default_factory=dict)

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @property
  def input_keys(self) -> List[str]:
langchain/chains/llm_checker/base.py CHANGED
@@ -5,10 +5,11 @@ from __future__ import annotations
  import warnings
  from typing import Any, Dict, List, Optional

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import CallbackManagerForChainRun
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.prompts import PromptTemplate
- from langchain_core.pydantic_v1 import Extra, root_validator
+ from langchain_core.pydantic_v1 import root_validator

  from langchain.chains.base import Chain
  from langchain.chains.llm import LLMChain
@@ -63,6 +64,15 @@ def _load_question_to_checked_assertions_chain(
  return question_to_checked_assertions_chain


+ @deprecated(
+ since="0.2.13",
+ message=(
+ "See LangGraph guides for a variety of self-reflection and corrective "
+ "strategies for question-answering and other tasks: "
+ "https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
+ ),
+ removal="1.0",
+ )
  class LLMCheckerChain(Chain):
  """Chain for question-answering with self-verification.

@@ -91,10 +101,8 @@ class LLMCheckerChain(Chain):
  output_key: str = "result" #: :meta private:

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=True)
  def raise_deprecation(cls, values: Dict) -> Dict:
langchain/chains/llm_math/base.py CHANGED
@@ -13,7 +13,7 @@ from langchain_core.callbacks import (
  )
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.prompts import BasePromptTemplate
- from langchain_core.pydantic_v1 import Extra, root_validator
+ from langchain_core.pydantic_v1 import root_validator

  from langchain.chains.base import Chain
  from langchain.chains.llm import LLMChain
@@ -40,10 +40,8 @@ class LLMMathChain(Chain):
  output_key: str = "answer" #: :meta private:

  class Config:
- """Configuration for this pydantic object."""
-
- extra = Extra.forbid
  arbitrary_types_allowed = True
+ extra = "forbid"

  @root_validator(pre=True)
  def raise_deprecation(cls, values: Dict) -> Dict: