langchain 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

Files changed (182)
  1. langchain/__init__.py +1 -0
  2. langchain/agents/__init__.py +5 -4
  3. langchain/agents/agent.py +272 -50
  4. langchain/agents/agent_iterator.py +20 -0
  5. langchain/agents/agent_toolkits/__init__.py +1 -0
  6. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  10. langchain/agents/agent_types.py +1 -0
  11. langchain/agents/chat/base.py +37 -1
  12. langchain/agents/chat/output_parser.py +14 -0
  13. langchain/agents/conversational/base.py +38 -6
  14. langchain/agents/conversational/output_parser.py +10 -0
  15. langchain/agents/conversational_chat/base.py +42 -3
  16. langchain/agents/format_scratchpad/__init__.py +1 -0
  17. langchain/agents/format_scratchpad/log.py +12 -1
  18. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  19. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  20. langchain/agents/format_scratchpad/tools.py +11 -7
  21. langchain/agents/initialize.py +15 -7
  22. langchain/agents/json_chat/base.py +6 -0
  23. langchain/agents/loading.py +7 -0
  24. langchain/agents/mrkl/base.py +39 -10
  25. langchain/agents/mrkl/output_parser.py +12 -0
  26. langchain/agents/openai_assistant/base.py +37 -14
  27. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  28. langchain/agents/openai_functions_agent/base.py +61 -10
  29. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  30. langchain/agents/openai_tools/base.py +3 -0
  31. langchain/agents/output_parsers/__init__.py +1 -0
  32. langchain/agents/react/base.py +1 -0
  33. langchain/agents/self_ask_with_search/base.py +1 -0
  34. langchain/agents/structured_chat/output_parser.py +3 -3
  35. langchain/agents/tools.py +3 -0
  36. langchain/agents/utils.py +9 -1
  37. langchain/base_language.py +1 -0
  38. langchain/callbacks/__init__.py +1 -0
  39. langchain/callbacks/base.py +1 -0
  40. langchain/callbacks/streaming_stdout.py +1 -0
  41. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  42. langchain/callbacks/tracers/evaluation.py +1 -0
  43. langchain/chains/api/base.py +5 -2
  44. langchain/chains/base.py +1 -1
  45. langchain/chains/combine_documents/base.py +59 -0
  46. langchain/chains/combine_documents/map_reduce.py +4 -2
  47. langchain/chains/combine_documents/map_rerank.py +5 -3
  48. langchain/chains/combine_documents/refine.py +4 -2
  49. langchain/chains/combine_documents/stuff.py +1 -0
  50. langchain/chains/constitutional_ai/base.py +1 -0
  51. langchain/chains/constitutional_ai/models.py +1 -0
  52. langchain/chains/constitutional_ai/principles.py +1 -0
  53. langchain/chains/conversation/base.py +81 -1
  54. langchain/chains/conversational_retrieval/base.py +2 -1
  55. langchain/chains/elasticsearch_database/base.py +2 -1
  56. langchain/chains/hyde/base.py +1 -0
  57. langchain/chains/llm.py +1 -0
  58. langchain/chains/llm_checker/base.py +4 -3
  59. langchain/chains/llm_math/base.py +1 -0
  60. langchain/chains/loading.py +2 -1
  61. langchain/chains/mapreduce.py +1 -0
  62. langchain/chains/moderation.py +1 -1
  63. langchain/chains/natbot/base.py +1 -0
  64. langchain/chains/openai_functions/base.py +1 -0
  65. langchain/chains/qa_generation/base.py +47 -1
  66. langchain/chains/qa_with_sources/__init__.py +1 -0
  67. langchain/chains/qa_with_sources/loading.py +1 -0
  68. langchain/chains/qa_with_sources/vector_db.py +1 -1
  69. langchain/chains/query_constructor/base.py +1 -0
  70. langchain/chains/query_constructor/ir.py +1 -0
  71. langchain/chains/question_answering/chain.py +1 -0
  72. langchain/chains/retrieval_qa/base.py +3 -2
  73. langchain/chains/router/base.py +1 -0
  74. langchain/chains/router/llm_router.py +2 -1
  75. langchain/chains/router/multi_prompt.py +1 -0
  76. langchain/chains/router/multi_retrieval_qa.py +1 -0
  77. langchain/chains/sequential.py +2 -1
  78. langchain/chains/structured_output/base.py +6 -6
  79. langchain/chains/summarize/chain.py +1 -0
  80. langchain/chains/transform.py +4 -3
  81. langchain/chat_models/__init__.py +1 -0
  82. langchain/chat_models/base.py +2 -2
  83. langchain/docstore/__init__.py +1 -0
  84. langchain/document_loaders/__init__.py +1 -0
  85. langchain/document_transformers/__init__.py +1 -0
  86. langchain/embeddings/__init__.py +0 -1
  87. langchain/evaluation/__init__.py +2 -1
  88. langchain/evaluation/agents/__init__.py +1 -0
  89. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  90. langchain/evaluation/comparison/__init__.py +1 -0
  91. langchain/evaluation/comparison/eval_chain.py +1 -0
  92. langchain/evaluation/comparison/prompt.py +1 -0
  93. langchain/evaluation/embedding_distance/__init__.py +1 -0
  94. langchain/evaluation/embedding_distance/base.py +1 -0
  95. langchain/evaluation/loading.py +1 -0
  96. langchain/evaluation/parsing/base.py +1 -0
  97. langchain/evaluation/qa/__init__.py +1 -0
  98. langchain/evaluation/qa/eval_chain.py +1 -0
  99. langchain/evaluation/qa/generate_chain.py +1 -0
  100. langchain/evaluation/schema.py +1 -0
  101. langchain/evaluation/scoring/__init__.py +1 -0
  102. langchain/evaluation/scoring/eval_chain.py +1 -0
  103. langchain/evaluation/scoring/prompt.py +1 -0
  104. langchain/evaluation/string_distance/__init__.py +1 -0
  105. langchain/example_generator.py +1 -0
  106. langchain/formatting.py +1 -0
  107. langchain/globals/__init__.py +1 -0
  108. langchain/graphs/__init__.py +1 -0
  109. langchain/indexes/__init__.py +1 -0
  110. langchain/indexes/_sql_record_manager.py +1 -2
  111. langchain/indexes/graph.py +1 -0
  112. langchain/indexes/prompts/__init__.py +1 -0
  113. langchain/input.py +1 -0
  114. langchain/llms/__init__.py +1 -0
  115. langchain/load/__init__.py +1 -0
  116. langchain/memory/__init__.py +5 -0
  117. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  118. langchain/output_parsers/__init__.py +1 -0
  119. langchain/prompts/__init__.py +1 -0
  120. langchain/prompts/example_selector/__init__.py +1 -0
  121. langchain/python.py +1 -0
  122. langchain/requests.py +1 -0
  123. langchain/retrievers/__init__.py +1 -0
  124. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  125. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  126. langchain/retrievers/ensemble.py +1 -0
  127. langchain/retrievers/self_query/base.py +7 -7
  128. langchain/schema/__init__.py +1 -0
  129. langchain/schema/runnable/__init__.py +1 -0
  130. langchain/serpapi.py +1 -0
  131. langchain/smith/__init__.py +6 -5
  132. langchain/smith/evaluation/__init__.py +0 -1
  133. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  134. langchain/sql_database.py +1 -0
  135. langchain/storage/__init__.py +1 -0
  136. langchain/storage/_lc_store.py +1 -0
  137. langchain/storage/in_memory.py +1 -0
  138. langchain/text_splitter.py +1 -0
  139. langchain/tools/__init__.py +1 -0
  140. langchain/tools/amadeus/__init__.py +1 -0
  141. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  142. langchain/tools/bing_search/__init__.py +1 -0
  143. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  144. langchain/tools/ddg_search/__init__.py +1 -0
  145. langchain/tools/edenai/__init__.py +1 -0
  146. langchain/tools/eleven_labs/__init__.py +1 -0
  147. langchain/tools/file_management/__init__.py +1 -0
  148. langchain/tools/github/__init__.py +1 -1
  149. langchain/tools/gitlab/__init__.py +1 -1
  150. langchain/tools/gmail/__init__.py +1 -0
  151. langchain/tools/golden_query/__init__.py +1 -0
  152. langchain/tools/google_cloud/__init__.py +1 -0
  153. langchain/tools/google_finance/__init__.py +1 -0
  154. langchain/tools/google_jobs/__init__.py +1 -0
  155. langchain/tools/google_lens/__init__.py +1 -0
  156. langchain/tools/google_places/__init__.py +1 -0
  157. langchain/tools/google_scholar/__init__.py +1 -0
  158. langchain/tools/google_search/__init__.py +1 -0
  159. langchain/tools/google_trends/__init__.py +1 -0
  160. langchain/tools/human/__init__.py +1 -0
  161. langchain/tools/memorize/__init__.py +1 -0
  162. langchain/tools/metaphor_search/__init__.py +1 -0
  163. langchain/tools/multion/__init__.py +1 -0
  164. langchain/tools/office365/__init__.py +1 -0
  165. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  166. langchain/tools/openweathermap/__init__.py +1 -0
  167. langchain/tools/playwright/__init__.py +1 -0
  168. langchain/tools/shell/__init__.py +1 -0
  169. langchain/tools/slack/__init__.py +1 -0
  170. langchain/tools/sql_database/prompt.py +1 -0
  171. langchain/tools/steamship_image_generation/__init__.py +1 -0
  172. langchain/tools/tavily_search/__init__.py +1 -0
  173. langchain/tools/wolfram_alpha/__init__.py +1 -0
  174. langchain/tools/zapier/__init__.py +1 -0
  175. langchain/utilities/__init__.py +1 -0
  176. langchain/utilities/python.py +1 -0
  177. langchain/vectorstores/__init__.py +1 -0
  178. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/METADATA +2 -3
  179. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/RECORD +182 -181
  180. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/LICENSE +0 -0
  181. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/WHEEL +0 -0
  182. {langchain-0.2.6.dist-info → langchain-0.2.7.dist-info}/entry_points.txt +0 -0
langchain/chains/combine_documents/base.py CHANGED
@@ -3,6 +3,7 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Tuple, Type
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
     CallbackManagerForChainRun,
@@ -157,12 +158,70 @@ class BaseCombineDocumentsChain(Chain, ABC):
         return extra_return_dict
 
 
+@deprecated(
+    since="0.2.7",
+    alternative=(
+        "example in API reference with more detail: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html"  # noqa: E501
+    ),
+    removal="1.0",
+)
 class AnalyzeDocumentChain(Chain):
     """Chain that splits documents, then analyzes it in pieces.
 
     This chain is parameterized by a TextSplitter and a CombineDocumentsChain.
     This chain takes a single document as input, and then splits it up into chunks
     and then passes those chucks to the CombineDocumentsChain.
+
+    This class is deprecated. See below for alternative implementations which
+    supports async and streaming modes of operation.
+
+    If the underlying combine documents chain takes one ``input_documents`` argument
+    (e.g., chains generated by ``load_summarize_chain``):
+
+    .. code-block:: python
+
+        split_text = lambda x: text_splitter.create_documents([x])
+
+        summarize_document_chain = split_text | chain
+
+    If the underlying chain takes additional arguments (e.g., ``load_qa_chain``, which
+    takes an additional ``question`` argument), we can use the following:
+
+    .. code-block:: python
+
+        from operator import itemgetter
+        from langchain_core.runnables import RunnableLambda, RunnableParallel
+
+        split_text = RunnableLambda(
+            lambda x: text_splitter.create_documents([x])
+        )
+        summarize_document_chain = RunnableParallel(
+            question=itemgetter("question"),
+            input_documents=itemgetter("input_document") | split_text,
+        ) | chain.pick("output_text")
+
+    To additionally return the input parameters, as ``AnalyzeDocumentChain`` does,
+    we can wrap this construction with ``RunnablePassthrough``:
+
+    .. code-block:: python
+
+        from operator import itemgetter
+        from langchain_core.runnables import (
+            RunnableLambda,
+            RunnableParallel,
+            RunnablePassthrough,
+        )
+
+        split_text = RunnableLambda(
+            lambda x: text_splitter.create_documents([x])
+        )
+        summarize_document_chain = RunnablePassthrough.assign(
+            output_text=RunnableParallel(
+                question=itemgetter("question"),
+                input_documents=itemgetter("input_document") | split_text,
+            ) | chain.pick("output_text")
+        )
+    """
 
     input_key: str = "input_document"  #: :meta private:
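
Note: the docstring added above composes the replacement from a text splitter and an existing combine-documents chain but leaves ``text_splitter`` and ``chain`` undefined. A minimal, self-contained sketch of the single ``input_documents`` pattern, assuming a stuff-type chain from ``load_summarize_chain`` and an OpenAI chat model (illustrative choices, not part of this diff):

    from langchain.chains.summarize import load_summarize_chain
    from langchain_core.runnables import RunnableLambda
    from langchain_openai import ChatOpenAI
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
    chain = load_summarize_chain(llm, chain_type="stuff")

    # Split the raw text into Documents, then hand them to the combine chain,
    # mirroring what AnalyzeDocumentChain did internally.
    split_text = RunnableLambda(lambda x: text_splitter.create_documents([x]))
    summarize_document_chain = split_text | chain

    # result = summarize_document_chain.invoke("... long document text ...")
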
langchain/chains/combine_documents/map_reduce.py CHANGED
@@ -166,8 +166,11 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
     @root_validator(pre=True)
     def get_default_document_variable_name(cls, values: Dict) -> Dict:
         """Get default document variable name, if not provided."""
+        if "llm_chain" not in values:
+            raise ValueError("llm_chain must be provided")
+
+        llm_chain_variables = values["llm_chain"].prompt.input_variables
         if "document_variable_name" not in values:
-            llm_chain_variables = values["llm_chain"].prompt.input_variables
             if len(llm_chain_variables) == 1:
                 values["document_variable_name"] = llm_chain_variables[0]
             else:
@@ -176,7 +179,6 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
                     "multiple llm_chain input_variables"
                 )
         else:
-            llm_chain_variables = values["llm_chain"].prompt.input_variables
             if values["document_variable_name"] not in llm_chain_variables:
                 raise ValueError(
                     f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/map_rerank.py CHANGED
@@ -106,7 +106,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
             _output_keys += self.metadata_keys
         return _output_keys
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_llm_output(cls, values: Dict) -> Dict:
         """Validate that the combine chain outputs a dictionary."""
         output_parser = values["llm_chain"].prompt.output_parser
@@ -131,8 +131,11 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
     @root_validator(pre=True)
     def get_default_document_variable_name(cls, values: Dict) -> Dict:
         """Get default document variable name, if not provided."""
+        if "llm_chain" not in values:
+            raise ValueError("llm_chain must be provided")
+
+        llm_chain_variables = values["llm_chain"].prompt.input_variables
         if "document_variable_name" not in values:
-            llm_chain_variables = values["llm_chain"].prompt.input_variables
             if len(llm_chain_variables) == 1:
                 values["document_variable_name"] = llm_chain_variables[0]
             else:
@@ -141,7 +144,6 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
                     "multiple llm_chain input_variables"
                 )
         else:
-            llm_chain_variables = values["llm_chain"].prompt.input_variables
             if values["document_variable_name"] not in llm_chain_variables:
                 raise ValueError(
                     f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/refine.py CHANGED
@@ -115,8 +115,11 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
     @root_validator(pre=True)
     def get_default_document_variable_name(cls, values: Dict) -> Dict:
         """Get default document variable name, if not provided."""
+        if "initial_llm_chain" not in values:
+            raise ValueError("initial_llm_chain must be provided")
+
+        llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
         if "document_variable_name" not in values:
-            llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
             if len(llm_chain_variables) == 1:
                 values["document_variable_name"] = llm_chain_variables[0]
             else:
@@ -125,7 +128,6 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
                     "multiple llm_chain input_variables"
                 )
         else:
-            llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
             if values["document_variable_name"] not in llm_chain_variables:
                 raise ValueError(
                     f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/stuff.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that combines documents by stuffing into context."""
+
 from typing import Any, Dict, List, Optional, Tuple
 
 from langchain_core.callbacks import Callbacks
langchain/chains/constitutional_ai/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain for applying constitutional principles to the outputs of another chain."""
+
 from typing import Any, Dict, List, Optional
 
 from langchain_core.callbacks import CallbackManagerForChainRun
langchain/chains/constitutional_ai/models.py CHANGED
@@ -1,4 +1,5 @@
 """Models for the Constitutional AI chain."""
+
 from langchain_core.pydantic_v1 import BaseModel
 
 
langchain/chains/constitutional_ai/principles.py CHANGED
@@ -1,6 +1,7 @@
 """Constitutional principles from https://arxiv.org/pdf/2212.08073.pdf (Bai et al. 2022)
 UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
 """
+
 # flake8: noqa
 from typing import Dict
 
langchain/chains/conversation/base.py CHANGED
@@ -1,6 +1,8 @@
 """Chain that carries on a conversation and calls an LLM."""
+
 from typing import Dict, List
 
+from langchain_core._api import deprecated
 from langchain_core.memory import BaseMemory
 from langchain_core.prompts import BasePromptTemplate
 from langchain_core.pydantic_v1 import Extra, Field, root_validator
@@ -10,9 +12,87 @@ from langchain.chains.llm import LLMChain
 from langchain.memory.buffer import ConversationBufferMemory
 
 
+@deprecated(
+    since="0.2.7",
+    alternative=(
+        "RunnableWithMessageHistory: "
+        "https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html"  # noqa: E501
+    ),
+    removal="1.0",
+)
 class ConversationChain(LLMChain):
     """Chain to have a conversation and load context from memory.
 
+    This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
+    to this tutorial for more detail: https://python.langchain.com/v0.2/docs/tutorials/chatbot/
+
+    ``RunnableWithMessageHistory`` offers several benefits, including:
+
+    - Stream, batch, and async support;
+    - More flexible memory handling, including the ability to manage memory
+      outside the chain;
+    - Support for multiple threads.
+
+    Below is a minimal implementation, analogous to using ``ConversationChain`` with
+    the default ``ConversationBufferMemory``:
+
+    .. code-block:: python
+
+        from langchain_core.chat_history import InMemoryChatMessageHistory
+        from langchain_core.runnables.history import RunnableWithMessageHistory
+        from langchain_openai import ChatOpenAI
+
+
+        store = {}  # memory is maintained outside the chain
+
+        def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
+            if session_id not in store:
+                store[session_id] = InMemoryChatMessageHistory()
+            return store[session_id]
+
+        llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+
+        chain = RunnableWithMessageHistory(llm, get_session_history)
+        chain.invoke(
+            "Hi I'm Bob.",
+            config={"configurable": {"session_id": "1"}},
+        )  # session_id determines thread
+    Memory objects can also be incorporated into the ``get_session_history`` callable:
+
+    .. code-block:: python
+
+        from langchain.memory import ConversationBufferWindowMemory
+        from langchain_core.chat_history import InMemoryChatMessageHistory
+        from langchain_core.runnables.history import RunnableWithMessageHistory
+        from langchain_openai import ChatOpenAI
+
+
+        store = {}  # memory is maintained outside the chain
+
+        def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
+            if session_id not in store:
+                store[session_id] = InMemoryChatMessageHistory()
+                return store[session_id]
+
+            memory = ConversationBufferWindowMemory(
+                chat_memory=store[session_id],
+                k=3,
+                return_messages=True,
+            )
+            assert len(memory.memory_variables) == 1
+            key = memory.memory_variables[0]
+            messages = memory.load_memory_variables({})[key]
+            store[session_id] = InMemoryChatMessageHistory(messages=messages)
+            return store[session_id]
+
+        llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+
+        chain = RunnableWithMessageHistory(llm, get_session_history)
+        chain.invoke(
+            "Hi I'm Bob.",
+            config={"configurable": {"session_id": "1"}},
+        )  # session_id determines thread
+
     Example:
         .. code-block:: python
 
@@ -45,7 +125,7 @@ class ConversationChain(LLMChain):
         """Use this since so some prompt vars come from history."""
         return [self.input_key]
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_prompt_input_variables(cls, values: Dict) -> Dict:
         """Validate that prompt input variables are consistent."""
         memory_keys = values["memory"].memory_variables
langchain/chains/conversational_retrieval/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain for chatting with a vector database."""
+
 from __future__ import annotations
 
 import inspect
@@ -480,7 +481,7 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
     def _chain_type(self) -> str:
         return "chat-vector-db"
 
-    @root_validator()
+    @root_validator(pre=True)
     def raise_deprecation(cls, values: Dict) -> Dict:
         warnings.warn(
             "`ChatVectorDBChain` is deprecated - "
langchain/chains/elasticsearch_database/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain for interacting with Elasticsearch Database."""
+
 from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -56,7 +57,7 @@ class ElasticsearchDatabaseChain(Chain):
         extra = Extra.forbid
         arbitrary_types_allowed = True
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_indices(cls, values: dict) -> dict:
         if values["include_indices"] and values["ignore_indices"]:
             raise ValueError(
langchain/chains/hyde/base.py CHANGED
@@ -2,6 +2,7 @@
 
 https://arxiv.org/abs/2212.10496
 """
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Optional
langchain/chains/llm.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that just formats a prompt and calls an LLM."""
+
 from __future__ import annotations
 
 import warnings
langchain/chains/llm_checker/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain for question-answering with self-verification."""
+
 from __future__ import annotations
 
 import warnings
@@ -118,9 +119,9 @@ class LLMCheckerChain(Chain):
                         values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT),
                     )
                 )
-                values[
-                    "question_to_checked_assertions_chain"
-                ] = question_to_checked_assertions_chain
+                values["question_to_checked_assertions_chain"] = (
+                    question_to_checked_assertions_chain
+                )
         return values
 
     @property
langchain/chains/llm_math/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that interprets a prompt and executes python code to do math."""
+
 from __future__ import annotations
 
 import math
langchain/chains/loading.py CHANGED
@@ -1,4 +1,5 @@
 """Functionality for loading chains."""
+
 from __future__ import annotations
 
 import json
@@ -408,7 +409,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
     if "llm_chain" in config:
         llm_chain_config = config.pop("llm_chain")
         chain = load_chain_from_config(llm_chain_config, **kwargs)
-        return SQLDatabaseChain(llm_chain=chain, database=database, **config)
+        return SQLDatabaseChain(llm_chain=chain, database=database, **config)  # type: ignore[arg-type]
     if "llm" in config:
         llm_config = config.pop("llm")
         llm = load_llm_from_config(llm_config, **kwargs)
langchain/chains/mapreduce.py CHANGED
@@ -3,6 +3,7 @@
 Splits up a document, sends the smaller parts to the LLM with one prompt,
 then combines the results with another one.
 """
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Mapping, Optional
langchain/chains/moderation.py CHANGED
@@ -40,7 +40,7 @@ class OpenAIModerationChain(Chain):
     openai_organization: Optional[str] = None
     _openai_pre_1_0: bool = Field(default=None)
 
-    @root_validator()
+    @root_validator(pre=True)
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         openai_api_key = get_from_dict_or_env(
langchain/chains/natbot/base.py CHANGED
@@ -1,4 +1,5 @@
 """Implement an LLM driven browser."""
+
 from __future__ import annotations
 
 import warnings
langchain/chains/openai_functions/base.py CHANGED
@@ -1,4 +1,5 @@
 """Methods for creating chains that use OpenAI function-calling APIs."""
+
 from typing import (
     Any,
     Callable,
langchain/chains/qa_generation/base.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 import json
 from typing import Any, Dict, List, Optional
 
+from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
@@ -14,8 +15,53 @@ from langchain.chains.llm import LLMChain
 from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
 
 
+@deprecated(
+    since="0.2.7",
+    alternative=(
+        "example in API reference with more detail: "
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"  # noqa: E501
+    ),
+    removal="1.0",
+)
 class QAGenerationChain(Chain):
-    """Base class for question-answer generation chains."""
+    """Base class for question-answer generation chains.
+
+    This class is deprecated. See below for an alternative implementation.
+
+    Advantages of this implementation include:
+
+    - Supports async and streaming;
+    - Surfaces prompt and text splitter for easier customization;
+    - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
+      as well as robustness to markdown.
+
+    .. code-block:: python
+
+        from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
+        # Note: import PROMPT if using a legacy non-chat model.
+        from langchain_core.output_parsers import JsonOutputParser
+        from langchain_core.runnables import (
+            RunnableLambda,
+            RunnableParallel,
+            RunnablePassthrough,
+        )
+        from langchain_core.runnables.base import RunnableEach
+        from langchain_openai import ChatOpenAI
+        from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+        llm = ChatOpenAI()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
+        split_text = RunnableLambda(
+            lambda x: text_splitter.create_documents([x])
+        )
+
+        chain = RunnableParallel(
+            text=RunnablePassthrough(),
+            questions=(
+                split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
+            )
+        )
+    """
 
     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
langchain/chains/qa_with_sources/__init__.py CHANGED
@@ -1,4 +1,5 @@
 """Load question answering with sources chains."""
+
 from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
 
 __all__ = ["load_qa_with_sources_chain"]
langchain/chains/qa_with_sources/loading.py CHANGED
@@ -1,4 +1,5 @@
 """Load question answering with sources chains."""
+
 from __future__ import annotations
 
 from typing import Any, Mapping, Optional, Protocol
langchain/chains/qa_with_sources/vector_db.py CHANGED
@@ -61,7 +61,7 @@ class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
     ) -> List[Document]:
         raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
 
-    @root_validator()
+    @root_validator(pre=True)
     def raise_deprecation(cls, values: Dict) -> Dict:
         warnings.warn(
             "`VectorDBQAWithSourcesChain` is deprecated - "
langchain/chains/query_constructor/base.py CHANGED
@@ -1,4 +1,5 @@
 """LLM Chain for turning a user text query into a structured query."""
+
 from __future__ import annotations
 
 import json
langchain/chains/query_constructor/ir.py CHANGED
@@ -1,4 +1,5 @@
 """Internal representation of a structured query language."""
+
 from langchain_core.structured_query import (
     Comparator,
     Comparison,
langchain/chains/question_answering/chain.py CHANGED
@@ -1,4 +1,5 @@
 """Load question answering chains."""
+
 from typing import Any, Mapping, Optional, Protocol
 
 from langchain_core.callbacks import BaseCallbackManager, Callbacks
langchain/chains/retrieval_qa/base.py CHANGED
@@ -1,4 +1,5 @@
 """Chain for question-answering against a vector database."""
+
 from __future__ import annotations
 
 import inspect
@@ -284,7 +285,7 @@ class VectorDBQA(BaseRetrievalQA):
     search_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Extra search args."""
 
-    @root_validator()
+    @root_validator(pre=True)
     def raise_deprecation(cls, values: Dict) -> Dict:
         warnings.warn(
             "`VectorDBQA` is deprecated - "
@@ -292,7 +293,7 @@ class VectorDBQA(BaseRetrievalQA):
         )
         return values
 
-    @root_validator()
+    @root_validator(pre=True)
     def validate_search_type(cls, values: Dict) -> Dict:
         """Validate search type."""
         if "search_type" in values:
langchain/chains/router/base.py CHANGED
@@ -1,4 +1,5 @@
 """Base classes for chain routing."""
+
 from __future__ import annotations
 
 from abc import ABC
langchain/chains/router/llm_router.py CHANGED
@@ -1,4 +1,5 @@
 """Base classes for LLM-powered router chains."""
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Optional, Type, cast
@@ -24,7 +25,7 @@ class LLMRouterChain(RouterChain):
     llm_chain: LLMChain
     """LLM chain used to perform routing"""
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_prompt(cls, values: dict) -> dict:
         prompt = values["llm_chain"].prompt
         if prompt.output_parser is None:
langchain/chains/router/multi_prompt.py CHANGED
@@ -1,4 +1,5 @@
 """Use a single chain to route an input to one of multiple llm chains."""
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Optional
langchain/chains/router/multi_retrieval_qa.py CHANGED
@@ -1,4 +1,5 @@
 """Use a single chain to route an input to one of multiple retrieval qa chains."""
+
 from __future__ import annotations
 
 from typing import Any, Dict, List, Mapping, Optional
langchain/chains/sequential.py CHANGED
@@ -1,4 +1,5 @@
 """Chain pipeline where the outputs of one step feed directly into next."""
+
 from typing import Any, Dict, List, Optional
 
 from langchain_core.callbacks import (
@@ -152,7 +153,7 @@ class SimpleSequentialChain(Chain):
         """
         return [self.output_key]
 
-    @root_validator()
+    @root_validator(pre=False, skip_on_failure=True)
     def validate_chains(cls, values: Dict) -> Dict:
         """Validate that chains are all single input/output."""
         for chain in values["chains"]:
langchain/chains/structured_output/base.py CHANGED
@@ -466,9 +466,9 @@ def _get_openai_tool_output_parser(
     first_tool_only: bool = False,
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
     if isinstance(tool, type) and issubclass(tool, BaseModel):
-        output_parser: Union[
-            BaseOutputParser, BaseGenerationOutputParser
-        ] = PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
+        output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
+            PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
+        )
     else:
         key_name = convert_to_openai_tool(tool)["function"]["name"]
         output_parser = JsonOutputKeyToolsParser(
@@ -500,9 +500,9 @@ def get_openai_output_parser(
             }
         else:
             pydantic_schema = functions[0]
-        output_parser: Union[
-            BaseOutputParser, BaseGenerationOutputParser
-        ] = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
+        output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
+            PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
+        )
     else:
         output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
     return output_parser
langchain/chains/summarize/chain.py CHANGED
@@ -1,4 +1,5 @@
 """Load summarizing chains."""
+
 from typing import Any, Mapping, Optional, Protocol
 
 from langchain_core.callbacks import Callbacks
langchain/chains/transform.py CHANGED
@@ -1,4 +1,5 @@
 """Chain that runs an arbitrary python function."""
+
 import functools
 import logging
 from typing import Any, Awaitable, Callable, Dict, List, Optional
@@ -31,9 +32,9 @@ class TransformChain(Chain):
     """The keys returned by the transform's output dictionary."""
     transform_cb: Callable[[Dict[str, str]], Dict[str, str]] = Field(alias="transform")
     """The transform function."""
-    atransform_cb: Optional[
-        Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]
-    ] = Field(None, alias="atransform")
+    atransform_cb: Optional[Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]] = (
+        Field(None, alias="atransform")
+    )
     """The async coroutine transform function."""
 
     @staticmethod
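
Note: the final hunk only reflows the ``atransform_cb`` annotation; behavior is unchanged. For context, a small sketch of how the two callbacks are supplied through their ``transform`` / ``atransform`` aliases when constructing ``TransformChain`` (illustrative usage, not code from this diff):

    from typing import Any, Dict

    from langchain.chains import TransformChain


    def shout(inputs: Dict[str, str]) -> Dict[str, str]:
        # Synchronous transform, bound to transform_cb via alias="transform".
        return {"shouted": inputs["text"].upper()}


    async def ashout(inputs: Dict[str, Any]) -> Dict[str, Any]:
        # Async transform, bound to atransform_cb via alias="atransform".
        return {"shouted": inputs["text"].upper()}


    chain = TransformChain(
        input_variables=["text"],
        output_variables=["shouted"],
        transform=shout,
        atransform=ashout,
    )
    print(chain.invoke({"text": "hello"}))  # expect "shouted" alongside the input key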