langchain 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of langchain might be problematic; review the file changes below for details.

Files changed (197)
  1. langchain/__init__.py +1 -0
  2. langchain/_api/module_import.py +2 -2
  3. langchain/agents/__init__.py +5 -4
  4. langchain/agents/agent.py +272 -50
  5. langchain/agents/agent_iterator.py +20 -0
  6. langchain/agents/agent_toolkits/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  10. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  11. langchain/agents/agent_types.py +1 -0
  12. langchain/agents/chat/base.py +37 -1
  13. langchain/agents/chat/output_parser.py +14 -0
  14. langchain/agents/conversational/base.py +38 -6
  15. langchain/agents/conversational/output_parser.py +10 -0
  16. langchain/agents/conversational_chat/base.py +42 -3
  17. langchain/agents/format_scratchpad/__init__.py +1 -0
  18. langchain/agents/format_scratchpad/log.py +12 -1
  19. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  20. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  21. langchain/agents/format_scratchpad/tools.py +11 -7
  22. langchain/agents/initialize.py +15 -7
  23. langchain/agents/json_chat/base.py +9 -3
  24. langchain/agents/loading.py +7 -0
  25. langchain/agents/mrkl/base.py +39 -10
  26. langchain/agents/mrkl/output_parser.py +12 -0
  27. langchain/agents/openai_assistant/base.py +37 -14
  28. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  29. langchain/agents/openai_functions_agent/base.py +61 -10
  30. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  31. langchain/agents/openai_tools/base.py +3 -0
  32. langchain/agents/output_parsers/__init__.py +1 -0
  33. langchain/agents/react/base.py +1 -0
  34. langchain/agents/self_ask_with_search/base.py +1 -0
  35. langchain/agents/structured_chat/output_parser.py +3 -3
  36. langchain/agents/tool_calling_agent/base.py +13 -3
  37. langchain/agents/tools.py +3 -0
  38. langchain/agents/utils.py +9 -1
  39. langchain/base_language.py +1 -0
  40. langchain/callbacks/__init__.py +1 -0
  41. langchain/callbacks/base.py +1 -0
  42. langchain/callbacks/streaming_stdout.py +1 -0
  43. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  44. langchain/callbacks/tracers/evaluation.py +1 -0
  45. langchain/chains/api/base.py +5 -2
  46. langchain/chains/base.py +1 -1
  47. langchain/chains/combine_documents/base.py +59 -0
  48. langchain/chains/combine_documents/map_reduce.py +4 -2
  49. langchain/chains/combine_documents/map_rerank.py +5 -3
  50. langchain/chains/combine_documents/refine.py +4 -2
  51. langchain/chains/combine_documents/stuff.py +1 -0
  52. langchain/chains/constitutional_ai/base.py +1 -0
  53. langchain/chains/constitutional_ai/models.py +1 -0
  54. langchain/chains/constitutional_ai/principles.py +1 -0
  55. langchain/chains/conversation/base.py +81 -1
  56. langchain/chains/conversational_retrieval/base.py +2 -1
  57. langchain/chains/elasticsearch_database/base.py +2 -1
  58. langchain/chains/hyde/base.py +1 -0
  59. langchain/chains/llm.py +4 -2
  60. langchain/chains/llm_checker/base.py +4 -3
  61. langchain/chains/llm_math/base.py +1 -0
  62. langchain/chains/loading.py +2 -1
  63. langchain/chains/mapreduce.py +1 -0
  64. langchain/chains/moderation.py +1 -1
  65. langchain/chains/natbot/base.py +1 -0
  66. langchain/chains/openai_functions/base.py +1 -0
  67. langchain/chains/openai_functions/extraction.py +6 -6
  68. langchain/chains/openai_tools/extraction.py +3 -3
  69. langchain/chains/qa_generation/base.py +47 -1
  70. langchain/chains/qa_with_sources/__init__.py +1 -0
  71. langchain/chains/qa_with_sources/loading.py +1 -0
  72. langchain/chains/qa_with_sources/vector_db.py +1 -1
  73. langchain/chains/query_constructor/base.py +1 -0
  74. langchain/chains/query_constructor/ir.py +1 -0
  75. langchain/chains/question_answering/chain.py +1 -0
  76. langchain/chains/retrieval_qa/base.py +3 -2
  77. langchain/chains/router/base.py +1 -0
  78. langchain/chains/router/llm_router.py +2 -1
  79. langchain/chains/router/multi_prompt.py +1 -0
  80. langchain/chains/router/multi_retrieval_qa.py +1 -0
  81. langchain/chains/sequential.py +2 -1
  82. langchain/chains/structured_output/base.py +12 -12
  83. langchain/chains/summarize/chain.py +1 -0
  84. langchain/chains/transform.py +4 -3
  85. langchain/chat_models/__init__.py +1 -0
  86. langchain/chat_models/base.py +2 -2
  87. langchain/docstore/__init__.py +1 -0
  88. langchain/document_loaders/__init__.py +1 -0
  89. langchain/document_transformers/__init__.py +1 -0
  90. langchain/embeddings/__init__.py +0 -1
  91. langchain/evaluation/__init__.py +2 -1
  92. langchain/evaluation/agents/__init__.py +1 -0
  93. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  94. langchain/evaluation/comparison/__init__.py +1 -0
  95. langchain/evaluation/comparison/eval_chain.py +1 -0
  96. langchain/evaluation/comparison/prompt.py +1 -0
  97. langchain/evaluation/embedding_distance/__init__.py +1 -0
  98. langchain/evaluation/embedding_distance/base.py +1 -0
  99. langchain/evaluation/loading.py +1 -0
  100. langchain/evaluation/parsing/base.py +1 -0
  101. langchain/evaluation/qa/__init__.py +1 -0
  102. langchain/evaluation/qa/eval_chain.py +1 -0
  103. langchain/evaluation/qa/generate_chain.py +1 -0
  104. langchain/evaluation/schema.py +1 -0
  105. langchain/evaluation/scoring/__init__.py +1 -0
  106. langchain/evaluation/scoring/eval_chain.py +1 -0
  107. langchain/evaluation/scoring/prompt.py +1 -0
  108. langchain/evaluation/string_distance/__init__.py +1 -0
  109. langchain/example_generator.py +1 -0
  110. langchain/formatting.py +1 -0
  111. langchain/globals/__init__.py +1 -0
  112. langchain/graphs/__init__.py +1 -0
  113. langchain/indexes/__init__.py +1 -0
  114. langchain/indexes/_sql_record_manager.py +9 -5
  115. langchain/indexes/graph.py +1 -0
  116. langchain/indexes/prompts/__init__.py +1 -0
  117. langchain/input.py +1 -0
  118. langchain/llms/__init__.py +1 -0
  119. langchain/load/__init__.py +1 -0
  120. langchain/memory/__init__.py +5 -0
  121. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  122. langchain/output_parsers/__init__.py +1 -0
  123. langchain/output_parsers/combining.py +1 -1
  124. langchain/output_parsers/enum.py +7 -3
  125. langchain/output_parsers/fix.py +57 -16
  126. langchain/output_parsers/pandas_dataframe.py +1 -1
  127. langchain/output_parsers/regex.py +1 -1
  128. langchain/output_parsers/regex_dict.py +1 -1
  129. langchain/output_parsers/retry.py +76 -29
  130. langchain/output_parsers/structured.py +3 -3
  131. langchain/output_parsers/yaml.py +4 -0
  132. langchain/prompts/__init__.py +1 -0
  133. langchain/prompts/example_selector/__init__.py +1 -0
  134. langchain/python.py +1 -0
  135. langchain/requests.py +1 -0
  136. langchain/retrievers/__init__.py +1 -0
  137. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  138. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  139. langchain/retrievers/ensemble.py +18 -3
  140. langchain/retrievers/multi_query.py +2 -1
  141. langchain/retrievers/re_phraser.py +2 -1
  142. langchain/retrievers/self_query/base.py +9 -8
  143. langchain/schema/__init__.py +1 -0
  144. langchain/schema/runnable/__init__.py +1 -0
  145. langchain/serpapi.py +1 -0
  146. langchain/smith/__init__.py +6 -5
  147. langchain/smith/evaluation/__init__.py +0 -1
  148. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  149. langchain/sql_database.py +1 -0
  150. langchain/storage/__init__.py +1 -0
  151. langchain/storage/_lc_store.py +1 -0
  152. langchain/storage/in_memory.py +1 -0
  153. langchain/text_splitter.py +1 -0
  154. langchain/tools/__init__.py +1 -0
  155. langchain/tools/amadeus/__init__.py +1 -0
  156. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  157. langchain/tools/bing_search/__init__.py +1 -0
  158. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  159. langchain/tools/ddg_search/__init__.py +1 -0
  160. langchain/tools/edenai/__init__.py +1 -0
  161. langchain/tools/eleven_labs/__init__.py +1 -0
  162. langchain/tools/file_management/__init__.py +1 -0
  163. langchain/tools/github/__init__.py +1 -1
  164. langchain/tools/gitlab/__init__.py +1 -1
  165. langchain/tools/gmail/__init__.py +1 -0
  166. langchain/tools/golden_query/__init__.py +1 -0
  167. langchain/tools/google_cloud/__init__.py +1 -0
  168. langchain/tools/google_finance/__init__.py +1 -0
  169. langchain/tools/google_jobs/__init__.py +1 -0
  170. langchain/tools/google_lens/__init__.py +1 -0
  171. langchain/tools/google_places/__init__.py +1 -0
  172. langchain/tools/google_scholar/__init__.py +1 -0
  173. langchain/tools/google_search/__init__.py +1 -0
  174. langchain/tools/google_trends/__init__.py +1 -0
  175. langchain/tools/human/__init__.py +1 -0
  176. langchain/tools/memorize/__init__.py +1 -0
  177. langchain/tools/metaphor_search/__init__.py +1 -0
  178. langchain/tools/multion/__init__.py +1 -0
  179. langchain/tools/office365/__init__.py +1 -0
  180. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  181. langchain/tools/openweathermap/__init__.py +1 -0
  182. langchain/tools/playwright/__init__.py +1 -0
  183. langchain/tools/shell/__init__.py +1 -0
  184. langchain/tools/slack/__init__.py +1 -0
  185. langchain/tools/sql_database/prompt.py +1 -0
  186. langchain/tools/steamship_image_generation/__init__.py +1 -0
  187. langchain/tools/tavily_search/__init__.py +1 -0
  188. langchain/tools/wolfram_alpha/__init__.py +1 -0
  189. langchain/tools/zapier/__init__.py +1 -0
  190. langchain/utilities/__init__.py +1 -0
  191. langchain/utilities/python.py +1 -0
  192. langchain/vectorstores/__init__.py +1 -0
  193. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/METADATA +3 -4
  194. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/RECORD +197 -196
  195. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/LICENSE +0 -0
  196. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/WHEEL +0 -0
  197. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/entry_points.txt +0 -0
langchain/base_language.py CHANGED
@@ -1,4 +1,5 @@
  """Deprecated module for BaseLanguageModel class, kept for backwards compatibility."""
+
  from __future__ import annotations

  from langchain_core.language_models import BaseLanguageModel
langchain/callbacks/__init__.py CHANGED
@@ -6,6 +6,7 @@

  BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
  """
+
  from typing import TYPE_CHECKING, Any

  from langchain_core.callbacks import (
langchain/callbacks/base.py CHANGED
@@ -1,4 +1,5 @@
  """Base callback handler that can be used to handle callbacks in langchain."""
+
  from __future__ import annotations

  from langchain_core.callbacks import (
langchain/callbacks/streaming_stdout.py CHANGED
@@ -1,4 +1,5 @@
  """Callback Handler streams to stdout on new llm token."""
+
  from langchain_core.callbacks import StreamingStdOutCallbackHandler

  __all__ = ["StreamingStdOutCallbackHandler"]
langchain/callbacks/streaming_stdout_final_only.py CHANGED
@@ -1,4 +1,5 @@
  """Callback Handler streams to stdout on new llm token."""
+
  import sys
  from typing import Any, Dict, List, Optional

langchain/callbacks/tracers/evaluation.py CHANGED
@@ -1,4 +1,5 @@
  """A tracer that runs evaluators over completed runs."""
+
  from langchain_core.tracers.evaluation import (
  EvaluatorCallbackHandler,
  wait_for_all_evaluators,
langchain/chains/api/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain that makes API calls and summarizes the responses to answer a question."""
+
  from __future__ import annotations

  from typing import Any, Dict, List, Optional, Sequence, Tuple
@@ -106,7 +107,7 @@ try:
  """
  return [self.output_key]

- @root_validator(pre=True)
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_api_request_prompt(cls, values: Dict) -> Dict:
  """Check that api request prompt expects the right variables."""
  input_vars = values["api_request_chain"].prompt.input_variables
@@ -120,6 +121,8 @@ try:
  @root_validator(pre=True)
  def validate_limit_to_domains(cls, values: Dict) -> Dict:
  """Check that allowed domains are valid."""
+ # This check must be a pre=True check, so that a default of None
+ # won't be set to limit_to_domains if it's not provided.
  if "limit_to_domains" not in values:
  raise ValueError(
  "You must specify a list of domains to limit access using "
@@ -135,7 +138,7 @@ try:
  )
  return values

- @root_validator(pre=True)
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_api_answer_prompt(cls, values: Dict) -> Dict:
  """Check that api answer prompt expects the right variables."""
  input_vars = values["api_answer_chain"].prompt.input_variables
langchain/chains/base.py CHANGED
@@ -225,7 +225,7 @@ class Chain(RunnableSerializable[Dict[str, Any], Dict[str, Any]], ABC):
  def _chain_type(self) -> str:
  raise NotImplementedError("Saving not supported for this chain type.")

- @root_validator()
+ @root_validator(pre=True)
  def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:
  """Raise deprecation warning if callback_manager is used."""
  if values.get("callback_manager") is not None:
langchain/chains/combine_documents/base.py CHANGED
@@ -3,6 +3,7 @@
  from abc import ABC, abstractmethod
  from typing import Any, Dict, List, Optional, Tuple, Type

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import (
  AsyncCallbackManagerForChainRun,
  CallbackManagerForChainRun,
@@ -157,12 +158,70 @@ class BaseCombineDocumentsChain(Chain, ABC):
  return extra_return_dict


+ @deprecated(
+ since="0.2.7",
+ alternative=(
+ "example in API reference with more detail: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.base.AnalyzeDocumentChain.html" # noqa: E501
+ ),
+ removal="1.0",
+ )
  class AnalyzeDocumentChain(Chain):
  """Chain that splits documents, then analyzes it in pieces.

  This chain is parameterized by a TextSplitter and a CombineDocumentsChain.
  This chain takes a single document as input, and then splits it up into chunks
  and then passes those chucks to the CombineDocumentsChain.
+
+ This class is deprecated. See below for alternative implementations which
+ supports async and streaming modes of operation.
+
+ If the underlying combine documents chain takes one ``input_documents`` argument
+ (e.g., chains generated by ``load_summarize_chain``):
+
+ .. code-block:: python
+
+ split_text = lambda x: text_splitter.create_documents([x])
+
+ summarize_document_chain = split_text | chain
+
+ If the underlying chain takes additional arguments (e.g., ``load_qa_chain``, which
+ takes an additional ``question`` argument), we can use the following:
+
+ .. code-block:: python
+
+ from operator import itemgetter
+ from langchain_core.runnables import RunnableLambda, RunnableParallel
+
+ split_text = RunnableLambda(
+ lambda x: text_splitter.create_documents([x])
+ )
+ summarize_document_chain = RunnableParallel(
+ question=itemgetter("question"),
+ input_documents=itemgetter("input_document") | split_text,
+ ) | chain.pick("output_text")
+
+ To additionally return the input parameters, as ``AnalyzeDocumentChain`` does,
+ we can wrap this construction with ``RunnablePassthrough``:
+
+ .. code-block:: python
+
+ from operator import itemgetter
+ from langchain_core.runnables import (
+ RunnableLambda,
+ RunnableParallel,
+ RunnablePassthrough,
+ )
+
+ split_text = RunnableLambda(
+ lambda x: text_splitter.create_documents([x])
+ )
+ summarize_document_chain = RunnablePassthrough.assign(
+ output_text=RunnableParallel(
+ question=itemgetter("question"),
+ input_documents=itemgetter("input_document") | split_text,
+ ) | chain.pick("output_text")
+ )
  """

  input_key: str = "input_document" #: :meta private:
langchain/chains/combine_documents/map_reduce.py CHANGED
@@ -166,8 +166,11 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
  @root_validator(pre=True)
  def get_default_document_variable_name(cls, values: Dict) -> Dict:
  """Get default document variable name, if not provided."""
+ if "llm_chain" not in values:
+ raise ValueError("llm_chain must be provided")
+
+ llm_chain_variables = values["llm_chain"].prompt.input_variables
  if "document_variable_name" not in values:
- llm_chain_variables = values["llm_chain"].prompt.input_variables
  if len(llm_chain_variables) == 1:
  values["document_variable_name"] = llm_chain_variables[0]
  else:
@@ -176,7 +179,6 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain):
  "multiple llm_chain input_variables"
  )
  else:
- llm_chain_variables = values["llm_chain"].prompt.input_variables
  if values["document_variable_name"] not in llm_chain_variables:
  raise ValueError(
  f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/map_rerank.py CHANGED
@@ -106,7 +106,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  _output_keys += self.metadata_keys
  return _output_keys

- @root_validator()
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_llm_output(cls, values: Dict) -> Dict:
  """Validate that the combine chain outputs a dictionary."""
  output_parser = values["llm_chain"].prompt.output_parser
@@ -131,8 +131,11 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  @root_validator(pre=True)
  def get_default_document_variable_name(cls, values: Dict) -> Dict:
  """Get default document variable name, if not provided."""
+ if "llm_chain" not in values:
+ raise ValueError("llm_chain must be provided")
+
+ llm_chain_variables = values["llm_chain"].prompt.input_variables
  if "document_variable_name" not in values:
- llm_chain_variables = values["llm_chain"].prompt.input_variables
  if len(llm_chain_variables) == 1:
  values["document_variable_name"] = llm_chain_variables[0]
  else:
@@ -141,7 +144,6 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
  "multiple llm_chain input_variables"
  )
  else:
- llm_chain_variables = values["llm_chain"].prompt.input_variables
  if values["document_variable_name"] not in llm_chain_variables:
  raise ValueError(
  f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/refine.py CHANGED
@@ -115,8 +115,11 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
  @root_validator(pre=True)
  def get_default_document_variable_name(cls, values: Dict) -> Dict:
  """Get default document variable name, if not provided."""
+ if "initial_llm_chain" not in values:
+ raise ValueError("initial_llm_chain must be provided")
+
+ llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
  if "document_variable_name" not in values:
- llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
  if len(llm_chain_variables) == 1:
  values["document_variable_name"] = llm_chain_variables[0]
  else:
@@ -125,7 +128,6 @@ class RefineDocumentsChain(BaseCombineDocumentsChain):
  "multiple llm_chain input_variables"
  )
  else:
- llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
  if values["document_variable_name"] not in llm_chain_variables:
  raise ValueError(
  f"document_variable_name {values['document_variable_name']} was "
langchain/chains/combine_documents/stuff.py CHANGED
@@ -1,4 +1,5 @@
  """Chain that combines documents by stuffing into context."""
+
  from typing import Any, Dict, List, Optional, Tuple

  from langchain_core.callbacks import Callbacks
langchain/chains/constitutional_ai/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain for applying constitutional principles to the outputs of another chain."""
+
  from typing import Any, Dict, List, Optional

  from langchain_core.callbacks import CallbackManagerForChainRun
langchain/chains/constitutional_ai/models.py CHANGED
@@ -1,4 +1,5 @@
  """Models for the Constitutional AI chain."""
+
  from langchain_core.pydantic_v1 import BaseModel


langchain/chains/constitutional_ai/principles.py CHANGED
@@ -1,6 +1,7 @@
  """Constitutional principles from https://arxiv.org/pdf/2212.08073.pdf (Bai et al. 2022)
  UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
  """
+
  # flake8: noqa
  from typing import Dict

langchain/chains/conversation/base.py CHANGED
@@ -1,6 +1,8 @@
  """Chain that carries on a conversation and calls an LLM."""
+
  from typing import Dict, List

+ from langchain_core._api import deprecated
  from langchain_core.memory import BaseMemory
  from langchain_core.prompts import BasePromptTemplate
  from langchain_core.pydantic_v1 import Extra, Field, root_validator
@@ -10,9 +12,87 @@ from langchain.chains.llm import LLMChain
  from langchain.memory.buffer import ConversationBufferMemory


+ @deprecated(
+ since="0.2.7",
+ alternative=(
+ "RunnableWithMessageHistory: "
+ "https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html" # noqa: E501
+ ),
+ removal="1.0",
+ )
  class ConversationChain(LLMChain):
  """Chain to have a conversation and load context from memory.

+ This class is deprecated in favor of ``RunnableWithMessageHistory``. Please refer
+ to this tutorial for more detail: https://python.langchain.com/v0.2/docs/tutorials/chatbot/
+
+ ``RunnableWithMessageHistory`` offers several benefits, including:
+
+ - Stream, batch, and async support;
+ - More flexible memory handling, including the ability to manage memory
+ outside the chain;
+ - Support for multiple threads.
+
+ Below is a minimal implementation, analogous to using ``ConversationChain`` with
+ the default ``ConversationBufferMemory``:
+
+ .. code-block:: python
+
+ from langchain_core.chat_history import InMemoryChatMessageHistory
+ from langchain_core.runnables.history import RunnableWithMessageHistory
+ from langchain_openai import ChatOpenAI
+
+
+ store = {} # memory is maintained outside the chain
+
+ def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
+ if session_id not in store:
+ store[session_id] = InMemoryChatMessageHistory()
+ return store[session_id]
+
+ llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+
+ chain = RunnableWithMessageHistory(llm, get_session_history)
+ chain.invoke(
+ "Hi I'm Bob.",
+ config={"configurable": {"session_id": "1"}},
+ ) # session_id determines thread
+ Memory objects can also be incorporated into the ``get_session_history`` callable:
+
+ .. code-block:: python
+
+ from langchain.memory import ConversationBufferWindowMemory
+ from langchain_core.chat_history import InMemoryChatMessageHistory
+ from langchain_core.runnables.history import RunnableWithMessageHistory
+ from langchain_openai import ChatOpenAI
+
+
+ store = {} # memory is maintained outside the chain
+
+ def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
+ if session_id not in store:
+ store[session_id] = InMemoryChatMessageHistory()
+ return store[session_id]
+
+ memory = ConversationBufferWindowMemory(
+ chat_memory=store[session_id],
+ k=3,
+ return_messages=True,
+ )
+ assert len(memory.memory_variables) == 1
+ key = memory.memory_variables[0]
+ messages = memory.load_memory_variables({})[key]
+ store[session_id] = InMemoryChatMessageHistory(messages=messages)
+ return store[session_id]
+
+ llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
+
+ chain = RunnableWithMessageHistory(llm, get_session_history)
+ chain.invoke(
+ "Hi I'm Bob.",
+ config={"configurable": {"session_id": "1"}},
+ ) # session_id determines thread
+
  Example:
  .. code-block:: python

@@ -45,7 +125,7 @@ class ConversationChain(LLMChain):
  """Use this since so some prompt vars come from history."""
  return [self.input_key]

- @root_validator()
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_prompt_input_variables(cls, values: Dict) -> Dict:
  """Validate that prompt input variables are consistent."""
  memory_keys = values["memory"].memory_variables
langchain/chains/conversational_retrieval/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain for chatting with a vector database."""
+
  from __future__ import annotations

  import inspect
@@ -480,7 +481,7 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  def _chain_type(self) -> str:
  return "chat-vector-db"

- @root_validator()
+ @root_validator(pre=True)
  def raise_deprecation(cls, values: Dict) -> Dict:
  warnings.warn(
  "`ChatVectorDBChain` is deprecated - "
langchain/chains/elasticsearch_database/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain for interacting with Elasticsearch Database."""
+
  from __future__ import annotations

  from typing import TYPE_CHECKING, Any, Dict, List, Optional
@@ -56,7 +57,7 @@ class ElasticsearchDatabaseChain(Chain):
  extra = Extra.forbid
  arbitrary_types_allowed = True

- @root_validator()
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_indices(cls, values: dict) -> dict:
  if values["include_indices"] and values["ignore_indices"]:
  raise ValueError(
langchain/chains/hyde/base.py CHANGED
@@ -2,6 +2,7 @@

  https://arxiv.org/abs/2212.10496
  """
+
  from __future__ import annotations

  from typing import Any, Dict, List, Optional
langchain/chains/llm.py CHANGED
@@ -1,4 +1,5 @@
  """Chain that just formats a prompt and calls an LLM."""
+
  from __future__ import annotations

  import warnings
@@ -38,7 +39,7 @@ from langchain.chains.base import Chain
  @deprecated(
  since="0.1.17",
  alternative="RunnableSequence, e.g., `prompt | llm`",
- removal="0.3.0",
+ removal="1.0",
  )
  class LLMChain(Chain):
  """Chain to run queries against LLMs.
@@ -48,6 +49,7 @@ class LLMChain(Chain):

  .. code-block:: python

+ from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import PromptTemplate
  from langchain_openai import OpenAI

@@ -56,7 +58,7 @@ class LLMChain(Chain):
  input_variables=["adjective"], template=prompt_template
  )
  llm = OpenAI()
- chain = prompt | llm
+ chain = prompt | llm | StrOutputParser()

  chain.invoke("your adjective here")
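The docstring change above appends `StrOutputParser` to the recommended LCEL replacement for `LLMChain` so the pipeline returns a plain string. A minimal, self-contained sketch of that pattern, not code from the package, assuming `langchain-openai` is installed and `OPENAI_API_KEY` is set (the prompt text is illustrative):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

prompt = PromptTemplate(
    input_variables=["adjective"],
    template="Tell me a {adjective} joke.",
)
llm = OpenAI()

# Appending StrOutputParser keeps the chain's output a plain string,
# which is the shape of result users of the legacy LLMChain expect.
chain = prompt | llm | StrOutputParser()

print(chain.invoke({"adjective": "terrible"}))
```

For a completion model like `OpenAI` the parser is nearly a pass-through, but it keeps the output type stable if the model is later swapped for a chat model that returns message objects.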
 
langchain/chains/llm_checker/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain for question-answering with self-verification."""
+
  from __future__ import annotations

  import warnings
@@ -118,9 +119,9 @@ class LLMCheckerChain(Chain):
  values.get("revised_answer_prompt", REVISED_ANSWER_PROMPT),
  )
  )
- values[
- "question_to_checked_assertions_chain"
- ] = question_to_checked_assertions_chain
+ values["question_to_checked_assertions_chain"] = (
+ question_to_checked_assertions_chain
+ )
  return values

  @property
langchain/chains/llm_math/base.py CHANGED
@@ -1,4 +1,5 @@
  """Chain that interprets a prompt and executes python code to do math."""
+
  from __future__ import annotations

  import math
langchain/chains/loading.py CHANGED
@@ -1,4 +1,5 @@
  """Functionality for loading chains."""
+
  from __future__ import annotations

  import json
@@ -408,7 +409,7 @@ def _load_sql_database_chain(config: dict, **kwargs: Any) -> Any:
  if "llm_chain" in config:
  llm_chain_config = config.pop("llm_chain")
  chain = load_chain_from_config(llm_chain_config, **kwargs)
- return SQLDatabaseChain(llm_chain=chain, database=database, **config)
+ return SQLDatabaseChain(llm_chain=chain, database=database, **config) # type: ignore[arg-type]
  if "llm" in config:
  llm_config = config.pop("llm")
  llm = load_llm_from_config(llm_config, **kwargs)
langchain/chains/mapreduce.py CHANGED
@@ -3,6 +3,7 @@
  Splits up a document, sends the smaller parts to the LLM with one prompt,
  then combines the results with another one.
  """
+
  from __future__ import annotations

  from typing import Any, Dict, List, Mapping, Optional
langchain/chains/moderation.py CHANGED
@@ -40,7 +40,7 @@ class OpenAIModerationChain(Chain):
  openai_organization: Optional[str] = None
  _openai_pre_1_0: bool = Field(default=None)

- @root_validator()
+ @root_validator(pre=True)
  def validate_environment(cls, values: Dict) -> Dict:
  """Validate that api key and python package exists in environment."""
  openai_api_key = get_from_dict_or_env(
langchain/chains/natbot/base.py CHANGED
@@ -1,4 +1,5 @@
  """Implement an LLM driven browser."""
+
  from __future__ import annotations

  import warnings
langchain/chains/openai_functions/base.py CHANGED
@@ -1,4 +1,5 @@
  """Methods for creating chains that use OpenAI function-calling APIs."""
+
  from typing import (
  Any,
  Callable,
langchain/chains/openai_functions/extraction.py CHANGED
@@ -50,13 +50,13 @@ Passage:
  "LangChain has introduced a method called `with_structured_output` that"
  "is available on ChatModels capable of tool calling."
  "You can read more about the method here: "
- "https://python.langchain.com/docs/modules/model_io/chat/structured_output/"
+ "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
  "Please follow our extraction use case documentation for more guidelines"
  "on how to do information extraction with LLMs."
- "https://python.langchain.com/docs/use_cases/extraction/."
+ "<https://python.langchain.com/docs/use_cases/extraction/>. "
  "If you notice other issues, please provide "
  "feedback here:"
- "https://github.com/langchain-ai/langchain/discussions/18154"
+ "<https://github.com/langchain-ai/langchain/discussions/18154>"
  ),
  removal="0.3.0",
  alternative=(
@@ -120,13 +120,13 @@ def create_extraction_chain(
  "LangChain has introduced a method called `with_structured_output` that"
  "is available on ChatModels capable of tool calling."
  "You can read more about the method here: "
- "https://python.langchain.com/docs/modules/model_io/chat/structured_output/"
+ "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
  "Please follow our extraction use case documentation for more guidelines"
  "on how to do information extraction with LLMs."
- "https://python.langchain.com/docs/use_cases/extraction/."
+ "<https://python.langchain.com/docs/use_cases/extraction/>. "
  "If you notice other issues, please provide "
  "feedback here:"
- "https://github.com/langchain-ai/langchain/discussions/18154"
+ "<https://github.com/langchain-ai/langchain/discussions/18154>"
  ),
  removal="0.3.0",
  alternative=(
langchain/chains/openai_tools/extraction.py CHANGED
@@ -20,14 +20,14 @@ If a property is not present and is not required in the function parameters, do
  "LangChain has introduced a method called `with_structured_output` that"
  "is available on ChatModels capable of tool calling."
  "You can read more about the method here: "
- "https://python.langchain.com/docs/modules/model_io/chat/structured_output/"
+ "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
  "Please follow our extraction use case documentation for more guidelines"
  "on how to do information extraction with LLMs."
- "https://python.langchain.com/docs/use_cases/extraction/."
+ "<https://python.langchain.com/docs/use_cases/extraction/>. "
  "with_structured_output does not currently support a list of pydantic schemas. "
  "If this is a blocker or if you notice other issues, please provide "
  "feedback here:"
- "https://github.com/langchain-ai/langchain/discussions/18154"
+ "<https://github.com/langchain-ai/langchain/discussions/18154>"
  ),
  removal="0.3.0",
  alternative=(
langchain/chains/qa_generation/base.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import json
  from typing import Any, Dict, List, Optional

+ from langchain_core._api import deprecated
  from langchain_core.callbacks import CallbackManagerForChainRun
  from langchain_core.language_models import BaseLanguageModel
  from langchain_core.prompts import BasePromptTemplate
@@ -14,8 +15,53 @@ from langchain.chains.llm import LLMChain
  from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR


+ @deprecated(
+ since="0.2.7",
+ alternative=(
+ "example in API reference with more detail: "
+ "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html" # noqa: E501
+ ),
+ removal="1.0",
+ )
  class QAGenerationChain(Chain):
- """Base class for question-answer generation chains."""
+ """Base class for question-answer generation chains.
+
+ This class is deprecated. See below for an alternative implementation.
+
+ Advantages of this implementation include:
+
+ - Supports async and streaming;
+ - Surfaces prompt and text splitter for easier customization;
+ - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
+ as well as robustness to markdown.
+
+ .. code-block:: python
+
+ from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
+ # Note: import PROMPT if using a legacy non-chat model.
+ from langchain_core.output_parsers import JsonOutputParser
+ from langchain_core.runnables import (
+ RunnableLambda,
+ RunnableParallel,
+ RunnablePassthrough,
+ )
+ from langchain_core.runnables.base import RunnableEach
+ from langchain_openai import ChatOpenAI
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+ llm = ChatOpenAI()
+ text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
+ split_text = RunnableLambda(
+ lambda x: text_splitter.create_documents([x])
+ )
+
+ chain = RunnableParallel(
+ text=RunnablePassthrough(),
+ questions=(
+ split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
+ )
+ )
+ """

  llm_chain: LLMChain
  """LLM Chain that generates responses from user input and context."""
langchain/chains/qa_with_sources/__init__.py CHANGED
@@ -1,4 +1,5 @@
  """Load question answering with sources chains."""
+
  from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain

  __all__ = ["load_qa_with_sources_chain"]
langchain/chains/qa_with_sources/loading.py CHANGED
@@ -1,4 +1,5 @@
  """Load question answering with sources chains."""
+
  from __future__ import annotations

  from typing import Any, Mapping, Optional, Protocol