langchain 0.3.22__py3-none-any.whl → 0.3.24__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (170)
  1. langchain/_api/module_import.py +3 -3
  2. langchain/agents/agent.py +104 -109
  3. langchain/agents/agent_iterator.py +11 -15
  4. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
  5. langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
  6. langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
  7. langchain/agents/chat/base.py +7 -6
  8. langchain/agents/chat/output_parser.py +2 -1
  9. langchain/agents/conversational/base.py +5 -4
  10. langchain/agents/conversational_chat/base.py +9 -8
  11. langchain/agents/format_scratchpad/log.py +1 -3
  12. langchain/agents/format_scratchpad/log_to_messages.py +3 -5
  13. langchain/agents/format_scratchpad/openai_functions.py +4 -4
  14. langchain/agents/format_scratchpad/tools.py +3 -3
  15. langchain/agents/format_scratchpad/xml.py +1 -3
  16. langchain/agents/initialize.py +2 -1
  17. langchain/agents/json_chat/base.py +3 -2
  18. langchain/agents/loading.py +5 -5
  19. langchain/agents/mrkl/base.py +6 -5
  20. langchain/agents/openai_assistant/base.py +13 -17
  21. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
  22. langchain/agents/openai_functions_agent/base.py +13 -12
  23. langchain/agents/openai_functions_multi_agent/base.py +15 -14
  24. langchain/agents/openai_tools/base.py +2 -1
  25. langchain/agents/output_parsers/openai_functions.py +2 -2
  26. langchain/agents/output_parsers/openai_tools.py +6 -6
  27. langchain/agents/output_parsers/react_json_single_input.py +2 -1
  28. langchain/agents/output_parsers/self_ask.py +2 -1
  29. langchain/agents/output_parsers/tools.py +7 -7
  30. langchain/agents/react/agent.py +3 -2
  31. langchain/agents/react/base.py +4 -3
  32. langchain/agents/schema.py +3 -3
  33. langchain/agents/self_ask_with_search/base.py +2 -1
  34. langchain/agents/structured_chat/base.py +9 -8
  35. langchain/agents/structured_chat/output_parser.py +2 -1
  36. langchain/agents/tool_calling_agent/base.py +3 -2
  37. langchain/agents/tools.py +4 -4
  38. langchain/agents/types.py +3 -3
  39. langchain/agents/utils.py +1 -1
  40. langchain/agents/xml/base.py +7 -6
  41. langchain/callbacks/streaming_aiter.py +3 -2
  42. langchain/callbacks/streaming_aiter_final_only.py +3 -3
  43. langchain/callbacks/streaming_stdout_final_only.py +3 -3
  44. langchain/chains/api/base.py +11 -12
  45. langchain/chains/base.py +47 -50
  46. langchain/chains/combine_documents/base.py +23 -23
  47. langchain/chains/combine_documents/map_reduce.py +12 -12
  48. langchain/chains/combine_documents/map_rerank.py +16 -15
  49. langchain/chains/combine_documents/reduce.py +17 -17
  50. langchain/chains/combine_documents/refine.py +12 -12
  51. langchain/chains/combine_documents/stuff.py +10 -10
  52. langchain/chains/constitutional_ai/base.py +9 -9
  53. langchain/chains/conversation/base.py +2 -4
  54. langchain/chains/conversational_retrieval/base.py +30 -30
  55. langchain/chains/elasticsearch_database/base.py +13 -13
  56. langchain/chains/example_generator.py +1 -3
  57. langchain/chains/flare/base.py +13 -12
  58. langchain/chains/flare/prompts.py +2 -4
  59. langchain/chains/hyde/base.py +8 -8
  60. langchain/chains/llm.py +31 -30
  61. langchain/chains/llm_checker/base.py +6 -6
  62. langchain/chains/llm_math/base.py +10 -10
  63. langchain/chains/llm_summarization_checker/base.py +6 -6
  64. langchain/chains/loading.py +12 -14
  65. langchain/chains/mapreduce.py +7 -6
  66. langchain/chains/moderation.py +8 -8
  67. langchain/chains/natbot/base.py +6 -6
  68. langchain/chains/openai_functions/base.py +8 -10
  69. langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
  70. langchain/chains/openai_functions/extraction.py +3 -3
  71. langchain/chains/openai_functions/openapi.py +12 -12
  72. langchain/chains/openai_functions/qa_with_structure.py +4 -4
  73. langchain/chains/openai_functions/utils.py +2 -2
  74. langchain/chains/openai_tools/extraction.py +2 -2
  75. langchain/chains/prompt_selector.py +3 -3
  76. langchain/chains/qa_generation/base.py +5 -5
  77. langchain/chains/qa_with_sources/base.py +21 -21
  78. langchain/chains/qa_with_sources/loading.py +2 -1
  79. langchain/chains/qa_with_sources/retrieval.py +6 -6
  80. langchain/chains/qa_with_sources/vector_db.py +8 -8
  81. langchain/chains/query_constructor/base.py +4 -3
  82. langchain/chains/query_constructor/parser.py +5 -4
  83. langchain/chains/question_answering/chain.py +3 -2
  84. langchain/chains/retrieval.py +2 -2
  85. langchain/chains/retrieval_qa/base.py +16 -16
  86. langchain/chains/router/base.py +12 -11
  87. langchain/chains/router/embedding_router.py +12 -11
  88. langchain/chains/router/llm_router.py +12 -12
  89. langchain/chains/router/multi_prompt.py +3 -3
  90. langchain/chains/router/multi_retrieval_qa.py +5 -4
  91. langchain/chains/sequential.py +18 -18
  92. langchain/chains/sql_database/query.py +4 -4
  93. langchain/chains/structured_output/base.py +14 -13
  94. langchain/chains/summarize/chain.py +4 -3
  95. langchain/chains/transform.py +12 -11
  96. langchain/chat_models/base.py +34 -31
  97. langchain/embeddings/__init__.py +1 -1
  98. langchain/embeddings/base.py +4 -4
  99. langchain/embeddings/cache.py +19 -18
  100. langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
  101. langchain/evaluation/comparison/eval_chain.py +10 -10
  102. langchain/evaluation/criteria/eval_chain.py +11 -10
  103. langchain/evaluation/embedding_distance/base.py +21 -21
  104. langchain/evaluation/exact_match/base.py +3 -3
  105. langchain/evaluation/loading.py +7 -8
  106. langchain/evaluation/qa/eval_chain.py +7 -6
  107. langchain/evaluation/regex_match/base.py +3 -3
  108. langchain/evaluation/schema.py +6 -5
  109. langchain/evaluation/scoring/eval_chain.py +9 -9
  110. langchain/evaluation/string_distance/base.py +23 -23
  111. langchain/hub.py +2 -1
  112. langchain/indexes/_sql_record_manager.py +8 -7
  113. langchain/indexes/vectorstore.py +11 -11
  114. langchain/llms/__init__.py +3 -3
  115. langchain/memory/buffer.py +13 -13
  116. langchain/memory/buffer_window.py +5 -5
  117. langchain/memory/chat_memory.py +5 -5
  118. langchain/memory/combined.py +10 -10
  119. langchain/memory/entity.py +8 -7
  120. langchain/memory/readonly.py +4 -4
  121. langchain/memory/simple.py +5 -5
  122. langchain/memory/summary.py +8 -8
  123. langchain/memory/summary_buffer.py +11 -11
  124. langchain/memory/token_buffer.py +5 -5
  125. langchain/memory/utils.py +2 -2
  126. langchain/memory/vectorstore.py +15 -14
  127. langchain/memory/vectorstore_token_buffer_memory.py +7 -7
  128. langchain/model_laboratory.py +4 -3
  129. langchain/output_parsers/combining.py +5 -5
  130. langchain/output_parsers/datetime.py +1 -2
  131. langchain/output_parsers/enum.py +4 -5
  132. langchain/output_parsers/pandas_dataframe.py +5 -5
  133. langchain/output_parsers/regex.py +4 -4
  134. langchain/output_parsers/regex_dict.py +4 -4
  135. langchain/output_parsers/retry.py +2 -2
  136. langchain/output_parsers/structured.py +5 -5
  137. langchain/output_parsers/yaml.py +3 -3
  138. langchain/pydantic_v1/__init__.py +1 -6
  139. langchain/pydantic_v1/dataclasses.py +1 -5
  140. langchain/pydantic_v1/main.py +1 -5
  141. langchain/retrievers/contextual_compression.py +3 -3
  142. langchain/retrievers/document_compressors/base.py +3 -2
  143. langchain/retrievers/document_compressors/chain_extract.py +4 -3
  144. langchain/retrievers/document_compressors/chain_filter.py +3 -2
  145. langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
  146. langchain/retrievers/document_compressors/cross_encoder.py +1 -2
  147. langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
  148. langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
  149. langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
  150. langchain/retrievers/ensemble.py +15 -19
  151. langchain/retrievers/merger_retriever.py +7 -12
  152. langchain/retrievers/multi_query.py +14 -13
  153. langchain/retrievers/multi_vector.py +4 -4
  154. langchain/retrievers/parent_document_retriever.py +9 -8
  155. langchain/retrievers/re_phraser.py +2 -3
  156. langchain/retrievers/self_query/base.py +13 -12
  157. langchain/retrievers/time_weighted_retriever.py +14 -14
  158. langchain/runnables/openai_functions.py +4 -3
  159. langchain/smith/evaluation/config.py +7 -6
  160. langchain/smith/evaluation/progress.py +3 -2
  161. langchain/smith/evaluation/runner_utils.py +58 -61
  162. langchain/smith/evaluation/string_run_evaluator.py +29 -29
  163. langchain/storage/encoder_backed.py +7 -11
  164. langchain/storage/file_system.py +5 -4
  165. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/METADATA +5 -3
  166. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/RECORD +169 -169
  167. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/WHEEL +1 -1
  168. langchain-0.3.24.dist-info/entry_points.txt +4 -0
  169. langchain-0.3.22.dist-info/entry_points.txt +0 -5
  170. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/licenses/LICENSE +0 -0
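
Most of the churn in the chain modules below is a mechanical typing migration rather than a behavior change: the typing.List/typing.Dict/typing.Tuple aliases become the builtin generics list/dict/tuple (PEP 585), Sequence is imported from collections.abc instead of typing, and typing.cast now receives the builtin list. A minimal sketch of the pattern (illustrative code, not taken from the package; assumes Python 3.9+):

    # Old style: generic aliases imported from typing.
    from typing import Dict, List, cast  # noqa: F401  (Dict/List shown for contrast)

    def output_keys_old(output_key: str) -> List[str]:
        return [output_key]

    # New style: builtin types are subscriptable on Python 3.9+ (PEP 585),
    # and abstract collection types come from collections.abc.
    from collections.abc import Sequence

    def output_keys_new(output_key: str) -> list[str]:
        return [output_key]

    def first_key(keys: Sequence[str]) -> str:
        return keys[0]

    typed = cast(list, ["a", "b"])  # cast() takes builtin generics too; no runtime effect
    print(output_keys_old("text"), output_keys_new("text"), first_key(typed))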
langchain/chains/elasticsearch_database/base.py CHANGED
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Optional
 
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
@@ -44,8 +44,8 @@ class ElasticsearchDatabaseChain(Chain):
     """Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
     top_k: int = 10
     """Number of results to return from the query"""
-    ignore_indices: Optional[List[str]] = None
-    include_indices: Optional[List[str]] = None
+    ignore_indices: Optional[list[str]] = None
+    include_indices: Optional[list[str]] = None
     input_key: str = "question"  #: :meta private:
     output_key: str = "result"  #: :meta private:
     sample_documents_in_index_info: int = 3
@@ -66,7 +66,7 @@ class ElasticsearchDatabaseChain(Chain):
         return self
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Return the singular input key.
 
         :meta private:
@@ -74,7 +74,7 @@ class ElasticsearchDatabaseChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return the singular output key.
 
         :meta private:
@@ -84,7 +84,7 @@ class ElasticsearchDatabaseChain(Chain):
         else:
             return [self.output_key, INTERMEDIATE_STEPS_KEY]
 
-    def _list_indices(self) -> List[str]:
+    def _list_indices(self) -> list[str]:
         all_indices = [
             index["index"] for index in self.database.cat.indices(format="json")
         ]
@@ -96,7 +96,7 @@ class ElasticsearchDatabaseChain(Chain):
 
         return all_indices
 
-    def _get_indices_infos(self, indices: List[str]) -> str:
+    def _get_indices_infos(self, indices: list[str]) -> str:
         mappings = self.database.indices.get_mapping(index=",".join(indices))
         if self.sample_documents_in_index_info > 0:
             for k, v in mappings.items():
@@ -114,15 +114,15 @@ class ElasticsearchDatabaseChain(Chain):
             ]
         )
 
-    def _search(self, indices: List[str], query: str) -> str:
+    def _search(self, indices: list[str], query: str) -> str:
         result = self.database.search(index=",".join(indices), body=query)
         return str(result)
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         input_text = f"{inputs[self.input_key]}\nESQuery:"
         _run_manager.on_text(input_text, verbose=self.verbose)
@@ -134,7 +134,7 @@ class ElasticsearchDatabaseChain(Chain):
             "indices_info": indices_info,
             "stop": ["\nESResult:"],
         }
-        intermediate_steps: List = []
+        intermediate_steps: list = []
         try:
             intermediate_steps.append(query_inputs)  # input: es generation
             es_cmd = self.query_chain.invoke(
@@ -163,14 +163,14 @@ class ElasticsearchDatabaseChain(Chain):
 
             intermediate_steps.append(final_result)  # output: final answer
             _run_manager.on_text(final_result, color="green", verbose=self.verbose)
-            chain_result: Dict[str, Any] = {self.output_key: final_result}
+            chain_result: dict[str, Any] = {self.output_key: final_result}
             if self.return_intermediate_steps:
                 chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
             return chain_result
         except Exception as exc:
             # Append intermediate steps to exception, to aid in logging and later
             # improvement of few shot prompt seeds
-            exc.intermediate_steps = intermediate_steps  # type: ignore
+            exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
            raise exc
 
     @property
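
One non-typing change above narrows a blanket type: ignore comment to the specific mypy error code attr-defined, so any other error category on that line would still surface. A standalone sketch of why that code fires (illustrative; attach_steps is a hypothetical helper, not package code):

    def attach_steps(exc: Exception, steps: list) -> Exception:
        # mypy reports dynamic attribute assignment on Exception as error
        # code "attr-defined"; scoping the ignore comment to that code
        # silences only this diagnostic, not unrelated ones.
        exc.intermediate_steps = steps  # type: ignore[attr-defined]
        return exc

    err = attach_steps(ValueError("search failed"), [{"step": "es generation"}])
    print(err.intermediate_steps)  # type: ignore[attr-defined]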
langchain/chains/example_generator.py CHANGED
@@ -1,5 +1,3 @@
-from typing import List
-
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import StrOutputParser
 from langchain_core.prompts.few_shot import FewShotPromptTemplate
@@ -9,7 +7,7 @@ TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
 
 
 def generate_example(
-    examples: List[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
+    examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
 ) -> str:
     """Return another example given a list of examples for a prompt."""
     prompt = FewShotPromptTemplate(
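
For context, generate_example wraps the supplied examples in a few-shot prompt whose suffix asks the model for one more example. A hedged sketch of the prompt-assembly step only (illustrative values; assumes langchain_core is installed, no model call needed):

    from langchain_core.prompts import PromptTemplate
    from langchain_core.prompts.few_shot import FewShotPromptTemplate

    example_prompt = PromptTemplate.from_template("Q: {q}\nA: {a}")
    prompt = FewShotPromptTemplate(
        examples=[{"q": "2+2?", "a": "4"}],
        example_prompt=example_prompt,
        suffix="Add another example.",  # mirrors TEST_GEN_TEMPLATE_SUFFIX
        input_variables=[],
    )
    # Renders the formatted examples followed by the suffix.
    print(prompt.format())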
langchain/chains/flare/base.py CHANGED
@@ -2,7 +2,8 @@ from __future__ import annotations
 
 import logging
 import re
-from typing import Any, Dict, List, Optional, Sequence, Tuple
+from collections.abc import Sequence
+from typing import Any, Optional
 
 from langchain_core.callbacks import (
     CallbackManagerForChainRun,
@@ -26,7 +27,7 @@ from langchain.chains.llm import LLMChain
 logger = logging.getLogger(__name__)
 
 
-def _extract_tokens_and_log_probs(response: AIMessage) -> Tuple[List[str], List[float]]:
+def _extract_tokens_and_log_probs(response: AIMessage) -> tuple[list[str], list[float]]:
     """Extract tokens and log probabilities from chat model response."""
     tokens = []
     log_probs = []
@@ -47,7 +48,7 @@ class QuestionGeneratorChain(LLMChain):
         return False
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Input keys for the chain."""
         return ["user_input", "context", "response"]
 
@@ -58,7 +59,7 @@ def _low_confidence_spans(
     min_prob: float,
     min_token_gap: int,
     num_pad_tokens: int,
-) -> List[str]:
+) -> list[str]:
     try:
         import numpy as np
 
@@ -117,22 +118,22 @@ class FlareChain(Chain):
     """Whether to start with retrieval."""
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Input keys for the chain."""
         return ["user_input"]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Output keys for the chain."""
         return ["response"]
 
     def _do_generation(
         self,
-        questions: List[str],
+        questions: list[str],
         user_input: str,
         response: str,
         _run_manager: CallbackManagerForChainRun,
-    ) -> Tuple[str, bool]:
+    ) -> tuple[str, bool]:
         callbacks = _run_manager.get_child()
         docs = []
         for question in questions:
@@ -153,12 +154,12 @@ class FlareChain(Chain):
 
     def _do_retrieval(
         self,
-        low_confidence_spans: List[str],
+        low_confidence_spans: list[str],
         _run_manager: CallbackManagerForChainRun,
         user_input: str,
         response: str,
         initial_response: str,
-    ) -> Tuple[str, bool]:
+    ) -> tuple[str, bool]:
         question_gen_inputs = [
             {
                 "user_input": user_input,
@@ -187,9 +188,9 @@ class FlareChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
 
         user_input = inputs[self.input_keys[0]]
langchain/chains/flare/prompts.py CHANGED
@@ -1,16 +1,14 @@
-from typing import Tuple
-
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import PromptTemplate
 
 
-class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
+class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
     """Output parser that checks if the output is finished."""
 
     finished_value: str = "FINISHED"
     """Value that indicates the output is finished."""
 
-    def parse(self, text: str) -> Tuple[str, bool]:
+    def parse(self, text: str) -> tuple[str, bool]:
         cleaned = text.strip()
         finished = self.finished_value in cleaned
         return cleaned.replace(self.finished_value, ""), finished
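
The parse logic above is small enough to check in isolation; a standalone re-implementation (illustrative, avoiding the langchain_core dependency) behaves like this:

    def parse(text: str, finished_value: str = "FINISHED") -> tuple[str, bool]:
        # Mirrors FinishedOutputParser.parse: strip, test for the sentinel,
        # then return the text with the sentinel removed plus a finished flag.
        cleaned = text.strip()
        finished = finished_value in cleaned
        return cleaned.replace(finished_value, ""), finished

    print(parse("The answer is 42. FINISHED"))  # ('The answer is 42. ', True)
    print(parse("Still researching..."))        # ('Still researching...', False)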
langchain/chains/hyde/base.py CHANGED
@@ -6,7 +6,7 @@ https://arxiv.org/abs/2212.10496
 from __future__ import annotations
 
 import logging
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.embeddings import Embeddings
@@ -38,23 +38,23 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
     )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Input keys for Hyde's LLM chain."""
         return self.llm_chain.input_schema.model_json_schema()["required"]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Output keys for Hyde's LLM chain."""
         if isinstance(self.llm_chain, LLMChain):
             return self.llm_chain.output_keys
         else:
             return ["text"]
 
-    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Call the base embeddings."""
         return self.base_embeddings.embed_documents(texts)
 
-    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
+    def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
         """Combine embeddings into final embeddings."""
         try:
             import numpy as np
@@ -73,7 +73,7 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
         num_vectors = len(embeddings)
         return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
 
-    def embed_query(self, text: str) -> List[float]:
+    def embed_query(self, text: str) -> list[float]:
         """Generate a hypothetical document and embedded it."""
         var_name = self.input_keys[0]
         result = self.llm_chain.invoke({var_name: text})
@@ -86,9 +86,9 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         """Call the internal llm chain."""
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         return self.llm_chain.invoke(
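
The numpy-free fallback in combine_embeddings averages the hypothetical-document embeddings dimension by dimension; the list comprehension shown in the hunk above can be checked standalone (illustrative values):

    # Two 3-dimensional embeddings; zip(*embeddings) groups values by dimension.
    embeddings = [[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]]
    num_vectors = len(embeddings)
    mean = [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
    print(mean)  # [2.0, 3.0, 4.0]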
langchain/chains/llm.py CHANGED
@@ -3,7 +3,8 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
+from collections.abc import Sequence
+from typing import Any, Optional, Union, cast
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import (
@@ -100,7 +101,7 @@ class LLMChain(Chain):
     )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Will be whatever keys the prompt expects.
 
         :meta private:
@@ -108,7 +109,7 @@ class LLMChain(Chain):
         return self.prompt.input_variables
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Will always return text key.
 
         :meta private:
@@ -120,15 +121,15 @@ class LLMChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         response = self.generate([inputs], run_manager=run_manager)
         return self.create_outputs(response)[0]
 
     def generate(
         self,
-        input_list: List[Dict[str, Any]],
+        input_list: list[dict[str, Any]],
         run_manager: Optional[CallbackManagerForChainRun] = None,
     ) -> LLMResult:
         """Generate LLM result from inputs."""
@@ -143,9 +144,9 @@ class LLMChain(Chain):
             )
         else:
             results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
-                cast(List, prompts), {"callbacks": callbacks}
+                cast(list, prompts), {"callbacks": callbacks}
             )
-        generations: List[List[Generation]] = []
+        generations: list[list[Generation]] = []
         for res in results:
             if isinstance(res, BaseMessage):
                 generations.append([ChatGeneration(message=res)])
@@ -155,7 +156,7 @@ class LLMChain(Chain):
 
     async def agenerate(
         self,
-        input_list: List[Dict[str, Any]],
+        input_list: list[dict[str, Any]],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
     ) -> LLMResult:
         """Generate LLM result from inputs."""
@@ -170,9 +171,9 @@ class LLMChain(Chain):
             )
         else:
             results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
-                cast(List, prompts), {"callbacks": callbacks}
+                cast(list, prompts), {"callbacks": callbacks}
             )
-        generations: List[List[Generation]] = []
+        generations: list[list[Generation]] = []
         for res in results:
             if isinstance(res, BaseMessage):
                 generations.append([ChatGeneration(message=res)])
@@ -182,9 +183,9 @@ class LLMChain(Chain):
 
     def prep_prompts(
         self,
-        input_list: List[Dict[str, Any]],
+        input_list: list[dict[str, Any]],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
+    ) -> tuple[list[PromptValue], Optional[list[str]]]:
         """Prepare prompts from inputs."""
         stop = None
         if len(input_list) == 0:
@@ -208,9 +209,9 @@ class LLMChain(Chain):
 
     async def aprep_prompts(
         self,
-        input_list: List[Dict[str, Any]],
+        input_list: list[dict[str, Any]],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
+    ) -> tuple[list[PromptValue], Optional[list[str]]]:
         """Prepare prompts from inputs."""
         stop = None
         if len(input_list) == 0:
@@ -233,8 +234,8 @@ class LLMChain(Chain):
         return prompts, stop
 
     def apply(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> List[Dict[str, str]]:
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> list[dict[str, str]]:
         """Utilize the LLM generate method for speed gains."""
         callback_manager = CallbackManager.configure(
             callbacks, self.callbacks, self.verbose
@@ -254,8 +255,8 @@ class LLMChain(Chain):
         return outputs
 
     async def aapply(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> List[Dict[str, str]]:
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> list[dict[str, str]]:
         """Utilize the LLM generate method for speed gains."""
         callback_manager = AsyncCallbackManager.configure(
             callbacks, self.callbacks, self.verbose
@@ -278,7 +279,7 @@ class LLMChain(Chain):
     def _run_output_key(self) -> str:
         return self.output_key
 
-    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
+    def create_outputs(self, llm_result: LLMResult) -> list[dict[str, Any]]:
         """Create outputs from response."""
         result = [
             # Get the text of the top generated string.
@@ -294,9 +295,9 @@ class LLMChain(Chain):
 
     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         response = await self.agenerate([inputs], run_manager=run_manager)
         return self.create_outputs(response)[0]
 
@@ -336,7 +337,7 @@ class LLMChain(Chain):
 
     def predict_and_parse(
         self, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Union[str, List[str], Dict[str, Any]]:
+    ) -> Union[str, list[str], dict[str, Any]]:
         """Call predict and then parse the results."""
         warnings.warn(
             "The predict_and_parse method is deprecated, "
@@ -350,7 +351,7 @@ class LLMChain(Chain):
 
     async def apredict_and_parse(
         self, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Union[str, List[str], Dict[str, str]]:
+    ) -> Union[str, list[str], dict[str, str]]:
         """Call apredict and then parse the results."""
         warnings.warn(
             "The apredict_and_parse method is deprecated, "
@@ -363,8 +364,8 @@ class LLMChain(Chain):
         return result
 
     def apply_and_parse(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         """Call apply and then parse the results."""
         warnings.warn(
             "The apply_and_parse method is deprecated, "
@@ -374,8 +375,8 @@ class LLMChain(Chain):
         return self._parse_generation(result)
 
     def _parse_generation(
-        self, generation: List[Dict[str, str]]
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
+        self, generation: list[dict[str, str]]
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         if self.prompt.output_parser is not None:
             return [
                 self.prompt.output_parser.parse(res[self.output_key])
@@ -385,8 +386,8 @@ class LLMChain(Chain):
         return generation
 
     async def aapply_and_parse(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
+        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+    ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         """Call apply and then parse the results."""
         warnings.warn(
             "The aapply_and_parse method is deprecated, "
langchain/chains/llm_checker/base.py CHANGED
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
@@ -107,7 +107,7 @@ class LLMCheckerChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: Dict) -> Any:
+    def raise_deprecation(cls, values: dict) -> Any:
         if "llm" in values:
             warnings.warn(
                 "Directly instantiating an LLMCheckerChain with an llm is deprecated. "
@@ -135,7 +135,7 @@ class LLMCheckerChain(Chain):
         return values
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Return the singular input key.
 
         :meta private:
@@ -143,7 +143,7 @@ class LLMCheckerChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return the singular output key.
 
         :meta private:
@@ -152,9 +152,9 @@ class LLMCheckerChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         question = inputs[self.input_key]
 
langchain/chains/llm_math/base.py CHANGED
@@ -5,7 +5,7 @@ from __future__ import annotations
 import math
 import re
 import warnings
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import (
@@ -163,7 +163,7 @@ class LLMMathChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: Dict) -> Any:
+    def raise_deprecation(cls, values: dict) -> Any:
         try:
             import numexpr  # noqa: F401
         except ImportError:
@@ -183,7 +183,7 @@ class LLMMathChain(Chain):
         return values
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect input key.
 
         :meta private:
@@ -191,7 +191,7 @@ class LLMMathChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Expect output key.
 
         :meta private:
@@ -221,7 +221,7 @@ class LLMMathChain(Chain):
 
     def _process_llm_result(
         self, llm_output: str, run_manager: CallbackManagerForChainRun
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         run_manager.on_text(llm_output, color="green", verbose=self.verbose)
         llm_output = llm_output.strip()
         text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
@@ -243,7 +243,7 @@ class LLMMathChain(Chain):
         self,
         llm_output: str,
         run_manager: AsyncCallbackManagerForChainRun,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
         llm_output = llm_output.strip()
         text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
@@ -263,9 +263,9 @@ class LLMMathChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         _run_manager.on_text(inputs[self.input_key])
         llm_output = self.llm_chain.predict(
@@ -277,9 +277,9 @@ class LLMMathChain(Chain):
 
     async def _acall(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         await _run_manager.on_text(inputs[self.input_key])
         llm_output = await self.llm_chain.apredict(
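
The fence-extraction regex visible (unchanged) in _process_llm_result pulls the math expression out of a leading ```text block in the model's reply; a standalone check of that pattern (illustrative input):

    import re

    # Same pattern as in the hunk above: grab everything between a leading
    # ```text fence and the closing fence, across newlines (re.DOTALL).
    llm_output = "```text\n37593 * 67\n```"
    text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
    if text_match:
        print(text_match.group(1).strip())  # 37593 * 67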
langchain/chains/llm_summarization_checker/base.py CHANGED
@@ -4,7 +4,7 @@ from __future__ import annotations
 
 import warnings
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core._api import deprecated
 from langchain_core.callbacks import CallbackManagerForChainRun
@@ -112,7 +112,7 @@ class LLMSummarizationCheckerChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: Dict) -> Any:
+    def raise_deprecation(cls, values: dict) -> Any:
         if "llm" in values:
             warnings.warn(
                 "Directly instantiating an LLMSummarizationCheckerChain with an llm is "
@@ -131,7 +131,7 @@ class LLMSummarizationCheckerChain(Chain):
         return values
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Return the singular input key.
 
         :meta private:
@@ -139,7 +139,7 @@ class LLMSummarizationCheckerChain(Chain):
         return [self.input_key]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return the singular output key.
 
         :meta private:
@@ -148,9 +148,9 @@ class LLMSummarizationCheckerChain(Chain):
 
     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         all_true = False
         count = 0
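
The checker chains above share the same raise_deprecation pattern: a pydantic model_validator(mode="before") that inspects the raw constructor input. Because before-mode validators receive the unvalidated input mapping, a plain dict annotation is the natural fit, which is what this release switches to. A hedged, standalone sketch of the mechanism (illustrative model, not package code; assumes pydantic v2 is installed):

    import warnings
    from typing import Any

    from pydantic import BaseModel, model_validator

    class ExampleChain(BaseModel):
        prompt: str = "default"

        @model_validator(mode="before")
        @classmethod
        def raise_deprecation(cls, values: dict) -> Any:
            # mode="before" runs on the raw input mapping, prior to field
            # validation, so legacy keys can be intercepted and rewritten here.
            if "llm" in values:
                warnings.warn("Passing llm directly is deprecated.", stacklevel=2)
                values.pop("llm")
            return values

    print(ExampleChain(prompt="hi").prompt)  # hi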