langchain 0.3.23-py3-none-any.whl → 0.3.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (170)
  1. langchain/_api/module_import.py +3 -3
  2. langchain/agents/agent.py +104 -109
  3. langchain/agents/agent_iterator.py +11 -15
  4. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
  5. langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
  6. langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
  7. langchain/agents/chat/base.py +7 -6
  8. langchain/agents/chat/output_parser.py +2 -1
  9. langchain/agents/conversational/base.py +5 -4
  10. langchain/agents/conversational_chat/base.py +9 -8
  11. langchain/agents/format_scratchpad/log.py +1 -3
  12. langchain/agents/format_scratchpad/log_to_messages.py +3 -5
  13. langchain/agents/format_scratchpad/openai_functions.py +4 -4
  14. langchain/agents/format_scratchpad/tools.py +3 -3
  15. langchain/agents/format_scratchpad/xml.py +1 -3
  16. langchain/agents/initialize.py +2 -1
  17. langchain/agents/json_chat/base.py +3 -2
  18. langchain/agents/loading.py +5 -5
  19. langchain/agents/mrkl/base.py +6 -5
  20. langchain/agents/openai_assistant/base.py +17 -17
  21. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
  22. langchain/agents/openai_functions_agent/base.py +13 -12
  23. langchain/agents/openai_functions_multi_agent/base.py +15 -14
  24. langchain/agents/openai_tools/base.py +2 -1
  25. langchain/agents/output_parsers/openai_functions.py +2 -2
  26. langchain/agents/output_parsers/openai_tools.py +6 -6
  27. langchain/agents/output_parsers/react_json_single_input.py +2 -1
  28. langchain/agents/output_parsers/self_ask.py +2 -1
  29. langchain/agents/output_parsers/tools.py +7 -7
  30. langchain/agents/react/agent.py +3 -2
  31. langchain/agents/react/base.py +4 -3
  32. langchain/agents/schema.py +3 -3
  33. langchain/agents/self_ask_with_search/base.py +2 -1
  34. langchain/agents/structured_chat/base.py +9 -8
  35. langchain/agents/structured_chat/output_parser.py +2 -1
  36. langchain/agents/tool_calling_agent/base.py +3 -2
  37. langchain/agents/tools.py +4 -4
  38. langchain/agents/types.py +3 -3
  39. langchain/agents/utils.py +1 -1
  40. langchain/agents/xml/base.py +7 -6
  41. langchain/callbacks/streaming_aiter.py +3 -2
  42. langchain/callbacks/streaming_aiter_final_only.py +3 -3
  43. langchain/callbacks/streaming_stdout_final_only.py +3 -3
  44. langchain/chains/api/base.py +11 -12
  45. langchain/chains/base.py +47 -50
  46. langchain/chains/combine_documents/base.py +23 -23
  47. langchain/chains/combine_documents/map_reduce.py +12 -12
  48. langchain/chains/combine_documents/map_rerank.py +16 -15
  49. langchain/chains/combine_documents/reduce.py +17 -17
  50. langchain/chains/combine_documents/refine.py +12 -12
  51. langchain/chains/combine_documents/stuff.py +10 -10
  52. langchain/chains/constitutional_ai/base.py +9 -9
  53. langchain/chains/conversation/base.py +2 -4
  54. langchain/chains/conversational_retrieval/base.py +30 -30
  55. langchain/chains/elasticsearch_database/base.py +13 -13
  56. langchain/chains/example_generator.py +1 -3
  57. langchain/chains/flare/base.py +13 -12
  58. langchain/chains/flare/prompts.py +2 -4
  59. langchain/chains/hyde/base.py +8 -8
  60. langchain/chains/llm.py +31 -30
  61. langchain/chains/llm_checker/base.py +6 -6
  62. langchain/chains/llm_math/base.py +10 -10
  63. langchain/chains/llm_summarization_checker/base.py +6 -6
  64. langchain/chains/loading.py +12 -14
  65. langchain/chains/mapreduce.py +7 -6
  66. langchain/chains/moderation.py +8 -8
  67. langchain/chains/natbot/base.py +6 -6
  68. langchain/chains/openai_functions/base.py +8 -10
  69. langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
  70. langchain/chains/openai_functions/extraction.py +3 -3
  71. langchain/chains/openai_functions/openapi.py +12 -12
  72. langchain/chains/openai_functions/qa_with_structure.py +4 -4
  73. langchain/chains/openai_functions/utils.py +2 -2
  74. langchain/chains/openai_tools/extraction.py +2 -2
  75. langchain/chains/prompt_selector.py +3 -3
  76. langchain/chains/qa_generation/base.py +5 -5
  77. langchain/chains/qa_with_sources/base.py +21 -21
  78. langchain/chains/qa_with_sources/loading.py +2 -1
  79. langchain/chains/qa_with_sources/retrieval.py +6 -6
  80. langchain/chains/qa_with_sources/vector_db.py +8 -8
  81. langchain/chains/query_constructor/base.py +4 -3
  82. langchain/chains/query_constructor/parser.py +5 -4
  83. langchain/chains/question_answering/chain.py +3 -2
  84. langchain/chains/retrieval.py +2 -2
  85. langchain/chains/retrieval_qa/base.py +16 -16
  86. langchain/chains/router/base.py +12 -11
  87. langchain/chains/router/embedding_router.py +12 -11
  88. langchain/chains/router/llm_router.py +12 -12
  89. langchain/chains/router/multi_prompt.py +3 -3
  90. langchain/chains/router/multi_retrieval_qa.py +5 -4
  91. langchain/chains/sequential.py +18 -18
  92. langchain/chains/sql_database/query.py +21 -5
  93. langchain/chains/structured_output/base.py +14 -13
  94. langchain/chains/summarize/chain.py +4 -3
  95. langchain/chains/transform.py +12 -11
  96. langchain/chat_models/base.py +27 -31
  97. langchain/embeddings/__init__.py +1 -1
  98. langchain/embeddings/base.py +4 -6
  99. langchain/embeddings/cache.py +19 -18
  100. langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
  101. langchain/evaluation/comparison/eval_chain.py +10 -10
  102. langchain/evaluation/criteria/eval_chain.py +11 -10
  103. langchain/evaluation/embedding_distance/base.py +21 -21
  104. langchain/evaluation/exact_match/base.py +3 -3
  105. langchain/evaluation/loading.py +7 -8
  106. langchain/evaluation/qa/eval_chain.py +7 -6
  107. langchain/evaluation/regex_match/base.py +3 -3
  108. langchain/evaluation/schema.py +6 -5
  109. langchain/evaluation/scoring/eval_chain.py +9 -9
  110. langchain/evaluation/string_distance/base.py +23 -23
  111. langchain/hub.py +2 -1
  112. langchain/indexes/_sql_record_manager.py +8 -7
  113. langchain/indexes/vectorstore.py +11 -11
  114. langchain/llms/__init__.py +3 -3
  115. langchain/memory/buffer.py +13 -13
  116. langchain/memory/buffer_window.py +5 -5
  117. langchain/memory/chat_memory.py +5 -5
  118. langchain/memory/combined.py +10 -10
  119. langchain/memory/entity.py +8 -7
  120. langchain/memory/readonly.py +4 -4
  121. langchain/memory/simple.py +5 -5
  122. langchain/memory/summary.py +8 -8
  123. langchain/memory/summary_buffer.py +11 -11
  124. langchain/memory/token_buffer.py +5 -5
  125. langchain/memory/utils.py +2 -2
  126. langchain/memory/vectorstore.py +15 -14
  127. langchain/memory/vectorstore_token_buffer_memory.py +7 -7
  128. langchain/model_laboratory.py +4 -3
  129. langchain/output_parsers/combining.py +5 -5
  130. langchain/output_parsers/datetime.py +1 -2
  131. langchain/output_parsers/enum.py +4 -5
  132. langchain/output_parsers/pandas_dataframe.py +5 -5
  133. langchain/output_parsers/regex.py +4 -4
  134. langchain/output_parsers/regex_dict.py +4 -4
  135. langchain/output_parsers/retry.py +2 -2
  136. langchain/output_parsers/structured.py +5 -5
  137. langchain/output_parsers/yaml.py +3 -3
  138. langchain/pydantic_v1/__init__.py +1 -6
  139. langchain/pydantic_v1/dataclasses.py +1 -5
  140. langchain/pydantic_v1/main.py +1 -5
  141. langchain/retrievers/contextual_compression.py +3 -3
  142. langchain/retrievers/document_compressors/base.py +3 -2
  143. langchain/retrievers/document_compressors/chain_extract.py +4 -3
  144. langchain/retrievers/document_compressors/chain_filter.py +3 -2
  145. langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
  146. langchain/retrievers/document_compressors/cross_encoder.py +1 -2
  147. langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
  148. langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
  149. langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
  150. langchain/retrievers/ensemble.py +15 -19
  151. langchain/retrievers/merger_retriever.py +7 -12
  152. langchain/retrievers/multi_query.py +14 -13
  153. langchain/retrievers/multi_vector.py +4 -4
  154. langchain/retrievers/parent_document_retriever.py +9 -8
  155. langchain/retrievers/re_phraser.py +2 -3
  156. langchain/retrievers/self_query/base.py +13 -12
  157. langchain/retrievers/time_weighted_retriever.py +14 -14
  158. langchain/runnables/openai_functions.py +4 -3
  159. langchain/smith/evaluation/config.py +7 -6
  160. langchain/smith/evaluation/progress.py +3 -2
  161. langchain/smith/evaluation/runner_utils.py +66 -69
  162. langchain/smith/evaluation/string_run_evaluator.py +38 -31
  163. langchain/storage/encoder_backed.py +7 -11
  164. langchain/storage/file_system.py +5 -4
  165. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/METADATA +3 -3
  166. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/RECORD +169 -169
  167. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/WHEEL +1 -1
  168. langchain-0.3.25.dist-info/entry_points.txt +4 -0
  169. langchain-0.3.23.dist-info/entry_points.txt +0 -5
  170. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/licenses/LICENSE +0 -0
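
Most of the hunks that follow are a mechanical typing cleanup: the typing aliases Dict, List, and Type are replaced by the built-in generics dict, list, and type (PEP 585), and abstract types such as Mapping, Sequence, and Awaitable now come from collections.abc rather than typing. A minimal before/after sketch of the pattern (illustrative only, not taken from the diff):

    # Before: typing aliases, deprecated since Python 3.9
    from typing import Dict, List

    def route(inputs: Dict[str, str]) -> List[str]: ...

    # After: built-in generics (PEP 585)
    def route(inputs: dict[str, str]) -> list[str]: ...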

langchain/chains/router/llm_router.py

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Any, Dict, List, Optional, Type, cast
+from typing import Any, Optional, cast

 from langchain_core._api import deprecated
 from langchain_core.callbacks import (
@@ -114,42 +114,42 @@ class LLMRouterChain(RouterChain):
         return self

     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Will be whatever keys the LLM chain prompt expects.

         :meta private:
         """
         return self.llm_chain.input_keys

-    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
+    def _validate_outputs(self, outputs: dict[str, Any]) -> None:
         super()._validate_outputs(outputs)
         if not isinstance(outputs["next_inputs"], dict):
             raise ValueError

     def _call(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         callbacks = _run_manager.get_child()

         prediction = self.llm_chain.predict(callbacks=callbacks, **inputs)
         output = cast(
-            Dict[str, Any],
+            dict[str, Any],
             self.llm_chain.prompt.output_parser.parse(prediction),
         )
         return output

     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         callbacks = _run_manager.get_child()
         output = cast(
-            Dict[str, Any],
+            dict[str, Any],
             await self.llm_chain.apredict_and_parse(callbacks=callbacks, **inputs),
         )
         return output
@@ -163,14 +163,14 @@ class LLMRouterChain(RouterChain):
         return cls(llm_chain=llm_chain, **kwargs)


-class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
+class RouterOutputParser(BaseOutputParser[dict[str, str]]):
     """Parser for output of router chain in the multi-prompt chain."""

     default_destination: str = "DEFAULT"
-    next_inputs_type: Type = str
+    next_inputs_type: type = str
     next_inputs_inner_key: str = "input"

-    def parse(self, text: str) -> Dict[str, Any]:
+    def parse(self, text: str) -> dict[str, Any]:
         try:
             expected_keys = ["destination", "next_inputs"]
             parsed = parse_and_check_json_markdown(text, expected_keys)
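
For reference, RouterOutputParser expects the model to emit a markdown JSON block containing "destination" and "next_inputs" keys; a minimal usage sketch (the exact output shape is assumed from the parser defaults, where next_inputs_inner_key wraps the value under "input"):

    from langchain.chains.router.llm_router import RouterOutputParser

    parser = RouterOutputParser()
    text = '''```json
    {
        "destination": "physics",
        "next_inputs": "What is black body radiation?"
    }
    ```'''
    parsed = parser.parse(text)
    # Roughly: {'destination': 'physics',
    #           'next_inputs': {'input': 'What is black body radiation?'}}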

langchain/chains/router/multi_prompt.py

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from typing import Any, Dict, List, Optional
+from typing import Any, Optional

 from langchain_core._api import deprecated
 from langchain_core.language_models import BaseLanguageModel
@@ -142,14 +142,14 @@ class MultiPromptChain(MultiRouteChain):
     """  # noqa: E501

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         return ["text"]

     @classmethod
     def from_prompts(
         cls,
         llm: BaseLanguageModel,
-        prompt_infos: List[Dict[str, str]],
+        prompt_infos: list[dict[str, str]],
         default_chain: Optional[Chain] = None,
         **kwargs: Any,
     ) -> MultiPromptChain:
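
MultiPromptChain.from_prompts (whose signature is touched above) builds the router from a list of prompt descriptions. A rough sketch, assuming the conventional name/description/prompt_template keys and any chat model; note that MultiPromptChain is marked deprecated upstream but still works:

    from langchain.chains.router.multi_prompt import MultiPromptChain
    from langchain_openai import ChatOpenAI  # assumption: any BaseLanguageModel works

    prompt_infos = [
        {
            "name": "physics",
            "description": "Good for physics questions",
            "prompt_template": "You are a physicist.\n\nQuestion: {input}",
        },
        {
            "name": "math",
            "description": "Good for math questions",
            "prompt_template": "You are a mathematician.\n\nQuestion: {input}",
        },
    ]
    chain = MultiPromptChain.from_prompts(ChatOpenAI(), prompt_infos)
    chain.invoke({"input": "Why is the sky blue?"})  # routed answer under "text"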

langchain/chains/router/multi_retrieval_qa.py

@@ -2,7 +2,8 @@

 from __future__ import annotations

-from typing import Any, Dict, List, Mapping, Optional
+from collections.abc import Mapping
+from typing import Any, Optional

 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import PromptTemplate
@@ -19,7 +20,7 @@ from langchain.chains.router.multi_retrieval_prompt import (
 )


-class MultiRetrievalQAChain(MultiRouteChain):  # type: ignore[override]
+class MultiRetrievalQAChain(MultiRouteChain):
     """A multi-route chain that uses an LLM router chain to choose amongst retrieval
     qa chains."""

@@ -31,14 +32,14 @@ class MultiRetrievalQAChain(MultiRouteChain):  # type: ignore[override]
     """Default chain to use when router doesn't map input to one of the destinations."""

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         return ["result"]

     @classmethod
     def from_retrievers(
         cls,
         llm: BaseLanguageModel,
-        retriever_infos: List[Dict[str, Any]],
+        retriever_infos: list[dict[str, Any]],
         default_retriever: Optional[BaseRetriever] = None,
         default_prompt: Optional[PromptTemplate] = None,
         default_chain: Optional[Chain] = None,

langchain/chains/sequential.py

@@ -1,6 +1,6 @@
 """Chain pipeline where the outputs of one step feed directly into next."""

-from typing import Any, Dict, List, Optional
+from typing import Any, Optional

 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
@@ -16,9 +16,9 @@ from langchain.chains.base import Chain
 class SequentialChain(Chain):
     """Chain where the outputs of one chain feed directly into next."""

-    chains: List[Chain]
-    input_variables: List[str]
-    output_variables: List[str]  #: :meta private:
+    chains: list[Chain]
+    input_variables: list[str]
+    output_variables: list[str]  #: :meta private:
     return_all: bool = False

     model_config = ConfigDict(
@@ -27,7 +27,7 @@ class SequentialChain(Chain):
     )

     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Return expected input keys to the chain.

         :meta private:
@@ -35,7 +35,7 @@ class SequentialChain(Chain):
         return self.input_variables

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return output key.

         :meta private:
@@ -44,7 +44,7 @@ class SequentialChain(Chain):

     @model_validator(mode="before")
     @classmethod
-    def validate_chains(cls, values: Dict) -> Any:
+    def validate_chains(cls, values: dict) -> Any:
         """Validate that the correct inputs exist for all chains."""
         chains = values["chains"]
         input_variables = values["input_variables"]
@@ -97,9 +97,9 @@ class SequentialChain(Chain):

     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         known_values = inputs.copy()
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         for i, chain in enumerate(self.chains):
@@ -110,9 +110,9 @@ class SequentialChain(Chain):

     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         known_values = inputs.copy()
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         callbacks = _run_manager.get_child()
@@ -127,7 +127,7 @@ class SequentialChain(Chain):
 class SimpleSequentialChain(Chain):
     """Simple chain where the outputs of one step feed directly into next."""

-    chains: List[Chain]
+    chains: list[Chain]
     strip_outputs: bool = False
     input_key: str = "input"  #: :meta private:
     output_key: str = "output"  #: :meta private:
@@ -138,7 +138,7 @@ class SimpleSequentialChain(Chain):
     )

     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect input key.

         :meta private:
@@ -146,7 +146,7 @@ class SimpleSequentialChain(Chain):
         return [self.input_key]

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return output key.

         :meta private:
@@ -171,9 +171,9 @@ class SimpleSequentialChain(Chain):

     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         _input = inputs[self.input_key]
         color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
@@ -190,9 +190,9 @@ class SimpleSequentialChain(Chain):

     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         _input = inputs[self.input_key]
         color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
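
The sequential.py changes are annotation-only; behavior is unchanged. For context, a small self-contained sketch of SimpleSequentialChain wiring, using TransformChain so no model is required (the variable and key names here are illustrative):

    from langchain.chains import SimpleSequentialChain, TransformChain

    upper = TransformChain(
        input_variables=["input"],
        output_variables=["shouted"],
        transform=lambda inputs: {"shouted": inputs["input"].upper()},
    )
    exclaim = TransformChain(
        input_variables=["shouted"],
        output_variables=["output"],
        transform=lambda inputs: {"output": inputs["shouted"] + "!"},
    )
    pipeline = SimpleSequentialChain(chains=[upper, exclaim])
    pipeline.invoke({"input": "hello"})  # final string "HELLO!" under "output"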

langchain/chains/sql_database/query.py

@@ -1,6 +1,6 @@
 from __future__ import annotations

-from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypedDict, Union
+from typing import TYPE_CHECKING, Any, Optional, TypedDict, Union

 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import StrOutputParser
@@ -27,7 +27,7 @@ class SQLInputWithTables(TypedDict):
     """Input for a SQL Chain."""

     question: str
-    table_names_to_use: List[str]
+    table_names_to_use: list[str]


 def create_sql_query_chain(
@@ -35,7 +35,9 @@ def create_sql_query_chain(
     db: SQLDatabase,
     prompt: Optional[BasePromptTemplate] = None,
     k: int = 5,
-) -> Runnable[Union[SQLInput, SQLInputWithTables, Dict[str, Any]], str]:
+    *,
+    get_col_comments: Optional[bool] = None,
+) -> Runnable[Union[SQLInput, SQLInputWithTables, dict[str, Any]], str]:
     """Create a chain that generates SQL queries.

     *Security Note*: This chain generates SQL queries for the given database.
@@ -59,6 +61,8 @@ def create_sql_query_chain(
         prompt: The prompt to use. If none is provided, will choose one
             based on dialect. Defaults to None. See Prompt section below for more.
         k: The number of results per select statement to return. Defaults to 5.
+        get_col_comments: Whether to retrieve column comments along with table info.
+            Defaults to False.

     Returns:
         A chain that takes in a question and generates a SQL query that answers
@@ -127,14 +131,26 @@ def create_sql_query_chain(
     if "dialect" in prompt_to_use.input_variables:
         prompt_to_use = prompt_to_use.partial(dialect=db.dialect)

+    table_info_kwargs = {}
+    if get_col_comments:
+        if db.dialect not in ("postgresql", "mysql", "oracle"):
+            raise ValueError(
+                f"get_col_comments=True is only supported for dialects "
+                f"'postgresql', 'mysql', and 'oracle'. Received dialect: "
+                f"{db.dialect}"
+            )
+        else:
+            table_info_kwargs["get_col_comments"] = True
+
     inputs = {
         "input": lambda x: x["question"] + "\nSQLQuery: ",
         "table_info": lambda x: db.get_table_info(
-            table_names=x.get("table_names_to_use")
+            table_names=x.get("table_names_to_use"),
+            **table_info_kwargs,
         ),
     }
     return (
-        RunnablePassthrough.assign(**inputs)  # type: ignore
+        RunnablePassthrough.assign(**inputs)  # type: ignore[return-value]
         | (
             lambda x: {
                 k: v
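
Beyond the typing cleanup, the substantive change in this file is the new keyword-only get_col_comments flag on create_sql_query_chain, which forwards column comments through db.get_table_info and is only accepted for the postgresql, mysql, and oracle dialects. A rough usage sketch (the connection string and model below are placeholders):

    from langchain.chains import create_sql_query_chain
    from langchain_community.utilities import SQLDatabase
    from langchain_openai import ChatOpenAI  # assumption: any chat model works

    db = SQLDatabase.from_uri("postgresql+psycopg2://user:pass@localhost/appdb")  # placeholder DSN
    llm = ChatOpenAI(model="gpt-4o-mini")

    # New in 0.3.25: include column comments in the table info shown to the model.
    # Raises ValueError for dialects other than postgresql, mysql, or oracle.
    chain = create_sql_query_chain(llm, db, get_col_comments=True)
    sql = chain.invoke({"question": "How many users signed up last month?"})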

langchain/chains/structured_output/base.py

@@ -1,5 +1,6 @@
 import json
-from typing import Any, Callable, Dict, Literal, Optional, Sequence, Type, Union
+from collections.abc import Sequence
+from typing import Any, Callable, Literal, Optional, Union

 from langchain_core._api import deprecated
 from langchain_core.output_parsers import (
@@ -63,7 +64,7 @@ from pydantic import BaseModel
     ),
 )
 def create_openai_fn_runnable(
-    functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
+    functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]],
     llm: Runnable,
     prompt: Optional[BasePromptTemplate] = None,
     *,
@@ -135,7 +136,7 @@ def create_openai_fn_runnable(
     if not functions:
         raise ValueError("Need to pass in at least one function. Received zero.")
     openai_functions = [convert_to_openai_function(f) for f in functions]
-    llm_kwargs_: Dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
+    llm_kwargs_: dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
     if len(openai_functions) == 1 and enforce_single_function_usage:
         llm_kwargs_["function_call"] = {"name": openai_functions[0]["name"]}
     output_parser = output_parser or get_openai_output_parser(functions)
@@ -181,7 +182,7 @@ def create_openai_fn_runnable(
     ),
 )
 def create_structured_output_runnable(
-    output_schema: Union[Dict[str, Any], Type[BaseModel]],
+    output_schema: Union[dict[str, Any], type[BaseModel]],
     llm: Runnable,
     prompt: Optional[BasePromptTemplate] = None,
     *,
@@ -437,7 +438,7 @@ def create_structured_output_runnable(


 def _create_openai_tools_runnable(
-    tool: Union[Dict[str, Any], Type[BaseModel], Callable],
+    tool: Union[dict[str, Any], type[BaseModel], Callable],
     llm: Runnable,
     *,
     prompt: Optional[BasePromptTemplate],
@@ -446,7 +447,7 @@ def _create_openai_tools_runnable(
     first_tool_only: bool,
 ) -> Runnable:
     oai_tool = convert_to_openai_tool(tool)
-    llm_kwargs: Dict[str, Any] = {"tools": [oai_tool]}
+    llm_kwargs: dict[str, Any] = {"tools": [oai_tool]}
     if enforce_tool_usage:
         llm_kwargs["tool_choice"] = {
             "type": "function",
@@ -462,7 +463,7 @@ def _create_openai_tools_runnable(


 def _get_openai_tool_output_parser(
-    tool: Union[Dict[str, Any], Type[BaseModel], Callable],
+    tool: Union[dict[str, Any], type[BaseModel], Callable],
     *,
     first_tool_only: bool = False,
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
@@ -479,7 +480,7 @@ def _get_openai_tool_output_parser(


 def get_openai_output_parser(
-    functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
+    functions: Sequence[Union[dict[str, Any], type[BaseModel], Callable]],
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
     """Get the appropriate function output parser given the user functions.

@@ -496,7 +497,7 @@ def get_openai_output_parser(
     """
     if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
         if len(functions) > 1:
-            pydantic_schema: Union[Dict, Type[BaseModel]] = {
+            pydantic_schema: Union[dict, type[BaseModel]] = {
                 convert_to_openai_function(fn)["name"]: fn for fn in functions
             }
         else:
@@ -510,7 +511,7 @@ def get_openai_output_parser(


 def _create_openai_json_runnable(
-    output_schema: Union[Dict[str, Any], Type[BaseModel]],
+    output_schema: Union[dict[str, Any], type[BaseModel]],
     llm: Runnable,
     prompt: Optional[BasePromptTemplate] = None,
     *,
@@ -519,7 +520,7 @@ def _create_openai_json_runnable(
     """"""
     if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
-            pydantic_object=output_schema,  # type: ignore
+            pydantic_object=output_schema,
         )
         schema_as_dict = convert_to_openai_function(output_schema)["parameters"]
     else:
@@ -537,7 +538,7 @@
 

 def _create_openai_functions_structured_output_runnable(
-    output_schema: Union[Dict[str, Any], Type[BaseModel]],
+    output_schema: Union[dict[str, Any], type[BaseModel]],
     llm: Runnable,
     prompt: Optional[BasePromptTemplate] = None,
     *,
@@ -558,7 +559,7 @@ def _create_openai_functions_structured_output_runnable(
         class _OutputFormatter(BaseModel):
             """Output formatter. Should always be used to format your response to the user."""  # noqa: E501

-            output: output_schema  # type: ignore
+            output: output_schema  # type: ignore[valid-type]

         function = _OutputFormatter
         output_parser = output_parser or PydanticAttrOutputFunctionsParser(

langchain/chains/summarize/chain.py

@@ -1,6 +1,7 @@
 """Load summarizing chains."""

-from typing import Any, Mapping, Optional, Protocol
+from collections.abc import Mapping
+from typing import Any, Optional, Protocol

 from langchain_core.callbacks import Callbacks
 from langchain_core.language_models import BaseLanguageModel
@@ -61,14 +62,14 @@ def _load_map_reduce_chain(
         llm=llm,
         prompt=map_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     _reduce_llm = reduce_llm or llm
     reduce_chain = LLMChain(
         llm=_reduce_llm,
         prompt=combine_prompt,
         verbose=verbose,  # type: ignore[arg-type]
-        callbacks=callbacks,  # type: ignore[arg-type]
+        callbacks=callbacks,
     )
     # TODO: document prompt
     combine_documents_chain = StuffDocumentsChain(
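
The summarize loader change only drops a type: ignore that is no longer needed on the callbacks argument. For context, this loader is normally reached through load_summarize_chain; a rough sketch (model and documents are placeholders):

    from langchain.chains.summarize import load_summarize_chain
    from langchain_core.documents import Document
    from langchain_openai import ChatOpenAI  # assumption: any chat model works

    docs = [Document(page_content="LangChain 0.3.25 modernizes its type hints.")]
    chain = load_summarize_chain(ChatOpenAI(), chain_type="map_reduce")
    chain.invoke({"input_documents": docs})  # summary returned under "output_text"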

langchain/chains/transform.py

@@ -2,7 +2,8 @@

 import functools
 import logging
-from typing import Any, Awaitable, Callable, Dict, List, Optional
+from collections.abc import Awaitable
+from typing import Any, Callable, Optional

 from langchain_core.callbacks import (
     AsyncCallbackManagerForChainRun,
@@ -26,13 +27,13 @@ class TransformChain(Chain):
                 output_variables["entities"], transform=func())
     """

-    input_variables: List[str]
+    input_variables: list[str]
     """The keys expected by the transform's input dictionary."""
-    output_variables: List[str]
+    output_variables: list[str]
     """The keys returned by the transform's output dictionary."""
-    transform_cb: Callable[[Dict[str, str]], Dict[str, str]] = Field(alias="transform")
+    transform_cb: Callable[[dict[str, str]], dict[str, str]] = Field(alias="transform")
     """The transform function."""
-    atransform_cb: Optional[Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]] = (
+    atransform_cb: Optional[Callable[[dict[str, Any]], Awaitable[dict[str, Any]]]] = (
         Field(None, alias="atransform")
     )
     """The async coroutine transform function."""
@@ -47,7 +48,7 @@ class TransformChain(Chain):
         logger.warning(msg)

     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Expect input keys.

         :meta private:
@@ -55,7 +56,7 @@ class TransformChain(Chain):
         return self.input_variables

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Return output keys.

         :meta private:
@@ -64,16 +65,16 @@ class TransformChain(Chain):

     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         return self.transform_cb(inputs)

     async def _acall(
         self,
-        inputs: Dict[str, Any],
+        inputs: dict[str, Any],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if self.atransform_cb is not None:
             return await self.atransform_cb(inputs)
         else:
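
TransformChain itself is behaviorally untouched; only its annotations move to built-in generics and collections.abc.Awaitable. A small sketch of the sync and async callbacks it wraps (function and key names are illustrative):

    from langchain.chains import TransformChain

    def clean(inputs: dict) -> dict:
        return {"cleaned": inputs["text"].strip().lower()}

    async def aclean(inputs: dict) -> dict:
        return clean(inputs)

    chain = TransformChain(
        input_variables=["text"],
        output_variables=["cleaned"],
        transform=clean,
        atransform=aclean,
    )
    chain.invoke({"text": "  Hello World  "})  # includes {'cleaned': 'hello world'}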