langchain 0.3.23__py3-none-any.whl → 0.3.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (170)
  1. langchain/_api/module_import.py +3 -3
  2. langchain/agents/agent.py +104 -109
  3. langchain/agents/agent_iterator.py +11 -15
  4. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
  5. langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
  6. langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
  7. langchain/agents/chat/base.py +7 -6
  8. langchain/agents/chat/output_parser.py +2 -1
  9. langchain/agents/conversational/base.py +5 -4
  10. langchain/agents/conversational_chat/base.py +9 -8
  11. langchain/agents/format_scratchpad/log.py +1 -3
  12. langchain/agents/format_scratchpad/log_to_messages.py +3 -5
  13. langchain/agents/format_scratchpad/openai_functions.py +4 -4
  14. langchain/agents/format_scratchpad/tools.py +3 -3
  15. langchain/agents/format_scratchpad/xml.py +1 -3
  16. langchain/agents/initialize.py +2 -1
  17. langchain/agents/json_chat/base.py +3 -2
  18. langchain/agents/loading.py +5 -5
  19. langchain/agents/mrkl/base.py +6 -5
  20. langchain/agents/openai_assistant/base.py +17 -17
  21. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
  22. langchain/agents/openai_functions_agent/base.py +13 -12
  23. langchain/agents/openai_functions_multi_agent/base.py +15 -14
  24. langchain/agents/openai_tools/base.py +2 -1
  25. langchain/agents/output_parsers/openai_functions.py +2 -2
  26. langchain/agents/output_parsers/openai_tools.py +6 -6
  27. langchain/agents/output_parsers/react_json_single_input.py +2 -1
  28. langchain/agents/output_parsers/self_ask.py +2 -1
  29. langchain/agents/output_parsers/tools.py +7 -7
  30. langchain/agents/react/agent.py +3 -2
  31. langchain/agents/react/base.py +4 -3
  32. langchain/agents/schema.py +3 -3
  33. langchain/agents/self_ask_with_search/base.py +2 -1
  34. langchain/agents/structured_chat/base.py +9 -8
  35. langchain/agents/structured_chat/output_parser.py +2 -1
  36. langchain/agents/tool_calling_agent/base.py +3 -2
  37. langchain/agents/tools.py +4 -4
  38. langchain/agents/types.py +3 -3
  39. langchain/agents/utils.py +1 -1
  40. langchain/agents/xml/base.py +7 -6
  41. langchain/callbacks/streaming_aiter.py +3 -2
  42. langchain/callbacks/streaming_aiter_final_only.py +3 -3
  43. langchain/callbacks/streaming_stdout_final_only.py +3 -3
  44. langchain/chains/api/base.py +11 -12
  45. langchain/chains/base.py +47 -50
  46. langchain/chains/combine_documents/base.py +23 -23
  47. langchain/chains/combine_documents/map_reduce.py +12 -12
  48. langchain/chains/combine_documents/map_rerank.py +16 -15
  49. langchain/chains/combine_documents/reduce.py +17 -17
  50. langchain/chains/combine_documents/refine.py +12 -12
  51. langchain/chains/combine_documents/stuff.py +10 -10
  52. langchain/chains/constitutional_ai/base.py +9 -9
  53. langchain/chains/conversation/base.py +2 -4
  54. langchain/chains/conversational_retrieval/base.py +30 -30
  55. langchain/chains/elasticsearch_database/base.py +13 -13
  56. langchain/chains/example_generator.py +1 -3
  57. langchain/chains/flare/base.py +13 -12
  58. langchain/chains/flare/prompts.py +2 -4
  59. langchain/chains/hyde/base.py +8 -8
  60. langchain/chains/llm.py +31 -30
  61. langchain/chains/llm_checker/base.py +6 -6
  62. langchain/chains/llm_math/base.py +10 -10
  63. langchain/chains/llm_summarization_checker/base.py +6 -6
  64. langchain/chains/loading.py +12 -14
  65. langchain/chains/mapreduce.py +7 -6
  66. langchain/chains/moderation.py +8 -8
  67. langchain/chains/natbot/base.py +6 -6
  68. langchain/chains/openai_functions/base.py +8 -10
  69. langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
  70. langchain/chains/openai_functions/extraction.py +3 -3
  71. langchain/chains/openai_functions/openapi.py +12 -12
  72. langchain/chains/openai_functions/qa_with_structure.py +4 -4
  73. langchain/chains/openai_functions/utils.py +2 -2
  74. langchain/chains/openai_tools/extraction.py +2 -2
  75. langchain/chains/prompt_selector.py +3 -3
  76. langchain/chains/qa_generation/base.py +5 -5
  77. langchain/chains/qa_with_sources/base.py +21 -21
  78. langchain/chains/qa_with_sources/loading.py +2 -1
  79. langchain/chains/qa_with_sources/retrieval.py +6 -6
  80. langchain/chains/qa_with_sources/vector_db.py +8 -8
  81. langchain/chains/query_constructor/base.py +4 -3
  82. langchain/chains/query_constructor/parser.py +5 -4
  83. langchain/chains/question_answering/chain.py +3 -2
  84. langchain/chains/retrieval.py +2 -2
  85. langchain/chains/retrieval_qa/base.py +16 -16
  86. langchain/chains/router/base.py +12 -11
  87. langchain/chains/router/embedding_router.py +12 -11
  88. langchain/chains/router/llm_router.py +12 -12
  89. langchain/chains/router/multi_prompt.py +3 -3
  90. langchain/chains/router/multi_retrieval_qa.py +5 -4
  91. langchain/chains/sequential.py +18 -18
  92. langchain/chains/sql_database/query.py +21 -5
  93. langchain/chains/structured_output/base.py +14 -13
  94. langchain/chains/summarize/chain.py +4 -3
  95. langchain/chains/transform.py +12 -11
  96. langchain/chat_models/base.py +27 -31
  97. langchain/embeddings/__init__.py +1 -1
  98. langchain/embeddings/base.py +4 -6
  99. langchain/embeddings/cache.py +19 -18
  100. langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
  101. langchain/evaluation/comparison/eval_chain.py +10 -10
  102. langchain/evaluation/criteria/eval_chain.py +11 -10
  103. langchain/evaluation/embedding_distance/base.py +21 -21
  104. langchain/evaluation/exact_match/base.py +3 -3
  105. langchain/evaluation/loading.py +7 -8
  106. langchain/evaluation/qa/eval_chain.py +7 -6
  107. langchain/evaluation/regex_match/base.py +3 -3
  108. langchain/evaluation/schema.py +6 -5
  109. langchain/evaluation/scoring/eval_chain.py +9 -9
  110. langchain/evaluation/string_distance/base.py +23 -23
  111. langchain/hub.py +2 -1
  112. langchain/indexes/_sql_record_manager.py +8 -7
  113. langchain/indexes/vectorstore.py +11 -11
  114. langchain/llms/__init__.py +3 -3
  115. langchain/memory/buffer.py +13 -13
  116. langchain/memory/buffer_window.py +5 -5
  117. langchain/memory/chat_memory.py +5 -5
  118. langchain/memory/combined.py +10 -10
  119. langchain/memory/entity.py +8 -7
  120. langchain/memory/readonly.py +4 -4
  121. langchain/memory/simple.py +5 -5
  122. langchain/memory/summary.py +8 -8
  123. langchain/memory/summary_buffer.py +11 -11
  124. langchain/memory/token_buffer.py +5 -5
  125. langchain/memory/utils.py +2 -2
  126. langchain/memory/vectorstore.py +15 -14
  127. langchain/memory/vectorstore_token_buffer_memory.py +7 -7
  128. langchain/model_laboratory.py +4 -3
  129. langchain/output_parsers/combining.py +5 -5
  130. langchain/output_parsers/datetime.py +1 -2
  131. langchain/output_parsers/enum.py +4 -5
  132. langchain/output_parsers/pandas_dataframe.py +5 -5
  133. langchain/output_parsers/regex.py +4 -4
  134. langchain/output_parsers/regex_dict.py +4 -4
  135. langchain/output_parsers/retry.py +2 -2
  136. langchain/output_parsers/structured.py +5 -5
  137. langchain/output_parsers/yaml.py +3 -3
  138. langchain/pydantic_v1/__init__.py +1 -6
  139. langchain/pydantic_v1/dataclasses.py +1 -5
  140. langchain/pydantic_v1/main.py +1 -5
  141. langchain/retrievers/contextual_compression.py +3 -3
  142. langchain/retrievers/document_compressors/base.py +3 -2
  143. langchain/retrievers/document_compressors/chain_extract.py +4 -3
  144. langchain/retrievers/document_compressors/chain_filter.py +3 -2
  145. langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
  146. langchain/retrievers/document_compressors/cross_encoder.py +1 -2
  147. langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
  148. langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
  149. langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
  150. langchain/retrievers/ensemble.py +15 -19
  151. langchain/retrievers/merger_retriever.py +7 -12
  152. langchain/retrievers/multi_query.py +14 -13
  153. langchain/retrievers/multi_vector.py +4 -4
  154. langchain/retrievers/parent_document_retriever.py +9 -8
  155. langchain/retrievers/re_phraser.py +2 -3
  156. langchain/retrievers/self_query/base.py +13 -12
  157. langchain/retrievers/time_weighted_retriever.py +14 -14
  158. langchain/runnables/openai_functions.py +4 -3
  159. langchain/smith/evaluation/config.py +7 -6
  160. langchain/smith/evaluation/progress.py +3 -2
  161. langchain/smith/evaluation/runner_utils.py +66 -69
  162. langchain/smith/evaluation/string_run_evaluator.py +38 -31
  163. langchain/storage/encoder_backed.py +7 -11
  164. langchain/storage/file_system.py +5 -4
  165. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/METADATA +3 -3
  166. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/RECORD +169 -169
  167. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/WHEEL +1 -1
  168. langchain-0.3.25.dist-info/entry_points.txt +4 -0
  169. langchain-0.3.23.dist-info/entry_points.txt +0 -5
  170. {langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/licenses/LICENSE +0 -0

langchain/smith/evaluation/string_run_evaluator.py
@@ -2,8 +2,9 @@

 from __future__ import annotations

+import uuid
 from abc import abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional

 from langchain_core.callbacks.manager import (
     AsyncCallbackManagerForChainRun,
@@ -21,7 +22,7 @@ from langchain.evaluation.schema import StringEvaluator
 from langchain.schema import RUN_KEY


-def _get_messages_from_run_dict(messages: List[dict]) -> List[BaseMessage]:
+def _get_messages_from_run_dict(messages: list[dict]) -> list[BaseMessage]:
     if not messages:
         return []
     first_message = messages[0]
@@ -35,15 +36,15 @@ class StringRunMapper(Serializable):
     """Extract items to evaluate from the run object."""

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """The keys to extract from the run."""
         return ["prediction", "input"]

     @abstractmethod
-    def map(self, run: Run) -> Dict[str, str]:
+    def map(self, run: Run) -> dict[str, str]:
         """Maps the Run to a dictionary."""

-    def __call__(self, run: Run) -> Dict[str, str]:
+    def __call__(self, run: Run) -> dict[str, str]:
         """Maps the Run to a dictionary."""
         if not run.outputs:
             raise ValueError(f"Run {run.id} has no outputs to evaluate.")
@@ -53,7 +54,7 @@ class StringRunMapper(Serializable):
 class LLMStringRunMapper(StringRunMapper):
     """Extract items to evaluate from the run object."""

-    def serialize_chat_messages(self, messages: List[Dict]) -> str:
+    def serialize_chat_messages(self, messages: list[dict]) -> str:
         """Extract the input messages from the run."""
         if isinstance(messages, list) and messages:
             if isinstance(messages[0], dict):
@@ -66,7 +67,7 @@ class LLMStringRunMapper(StringRunMapper):
             return get_buffer_string(chat_messages)
         raise ValueError(f"Could not extract messages to evaluate {messages}")

-    def serialize_inputs(self, inputs: Dict) -> str:
+    def serialize_inputs(self, inputs: dict) -> str:
         if "prompts" in inputs:  # Should we even accept this?
             input_ = "\n\n".join(inputs["prompts"])
         elif "prompt" in inputs:
@@ -77,13 +78,13 @@ class LLMStringRunMapper(StringRunMapper):
             raise ValueError("LLM Run must have either messages or prompts as inputs.")
         return input_

-    def serialize_outputs(self, outputs: Dict) -> str:
+    def serialize_outputs(self, outputs: dict) -> str:
         if not outputs.get("generations"):
             raise ValueError("Cannot evaluate LLM Run without generations.")
-        generations: List[Dict] = outputs["generations"]
+        generations: list[dict] = outputs["generations"]
         if not generations:
             raise ValueError("Cannot evaluate LLM run with empty generations.")
-        first_generation: Dict = generations[0]
+        first_generation: dict = generations[0]
         if isinstance(first_generation, list):
             # Runs from Tracer have generations as a list of lists of dicts
             # Whereas Runs from the API have a list of dicts
@@ -94,7 +95,7 @@ class LLMStringRunMapper(StringRunMapper):
             output_ = first_generation["text"]
         return output_

-    def map(self, run: Run) -> Dict[str, str]:
+    def map(self, run: Run) -> dict[str, str]:
         """Maps the Run to a dictionary."""
         if run.run_type != "llm":
             raise ValueError("LLM RunMapper only supports LLM runs.")
@@ -135,7 +136,7 @@ class ChainStringRunMapper(StringRunMapper):
     If not provided, will use the only output key or raise an error
     if there are multiple."""

-    def _get_key(self, source: Dict, key: Optional[str], which: str) -> str:
+    def _get_key(self, source: dict, key: Optional[str], which: str) -> str:
         if key is not None:
             return source[key]
         elif len(source) == 1:
@@ -146,7 +147,7 @@ class ChainStringRunMapper(StringRunMapper):
                 f"{source}\nPlease manually specify a {which}_key"
             )

-    def map(self, run: Run) -> Dict[str, str]:
+    def map(self, run: Run) -> dict[str, str]:
         """Maps the Run to a dictionary."""
         if not run.outputs:
             raise ValueError(
@@ -182,7 +183,7 @@ class ChainStringRunMapper(StringRunMapper):
 class ToolStringRunMapper(StringRunMapper):
     """Map an input to the tool."""

-    def map(self, run: Run) -> Dict[str, str]:
+    def map(self, run: Run) -> dict[str, str]:
         if not run.outputs:
             raise ValueError(f"Run {run.id} has no outputs to evaluate.")
         return {"input": run.inputs["input"], "prediction": run.outputs["output"]}
@@ -194,16 +195,16 @@ class StringExampleMapper(Serializable):
     reference_key: Optional[str] = None

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """The keys to extract from the run."""
         return ["reference"]

-    def serialize_chat_messages(self, messages: List[Dict]) -> str:
+    def serialize_chat_messages(self, messages: list[dict]) -> str:
         """Extract the input messages from the run."""
         chat_messages = _get_messages_from_run_dict(messages)
         return get_buffer_string(chat_messages)

-    def map(self, example: Example) -> Dict[str, str]:
+    def map(self, example: Example) -> dict[str, str]:
         """Maps the Example, or dataset row to a dictionary."""
         if not example.outputs:
             raise ValueError(
@@ -230,7 +231,7 @@ class StringExampleMapper(Serializable):
             else output
         }

-    def __call__(self, example: Example) -> Dict[str, str]:
+    def __call__(self, example: Example) -> dict[str, str]:
         """Maps the Run and Example to a dictionary."""
         if not example.outputs:
             raise ValueError(
@@ -239,7 +240,7 @@ class StringExampleMapper(Serializable):
         return self.map(example)


-class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, override]
+class StringRunEvaluatorChain(Chain, RunEvaluator):
     """Evaluate Run and optional examples."""

     run_mapper: StringRunMapper
@@ -253,14 +254,14 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov
     """The evaluation chain."""

     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         return ["run", "example"]

     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         return ["feedback"]

-    def _prepare_input(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+    def _prepare_input(self, inputs: dict[str, Any]) -> dict[str, str]:
         run: Run = inputs["run"]
         example: Optional[Example] = inputs.get("example")
         evaluate_strings_inputs = self.run_mapper(run)
@@ -277,7 +278,7 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov
             )
         return evaluate_strings_inputs

-    def _prepare_output(self, output: Dict[str, Any]) -> Dict[str, Any]:
+    def _prepare_output(self, output: dict[str, Any]) -> dict[str, Any]:
         evaluation_result = EvaluationResult(
             key=self.name, comment=output.get("reasoning"), **output
         )
@@ -288,9 +289,9 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov

     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Call the evaluation chain."""
         evaluate_strings_inputs = self._prepare_input(inputs)
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
@@ -304,9 +305,9 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov

     async def _acall(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Call the evaluation chain."""
         evaluate_strings_inputs = self._prepare_input(inputs)
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
@@ -318,14 +319,17 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov
         )
         return self._prepare_output(chain_output)

-    def _prepare_evaluator_output(self, output: Dict[str, Any]) -> EvaluationResult:
+    def _prepare_evaluator_output(self, output: dict[str, Any]) -> EvaluationResult:
         feedback: EvaluationResult = output["feedback"]
         if RUN_KEY not in feedback.evaluator_info:
             feedback.evaluator_info[RUN_KEY] = output[RUN_KEY]
         return feedback

     def evaluate_run(
-        self, run: Run, example: Optional[Example] = None
+        self,
+        run: Run,
+        example: Optional[Example] = None,
+        evaluator_run_id: Optional[uuid.UUID] = None,
     ) -> EvaluationResult:
         """Evaluate an example."""
         try:
@@ -339,7 +343,10 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov
             )

     async def aevaluate_run(
-        self, run: Run, example: Optional[Example] = None
+        self,
+        run: Run,
+        example: Optional[Example] = None,
+        evaluator_run_id: Optional[uuid.UUID] = None,
     ) -> EvaluationResult:
         """Evaluate an example."""
         try:
@@ -362,7 +369,7 @@ class StringRunEvaluatorChain(Chain, RunEvaluator):  # type: ignore[override, ov
         input_key: Optional[str] = None,
         prediction_key: Optional[str] = None,
         reference_key: Optional[str] = None,
-        tags: Optional[List[str]] = None,
+        tags: Optional[list[str]] = None,
     ) -> StringRunEvaluatorChain:
         """
         Create a StringRunEvaluatorChain from an evaluator and the run and dataset types.
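
Beyond the Dict/List to dict/list type-hint modernization, the substantive change in this file is that evaluate_run and aevaluate_run now accept an optional evaluator_run_id keyword (hence the new "import uuid"). Below is a minimal, hypothetical sketch of a custom evaluator written against the same updated signature; the class name and exact-match logic are illustrative and not part of langchain, only the method signature mirrors the diff above:

    import uuid
    from typing import Optional

    from langsmith.evaluation import EvaluationResult, RunEvaluator
    from langsmith.schemas import Example, Run


    class ExactMatchRunEvaluator(RunEvaluator):
        """Toy evaluator: scores 1 when the run's output equals the reference output."""

        def evaluate_run(
            self,
            run: Run,
            example: Optional[Example] = None,
            evaluator_run_id: Optional[uuid.UUID] = None,  # new optional kwarg; safe to ignore
        ) -> EvaluationResult:
            prediction = (run.outputs or {}).get("output", "")
            reference = (example.outputs or {}).get("output", "") if example else ""
            return EvaluationResult(key="exact_match", score=int(prediction == reference))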

langchain/storage/encoder_backed.py
@@ -1,12 +1,8 @@
+from collections.abc import AsyncIterator, Iterator, Sequence
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Iterator,
-    List,
     Optional,
-    Sequence,
-    Tuple,
     TypeVar,
     Union,
 )
@@ -65,25 +61,25 @@ class EncoderBackedStore(BaseStore[K, V]):
         self.value_serializer = value_serializer
         self.value_deserializer = value_deserializer

-    def mget(self, keys: Sequence[K]) -> List[Optional[V]]:
+    def mget(self, keys: Sequence[K]) -> list[Optional[V]]:
         """Get the values associated with the given keys."""
-        encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
+        encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
         values = self.store.mget(encoded_keys)
         return [
             self.value_deserializer(value) if value is not None else value
             for value in values
         ]

-    async def amget(self, keys: Sequence[K]) -> List[Optional[V]]:
+    async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
         """Get the values associated with the given keys."""
-        encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
+        encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
         values = await self.store.amget(encoded_keys)
         return [
             self.value_deserializer(value) if value is not None else value
             for value in values
         ]

-    def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
+    def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
         """Set the values for the given keys."""
         encoded_pairs = [
             (self.key_encoder(key), self.value_serializer(value))
@@ -91,7 +87,7 @@ class EncoderBackedStore(BaseStore[K, V]):
         ]
         self.store.mset(encoded_pairs)

-    async def amset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
+    async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
         """Set the values for the given keys."""
         encoded_pairs = [
             (self.key_encoder(key), self.value_serializer(value))
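
The storage diffs here and below are purely mechanical: typing.List, Dict, and Tuple become built-in generics, and the container ABCs move to collections.abc (the PEP 585 style), which is valid on Python 3.9 and later and is consistent with the Requires-Python change in the METADATA at the end. A small illustrative sketch of the target style; the function and variable names are made up and not from the package:

    from collections.abc import Sequence
    from typing import Optional


    def mget_sketch(keys: Sequence[str], store: dict[str, bytes]) -> list[Optional[bytes]]:
        """Return the stored value for each key, or None for keys that are missing."""
        return [store.get(key) for key in keys]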

langchain/storage/file_system.py
@@ -1,8 +1,9 @@
 import os
 import re
 import time
+from collections.abc import Iterator, Sequence
 from pathlib import Path
-from typing import Iterator, List, Optional, Sequence, Tuple, Union
+from typing import Optional, Union

 from langchain_core.stores import ByteStore

@@ -103,7 +104,7 @@ class LocalFileStore(ByteStore):
             if self.chmod_dir is not None:
                 os.chmod(dir, self.chmod_dir)

-    def mget(self, keys: Sequence[str]) -> List[Optional[bytes]]:
+    def mget(self, keys: Sequence[str]) -> list[Optional[bytes]]:
         """Get the values associated with the given keys.

         Args:
@@ -113,7 +114,7 @@ class LocalFileStore(ByteStore):
             A sequence of optional values associated with the keys.
             If a key is not found, the corresponding value will be None.
         """
-        values: List[Optional[bytes]] = []
+        values: list[Optional[bytes]] = []
         for key in keys:
             full_path = self._get_full_path(key)
             if full_path.exists():
@@ -126,7 +127,7 @@ class LocalFileStore(ByteStore):
                 values.append(None)
         return values

-    def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) -> None:
+    def mset(self, key_value_pairs: Sequence[tuple[str, bytes]]) -> None:
         """Set the values for the given keys.

         Args:

{langchain-0.3.23.dist-info → langchain-0.3.25.dist-info}/METADATA
@@ -1,13 +1,13 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.3.23
+Version: 0.3.25
 Summary: Building applications with LLMs through composability
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
 Project-URL: repository, https://github.com/langchain-ai/langchain
-Requires-Python: <4.0,>=3.9
-Requires-Dist: langchain-core<1.0.0,>=0.3.51
+Requires-Python: >=3.9
+Requires-Dist: langchain-core<1.0.0,>=0.3.58
 Requires-Dist: langchain-text-splitters<1.0.0,>=0.3.8
 Requires-Dist: langsmith<0.4,>=0.1.17
 Requires-Dist: pydantic<3.0.0,>=2.7.4
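
The metadata change drops the upper bound on Requires-Python and raises the langchain-core floor to 0.3.58. For anyone auditing an environment against these constraints, here is a small stand-alone check; it is illustrative only, is not part of langchain, and assumes the third-party packaging library is installed:

    # Illustrative only: confirm installed versions satisfy the 0.3.25 constraints above.
    from importlib.metadata import version

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    constraints = {
        "langchain-core": SpecifierSet(">=0.3.58,<1.0.0"),
        "langchain-text-splitters": SpecifierSet(">=0.3.8,<1.0.0"),
        "langsmith": SpecifierSet(">=0.1.17,<0.4"),
    }

    for name, spec in constraints.items():
        installed = Version(version(name))
        status = "ok" if installed in spec else f"violates {spec}"
        print(f"{name} {installed}: {status}")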