langchain 0.3.22__py3-none-any.whl → 0.3.24__py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (170)
  1. langchain/_api/module_import.py +3 -3
  2. langchain/agents/agent.py +104 -109
  3. langchain/agents/agent_iterator.py +11 -15
  4. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +2 -2
  5. langchain/agents/agent_toolkits/vectorstore/base.py +3 -3
  6. langchain/agents/agent_toolkits/vectorstore/toolkit.py +4 -6
  7. langchain/agents/chat/base.py +7 -6
  8. langchain/agents/chat/output_parser.py +2 -1
  9. langchain/agents/conversational/base.py +5 -4
  10. langchain/agents/conversational_chat/base.py +9 -8
  11. langchain/agents/format_scratchpad/log.py +1 -3
  12. langchain/agents/format_scratchpad/log_to_messages.py +3 -5
  13. langchain/agents/format_scratchpad/openai_functions.py +4 -4
  14. langchain/agents/format_scratchpad/tools.py +3 -3
  15. langchain/agents/format_scratchpad/xml.py +1 -3
  16. langchain/agents/initialize.py +2 -1
  17. langchain/agents/json_chat/base.py +3 -2
  18. langchain/agents/loading.py +5 -5
  19. langchain/agents/mrkl/base.py +6 -5
  20. langchain/agents/openai_assistant/base.py +13 -17
  21. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +6 -6
  22. langchain/agents/openai_functions_agent/base.py +13 -12
  23. langchain/agents/openai_functions_multi_agent/base.py +15 -14
  24. langchain/agents/openai_tools/base.py +2 -1
  25. langchain/agents/output_parsers/openai_functions.py +2 -2
  26. langchain/agents/output_parsers/openai_tools.py +6 -6
  27. langchain/agents/output_parsers/react_json_single_input.py +2 -1
  28. langchain/agents/output_parsers/self_ask.py +2 -1
  29. langchain/agents/output_parsers/tools.py +7 -7
  30. langchain/agents/react/agent.py +3 -2
  31. langchain/agents/react/base.py +4 -3
  32. langchain/agents/schema.py +3 -3
  33. langchain/agents/self_ask_with_search/base.py +2 -1
  34. langchain/agents/structured_chat/base.py +9 -8
  35. langchain/agents/structured_chat/output_parser.py +2 -1
  36. langchain/agents/tool_calling_agent/base.py +3 -2
  37. langchain/agents/tools.py +4 -4
  38. langchain/agents/types.py +3 -3
  39. langchain/agents/utils.py +1 -1
  40. langchain/agents/xml/base.py +7 -6
  41. langchain/callbacks/streaming_aiter.py +3 -2
  42. langchain/callbacks/streaming_aiter_final_only.py +3 -3
  43. langchain/callbacks/streaming_stdout_final_only.py +3 -3
  44. langchain/chains/api/base.py +11 -12
  45. langchain/chains/base.py +47 -50
  46. langchain/chains/combine_documents/base.py +23 -23
  47. langchain/chains/combine_documents/map_reduce.py +12 -12
  48. langchain/chains/combine_documents/map_rerank.py +16 -15
  49. langchain/chains/combine_documents/reduce.py +17 -17
  50. langchain/chains/combine_documents/refine.py +12 -12
  51. langchain/chains/combine_documents/stuff.py +10 -10
  52. langchain/chains/constitutional_ai/base.py +9 -9
  53. langchain/chains/conversation/base.py +2 -4
  54. langchain/chains/conversational_retrieval/base.py +30 -30
  55. langchain/chains/elasticsearch_database/base.py +13 -13
  56. langchain/chains/example_generator.py +1 -3
  57. langchain/chains/flare/base.py +13 -12
  58. langchain/chains/flare/prompts.py +2 -4
  59. langchain/chains/hyde/base.py +8 -8
  60. langchain/chains/llm.py +31 -30
  61. langchain/chains/llm_checker/base.py +6 -6
  62. langchain/chains/llm_math/base.py +10 -10
  63. langchain/chains/llm_summarization_checker/base.py +6 -6
  64. langchain/chains/loading.py +12 -14
  65. langchain/chains/mapreduce.py +7 -6
  66. langchain/chains/moderation.py +8 -8
  67. langchain/chains/natbot/base.py +6 -6
  68. langchain/chains/openai_functions/base.py +8 -10
  69. langchain/chains/openai_functions/citation_fuzzy_match.py +4 -4
  70. langchain/chains/openai_functions/extraction.py +3 -3
  71. langchain/chains/openai_functions/openapi.py +12 -12
  72. langchain/chains/openai_functions/qa_with_structure.py +4 -4
  73. langchain/chains/openai_functions/utils.py +2 -2
  74. langchain/chains/openai_tools/extraction.py +2 -2
  75. langchain/chains/prompt_selector.py +3 -3
  76. langchain/chains/qa_generation/base.py +5 -5
  77. langchain/chains/qa_with_sources/base.py +21 -21
  78. langchain/chains/qa_with_sources/loading.py +2 -1
  79. langchain/chains/qa_with_sources/retrieval.py +6 -6
  80. langchain/chains/qa_with_sources/vector_db.py +8 -8
  81. langchain/chains/query_constructor/base.py +4 -3
  82. langchain/chains/query_constructor/parser.py +5 -4
  83. langchain/chains/question_answering/chain.py +3 -2
  84. langchain/chains/retrieval.py +2 -2
  85. langchain/chains/retrieval_qa/base.py +16 -16
  86. langchain/chains/router/base.py +12 -11
  87. langchain/chains/router/embedding_router.py +12 -11
  88. langchain/chains/router/llm_router.py +12 -12
  89. langchain/chains/router/multi_prompt.py +3 -3
  90. langchain/chains/router/multi_retrieval_qa.py +5 -4
  91. langchain/chains/sequential.py +18 -18
  92. langchain/chains/sql_database/query.py +4 -4
  93. langchain/chains/structured_output/base.py +14 -13
  94. langchain/chains/summarize/chain.py +4 -3
  95. langchain/chains/transform.py +12 -11
  96. langchain/chat_models/base.py +34 -31
  97. langchain/embeddings/__init__.py +1 -1
  98. langchain/embeddings/base.py +4 -4
  99. langchain/embeddings/cache.py +19 -18
  100. langchain/evaluation/agents/trajectory_eval_chain.py +16 -19
  101. langchain/evaluation/comparison/eval_chain.py +10 -10
  102. langchain/evaluation/criteria/eval_chain.py +11 -10
  103. langchain/evaluation/embedding_distance/base.py +21 -21
  104. langchain/evaluation/exact_match/base.py +3 -3
  105. langchain/evaluation/loading.py +7 -8
  106. langchain/evaluation/qa/eval_chain.py +7 -6
  107. langchain/evaluation/regex_match/base.py +3 -3
  108. langchain/evaluation/schema.py +6 -5
  109. langchain/evaluation/scoring/eval_chain.py +9 -9
  110. langchain/evaluation/string_distance/base.py +23 -23
  111. langchain/hub.py +2 -1
  112. langchain/indexes/_sql_record_manager.py +8 -7
  113. langchain/indexes/vectorstore.py +11 -11
  114. langchain/llms/__init__.py +3 -3
  115. langchain/memory/buffer.py +13 -13
  116. langchain/memory/buffer_window.py +5 -5
  117. langchain/memory/chat_memory.py +5 -5
  118. langchain/memory/combined.py +10 -10
  119. langchain/memory/entity.py +8 -7
  120. langchain/memory/readonly.py +4 -4
  121. langchain/memory/simple.py +5 -5
  122. langchain/memory/summary.py +8 -8
  123. langchain/memory/summary_buffer.py +11 -11
  124. langchain/memory/token_buffer.py +5 -5
  125. langchain/memory/utils.py +2 -2
  126. langchain/memory/vectorstore.py +15 -14
  127. langchain/memory/vectorstore_token_buffer_memory.py +7 -7
  128. langchain/model_laboratory.py +4 -3
  129. langchain/output_parsers/combining.py +5 -5
  130. langchain/output_parsers/datetime.py +1 -2
  131. langchain/output_parsers/enum.py +4 -5
  132. langchain/output_parsers/pandas_dataframe.py +5 -5
  133. langchain/output_parsers/regex.py +4 -4
  134. langchain/output_parsers/regex_dict.py +4 -4
  135. langchain/output_parsers/retry.py +2 -2
  136. langchain/output_parsers/structured.py +5 -5
  137. langchain/output_parsers/yaml.py +3 -3
  138. langchain/pydantic_v1/__init__.py +1 -6
  139. langchain/pydantic_v1/dataclasses.py +1 -5
  140. langchain/pydantic_v1/main.py +1 -5
  141. langchain/retrievers/contextual_compression.py +3 -3
  142. langchain/retrievers/document_compressors/base.py +3 -2
  143. langchain/retrievers/document_compressors/chain_extract.py +4 -3
  144. langchain/retrievers/document_compressors/chain_filter.py +3 -2
  145. langchain/retrievers/document_compressors/cohere_rerank.py +4 -3
  146. langchain/retrievers/document_compressors/cross_encoder.py +1 -2
  147. langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -1
  148. langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
  149. langchain/retrievers/document_compressors/listwise_rerank.py +6 -5
  150. langchain/retrievers/ensemble.py +15 -19
  151. langchain/retrievers/merger_retriever.py +7 -12
  152. langchain/retrievers/multi_query.py +14 -13
  153. langchain/retrievers/multi_vector.py +4 -4
  154. langchain/retrievers/parent_document_retriever.py +9 -8
  155. langchain/retrievers/re_phraser.py +2 -3
  156. langchain/retrievers/self_query/base.py +13 -12
  157. langchain/retrievers/time_weighted_retriever.py +14 -14
  158. langchain/runnables/openai_functions.py +4 -3
  159. langchain/smith/evaluation/config.py +7 -6
  160. langchain/smith/evaluation/progress.py +3 -2
  161. langchain/smith/evaluation/runner_utils.py +58 -61
  162. langchain/smith/evaluation/string_run_evaluator.py +29 -29
  163. langchain/storage/encoder_backed.py +7 -11
  164. langchain/storage/file_system.py +5 -4
  165. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/METADATA +5 -3
  166. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/RECORD +169 -169
  167. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/WHEEL +1 -1
  168. langchain-0.3.24.dist-info/entry_points.txt +4 -0
  169. langchain-0.3.22.dist-info/entry_points.txt +0 -5
  170. {langchain-0.3.22.dist-info → langchain-0.3.24.dist-info}/licenses/LICENSE +0 -0
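Most of the churn in the hunks below is mechanical typing modernization: the PEP 585 builtin generics list, dict, tuple, and type replace typing.List/Dict/Tuple/Type, and abstract types such as Sequence, Mapping, Iterator, and AsyncIterator move from typing to collections.abc. A minimal illustrative sketch of the pattern (not a line taken from the diff):

```python
# Before (typing-module generics):
#   from typing import Dict, List, Sequence, Tuple
#   def group(rows: Sequence[Tuple[str, int]]) -> Dict[str, List[int]]: ...

# After (PEP 585 builtin generics; abstract types from collections.abc):
from collections.abc import Sequence


def group(rows: Sequence[tuple[str, int]]) -> dict[str, list[int]]:
    out: dict[str, list[int]] = {}
    for key, value in rows:
        out.setdefault(key, []).append(value)
    return out
```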
langchain/chat_models/base.py

@@ -1,19 +1,13 @@
 from __future__ import annotations
 
 import warnings
+from collections.abc import AsyncIterator, Iterator, Sequence
 from importlib import util
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Dict,
-    Iterator,
-    List,
     Literal,
     Optional,
-    Sequence,
-    Tuple,
-    Type,
     Union,
     cast,
     overload,
@@ -47,7 +41,7 @@ __all__ = [
 
 
 @overload
-def init_chat_model(  # type: ignore[overload-overlap]
+def init_chat_model(
     model: str,
     *,
     model_provider: Optional[str] = None,
@@ -73,7 +67,7 @@ def init_chat_model(
     model: Optional[str] = None,
     *,
     model_provider: Optional[str] = None,
-    configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = ...,
+    configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ...,
     config_prefix: Optional[str] = None,
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
@@ -87,7 +81,7 @@ def init_chat_model(
     *,
     model_provider: Optional[str] = None,
     configurable_fields: Optional[
-        Union[Literal["any"], List[str], Tuple[str, ...]]
+        Union[Literal["any"], list[str], tuple[str, ...]]
    ] = None,
    config_prefix: Optional[str] = None,
    **kwargs: Any,
@@ -125,6 +119,7 @@ def init_chat_model(
             - 'ibm' -> langchain-ibm
             - 'nvidia' -> langchain-nvidia-ai-endpoints
             - 'xai' -> langchain-xai
+            - 'perplexity' -> langchain-perplexity
 
         Will attempt to infer model_provider from model if not specified. The
         following providers will be inferred based on these model prefixes:
@@ -138,6 +133,7 @@ def init_chat_model(
             - 'mistral...' -> 'mistralai'
             - 'deepseek...' -> 'deepseek'
             - 'grok...' -> 'xai'
+            - 'sonar...' -> 'perplexity'
         configurable_fields: Which model parameters are
             configurable:
 
@@ -351,7 +347,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_anthropic")
         from langchain_anthropic import ChatAnthropic
 
-        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "azure_openai":
         _check_pkg("langchain_openai")
         from langchain_openai import AzureChatOpenAI
@@ -406,7 +402,7 @@ def _init_chat_model_helper(
         _check_pkg("langchain_mistralai")
         from langchain_mistralai import ChatMistralAI
 
-        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg]
+        return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
     elif model_provider == "huggingface":
         _check_pkg("langchain_huggingface")
         from langchain_huggingface import ChatHuggingFace
@@ -453,6 +449,11 @@ def _init_chat_model_helper(
         from langchain_xai import ChatXAI
 
         return ChatXAI(model=model, **kwargs)
+    elif model_provider == "perplexity":
+        _check_pkg("langchain_perplexity")
+        from langchain_perplexity import ChatPerplexity
+
+        return ChatPerplexity(model=model, **kwargs)
     else:
         supported = ", ".join(_SUPPORTED_PROVIDERS)
         raise ValueError(
@@ -481,6 +482,7 @@ _SUPPORTED_PROVIDERS = {
     "deepseek",
     "ibm",
     "xai",
+    "perplexity",
 }
 
 
@@ -503,11 +505,13 @@ def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
         return "deepseek"
     elif model_name.startswith("grok"):
         return "xai"
+    elif model_name.startswith("sonar"):
+        return "perplexity"
     else:
         return None
 
 
-def _parse_model(model: str, model_provider: Optional[str]) -> Tuple[str, str]:
+def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
     if (
         not model_provider
         and ":" in model
@@ -547,12 +551,12 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         self,
         *,
         default_config: Optional[dict] = None,
-        configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = "any",
+        configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = "any",
         config_prefix: str = "",
-        queued_declarative_operations: Sequence[Tuple[str, Tuple, Dict]] = (),
+        queued_declarative_operations: Sequence[tuple[str, tuple, dict]] = (),
     ) -> None:
         self._default_config: dict = default_config or {}
-        self._configurable_fields: Union[Literal["any"], List[str]] = (
+        self._configurable_fields: Union[Literal["any"], list[str]] = (
             configurable_fields
             if configurable_fields == "any"
             else list(configurable_fields)
@@ -562,7 +566,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             if config_prefix and not config_prefix.endswith("_")
             else config_prefix
         )
-        self._queued_declarative_operations: List[Tuple[str, Tuple, Dict]] = list(
+        self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
             queued_declarative_operations
         )
 
@@ -663,7 +667,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         return Union[
             str,
             Union[StringPromptValue, ChatPromptValueConcrete],
-            List[AnyMessage],
+            list[AnyMessage],
         ]
 
     def invoke(
@@ -701,12 +705,12 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
 
     def batch(
         self,
-        inputs: List[LanguageModelInput],
-        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        inputs: list[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
-    ) -> List[Any]:
+    ) -> list[Any]:
         config = config or None
         # If <= 1 config use the underlying models batch implementation.
         if config is None or isinstance(config, dict) or len(config) <= 1:
@@ -724,12 +728,12 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
 
     async def abatch(
         self,
-        inputs: List[LanguageModelInput],
-        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        inputs: list[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, list[RunnableConfig]]] = None,
         *,
         return_exceptions: bool = False,
         **kwargs: Optional[Any],
-    ) -> List[Any]:
+    ) -> list[Any]:
         config = config or None
         # If <= 1 config use the underlying models batch implementation.
         if config is None or isinstance(config, dict) or len(config) <= 1:
@@ -752,7 +756,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> Iterator[Tuple[int, Union[Any, Exception]]]:
+    ) -> Iterator[tuple[int, Union[Any, Exception]]]:
         config = config or None
         # If <= 1 config use the underlying models batch implementation.
         if config is None or isinstance(config, dict) or len(config) <= 1:
@@ -775,7 +779,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         *,
         return_exceptions: bool = False,
         **kwargs: Any,
-    ) -> AsyncIterator[Tuple[int, Any]]:
+    ) -> AsyncIterator[tuple[int, Any]]:
         config = config or None
         # If <= 1 config use the underlying models batch implementation.
         if config is None or isinstance(config, dict) or len(config) <= 1:
@@ -801,8 +805,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         config: Optional[RunnableConfig] = None,
         **kwargs: Optional[Any],
     ) -> Iterator[Any]:
-        for x in self._model(config).transform(input, config=config, **kwargs):
-            yield x
+        yield from self._model(config).transform(input, config=config, **kwargs)
 
     async def atransform(
         self,
@@ -908,13 +911,13 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
     # Explicitly added to satisfy downstream linters.
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        tools: Sequence[Union[dict[str, Any], type[BaseModel], Callable, BaseTool]],
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         return self.__getattr__("bind_tools")(tools, **kwargs)
 
     # Explicitly added to satisfy downstream linters.
     def with_structured_output(
-        self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any
-    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+        self, schema: Union[dict, type[BaseModel]], **kwargs: Any
+    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         return self.__getattr__("with_structured_output")(schema, **kwargs)
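Functionally, the hunks above add Perplexity as a supported provider and teach provider inference to recognize 'sonar...' model names; the rest is typing cleanup with no API change. A hedged usage sketch (assumes langchain-perplexity is installed and PPLX_API_KEY is set in the environment; neither is shown in the diff):

```python
from langchain.chat_models import init_chat_model

# Explicit provider, hitting the new ChatPerplexity branch in
# _init_chat_model_helper.
llm = init_chat_model("sonar", model_provider="perplexity")

# Same result: the new startswith("sonar") rule infers the provider.
llm = init_chat_model("sonar")

# With no model fixed at init time, a _ConfigurableModel is returned and the
# model can be chosen per call via the config.
configurable = init_chat_model(temperature=0)
configurable.invoke("Hello", config={"configurable": {"model": "sonar"}})
```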
langchain/embeddings/__init__.py

@@ -87,7 +87,7 @@ class HypotheticalDocumentEmbedder:
         )
         from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
 
-        return H(*args, **kwargs)  # type: ignore
+        return H(*args, **kwargs)  # type: ignore[return-value]
 
     @classmethod
     def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
langchain/embeddings/base.py

@@ -1,6 +1,6 @@
 import functools
 from importlib import util
-from typing import Any, List, Optional, Tuple, Union
+from typing import Any, Optional, Union
 
 from langchain_core._api import beta
 from langchain_core.embeddings import Embeddings
@@ -25,7 +25,7 @@ def _get_provider_list() -> str:
     )
 
 
-def _parse_model_string(model_name: str) -> Tuple[str, str]:
+def _parse_model_string(model_name: str) -> tuple[str, str]:
     """Parse a model string into provider and model name components.
 
     The model string should be in the format 'provider:model-name', where provider
@@ -78,7 +78,7 @@ def _parse_model_string(model_name: str) -> Tuple[str, str]:
 
 def _infer_model_and_provider(
     model: str, *, provider: Optional[str] = None
-) -> Tuple[str, str]:
+) -> tuple[str, str]:
     if not model.strip():
         raise ValueError("Model name cannot be empty")
     if provider is None and ":" in model:
@@ -122,7 +122,7 @@ def init_embeddings(
     *,
     provider: Optional[str] = None,
     **kwargs: Any,
-) -> Union[Embeddings, Runnable[Any, List[float]]]:
+) -> Union[Embeddings, Runnable[Any, list[float]]]:
     """Initialize an embeddings model from a model name and optional provider.
 
     **Note:** Must have the integration package corresponding to the model provider
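The init_embeddings changes are annotation-only; the provider:model parsing performed by _parse_model_string and _infer_model_and_provider is unchanged. A sketch of the two call shapes (assumes langchain-openai is installed and OPENAI_API_KEY is set; both are assumptions):

```python
from langchain.embeddings import init_embeddings

# "provider:model" form, split by _parse_model_string.
emb = init_embeddings("openai:text-embedding-3-small")

# Equivalent explicit form, routed through _infer_model_and_provider.
emb = init_embeddings("text-embedding-3-small", provider="openai")

vector = emb.embed_query("hello world")  # -> list[float]
```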
langchain/embeddings/cache.py

@@ -12,8 +12,9 @@ from __future__ import annotations
 import hashlib
 import json
 import uuid
+from collections.abc import Sequence
 from functools import partial
-from typing import Callable, List, Optional, Sequence, Union, cast
+from typing import Callable, Optional, Union, cast
 
 from langchain_core.embeddings import Embeddings
 from langchain_core.stores import BaseStore, ByteStore
@@ -45,9 +46,9 @@ def _value_serializer(value: Sequence[float]) -> bytes:
     return json.dumps(value).encode()
 
 
-def _value_deserializer(serialized_value: bytes) -> List[float]:
+def _value_deserializer(serialized_value: bytes) -> list[float]:
     """Deserialize a value."""
-    return cast(List[float], json.loads(serialized_value.decode()))
+    return cast(list[float], json.loads(serialized_value.decode()))
 
 
 class CacheBackedEmbeddings(Embeddings):
@@ -88,10 +89,10 @@ class CacheBackedEmbeddings(Embeddings):
     def __init__(
         self,
         underlying_embeddings: Embeddings,
-        document_embedding_store: BaseStore[str, List[float]],
+        document_embedding_store: BaseStore[str, list[float]],
         *,
         batch_size: Optional[int] = None,
-        query_embedding_store: Optional[BaseStore[str, List[float]]] = None,
+        query_embedding_store: Optional[BaseStore[str, list[float]]] = None,
     ) -> None:
         """Initialize the embedder.
 
@@ -108,7 +109,7 @@ class CacheBackedEmbeddings(Embeddings):
         self.underlying_embeddings = underlying_embeddings
         self.batch_size = batch_size
 
-    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed a list of texts.
 
         The method first checks the cache for the embeddings.
@@ -121,10 +122,10 @@ class CacheBackedEmbeddings(Embeddings):
         Returns:
             A list of embeddings for the given texts.
         """
-        vectors: List[Union[List[float], None]] = self.document_embedding_store.mget(
+        vectors: list[Union[list[float], None]] = self.document_embedding_store.mget(
             texts
         )
-        all_missing_indices: List[int] = [
+        all_missing_indices: list[int] = [
             i for i, vector in enumerate(vectors) if vector is None
         ]
 
@@ -138,10 +139,10 @@ class CacheBackedEmbeddings(Embeddings):
             vectors[index] = updated_vector
 
         return cast(
-            List[List[float]], vectors
+            list[list[float]], vectors
         )  # Nones should have been resolved by now
 
-    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed a list of texts.
 
         The method first checks the cache for the embeddings.
@@ -154,10 +155,10 @@ class CacheBackedEmbeddings(Embeddings):
         Returns:
             A list of embeddings for the given texts.
         """
-        vectors: List[
-            Union[List[float], None]
+        vectors: list[
+            Union[list[float], None]
         ] = await self.document_embedding_store.amget(texts)
-        all_missing_indices: List[int] = [
+        all_missing_indices: list[int] = [
             i for i, vector in enumerate(vectors) if vector is None
         ]
 
@@ -175,10 +176,10 @@ class CacheBackedEmbeddings(Embeddings):
             vectors[index] = updated_vector
 
         return cast(
-            List[List[float]], vectors
+            list[list[float]], vectors
         )  # Nones should have been resolved by now
 
-    def embed_query(self, text: str) -> List[float]:
+    def embed_query(self, text: str) -> list[float]:
         """Embed query text.
 
         By default, this method does not cache queries. To enable caching, set the
@@ -201,7 +202,7 @@ class CacheBackedEmbeddings(Embeddings):
             self.query_embedding_store.mset([(text, vector)])
         return vector
 
-    async def aembed_query(self, text: str) -> List[float]:
+    async def aembed_query(self, text: str) -> list[float]:
         """Embed query text.
 
         By default, this method does not cache queries. To enable caching, set the
@@ -250,7 +251,7 @@ class CacheBackedEmbeddings(Embeddings):
         """
         namespace = namespace
         key_encoder = _create_key_encoder(namespace)
-        document_embedding_store = EncoderBackedStore[str, List[float]](
+        document_embedding_store = EncoderBackedStore[str, list[float]](
             document_embedding_cache,
             key_encoder,
             _value_serializer,
@@ -261,7 +262,7 @@ class CacheBackedEmbeddings(Embeddings):
         elif query_embedding_cache is False:
             query_embedding_store = None
         else:
-            query_embedding_store = EncoderBackedStore[str, List[float]](
+            query_embedding_store = EncoderBackedStore[str, list[float]](
                 query_embedding_cache,
                 key_encoder,
                 _value_serializer,
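The cache.py edits are likewise signature-only, so the cache-on-read behavior of embed_documents is untouched. A minimal sketch of the class these hunks run through (the OpenAI embedder and the cache directory are assumptions; any Embeddings implementation and any ByteStore work):

```python
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_openai import OpenAIEmbeddings  # assumption: requires OPENAI_API_KEY

underlying = OpenAIEmbeddings(model="text-embedding-3-small")
store = LocalFileStore("./embedding_cache/")  # any ByteStore works here

cached = CacheBackedEmbeddings.from_bytes_store(
    underlying,
    store,
    namespace=underlying.model,  # key by model so caches don't collide
)

# First call computes and stores vectors; the repeat is served from the store
# via document_embedding_store.mget, the path shown in the hunks above.
vectors = cached.embed_documents(["hello", "world"])
vectors_again = cached.embed_documents(["hello", "world"])
```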
langchain/evaluation/agents/trajectory_eval_chain.py

@@ -6,13 +6,10 @@ chain (LLMChain) to generate the reasoning and scores.
 """
 
 import re
+from collections.abc import Sequence
 from typing import (
     Any,
-    Dict,
-    List,
     Optional,
-    Sequence,
-    Tuple,
     TypedDict,
     Union,
     cast,
@@ -145,7 +142,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
     # 0
     """
 
-    agent_tools: Optional[List[BaseTool]] = None
+    agent_tools: Optional[list[BaseTool]] = None
     """A list of tools available to the agent."""
     eval_chain: LLMChain
     """The language model chain used for evaluation."""
@@ -184,7 +181,7 @@ Description: {tool.description}"""
 
     @staticmethod
     def get_agent_trajectory(
-        steps: Union[str, Sequence[Tuple[AgentAction, str]]],
+        steps: Union[str, Sequence[tuple[AgentAction, str]]],
     ) -> str:
         """Get the agent trajectory as a formatted string.
 
@@ -263,7 +260,7 @@ The following is the expected answer. Use this to measure correctness:
         )
 
     @property
-    def input_keys(self) -> List[str]:
+    def input_keys(self) -> list[str]:
         """Get the input keys for the chain.
 
         Returns:
@@ -272,7 +269,7 @@ The following is the expected answer. Use this to measure correctness:
         return ["question", "agent_trajectory", "answer", "reference"]
 
     @property
-    def output_keys(self) -> List[str]:
+    def output_keys(self) -> list[str]:
         """Get the output keys for the chain.
 
         Returns:
@@ -280,16 +277,16 @@ The following is the expected answer. Use this to measure correctness:
         """
         return ["score", "reasoning"]
 
-    def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
+    def prep_inputs(self, inputs: Union[dict[str, Any], Any]) -> dict[str, str]:
         """Validate and prep inputs."""
         inputs["reference"] = self._format_reference(inputs.get("reference"))
         return super().prep_inputs(inputs)
 
     def _call(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Run the chain and generate the output.
 
         Args:
@@ -311,9 +308,9 @@ The following is the expected answer. Use this to measure correctness:
 
     async def _acall(
         self,
-        inputs: Dict[str, str],
+        inputs: dict[str, str],
         run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Run the chain and generate the output.
 
         Args:
@@ -338,11 +335,11 @@ The following is the expected answer. Use this to measure correctness:
         *,
         prediction: str,
         input: str,
-        agent_trajectory: Sequence[Tuple[AgentAction, str]],
+        agent_trajectory: Sequence[tuple[AgentAction, str]],
         reference: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
@@ -380,11 +377,11 @@ The following is the expected answer. Use this to measure correctness:
         *,
         prediction: str,
         input: str,
-        agent_trajectory: Sequence[Tuple[AgentAction, str]],
+        agent_trajectory: Sequence[tuple[AgentAction, str]],
         reference: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
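Every trajectory-evaluator edit above is an annotation swap. For orientation, the get_agent_trajectory staticmethod whose signature changed formats intermediate steps like this (runnable as-is, no LLM needed):

```python
from langchain_core.agents import AgentAction

from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain

# Matches the new Sequence[tuple[AgentAction, str]] annotation.
steps = [
    (
        AgentAction(tool="search", tool_input="capital of France", log="look it up"),
        "Paris",
    ),
]
print(TrajectoryEvalChain.get_agent_trajectory(steps))
```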
langchain/evaluation/comparison/eval_chain.py

@@ -4,7 +4,7 @@ from __future__ import annotations
 
 import logging
 import re
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Optional, Union
 
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
@@ -49,7 +49,7 @@ _SUPPORTED_CRITERIA = {
 
 
 def resolve_pairwise_criteria(
-    criteria: Optional[Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]]],
+    criteria: Optional[Union[CRITERIA_TYPE, str, list[CRITERIA_TYPE]]],
 ) -> dict:
     """Resolve the criteria for the pairwise evaluator.
 
@@ -95,7 +95,7 @@
     return criteria_
 
 
-class PairwiseStringResultOutputParser(BaseOutputParser[dict]):  # type: ignore[override]
+class PairwiseStringResultOutputParser(BaseOutputParser[dict]):
     """A parser for the output of the PairwiseStringEvalChain.
 
     Attributes:
@@ -113,7 +113,7 @@ class PairwiseStringResultOutputParser(BaseOutputParser[dict]): # type: ignore[
         """
         return "pairwise_string_result"
 
-    def parse(self, text: str) -> Dict[str, Any]:
+    def parse(self, text: str) -> dict[str, Any]:
         """Parse the output text.
 
         Args:
@@ -151,7 +151,7 @@ class PairwiseStringResultOutputParser(BaseOutputParser[dict]): # type: ignore[
         }
 
 
-class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs.
 
@@ -314,8 +314,8 @@ Performance may be significantly worse with other models."
         input: Optional[str] = None,
         reference: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
@@ -356,8 +356,8 @@ Performance may be significantly worse with other models."
         reference: Optional[str] = None,
         input: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
@@ -391,7 +391,7 @@ Performance may be significantly worse with other models."
         return self._prepare_output(result)
 
 
-class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):  # type: ignore[override]
+class LabeledPairwiseStringEvalChain(PairwiseStringEvalChain):
     """A chain for comparing two outputs, such as the outputs
     of two models, prompts, or outputs of a single model on similar inputs,
     with labeled preferences.
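The pairwise-comparison edits drop now-unneeded type: ignore[override] comments and swap annotations; parsing and scoring are unchanged. A smoke-test sketch under stated assumptions: FakeListChatModel stands in for a real judge model, and the canned [[A]] verdict is the format PairwiseStringResultOutputParser.parse expects:

```python
from langchain_core.language_models import FakeListChatModel

from langchain.evaluation.comparison import PairwiseStringEvalChain

# Fake judge; a real deployment would pass a production chat model here.
judge = FakeListChatModel(responses=["Assistant A is more accurate.\n[[A]]"])
chain = PairwiseStringEvalChain.from_llm(llm=judge)

result = chain.evaluate_string_pairs(
    prediction="Paris",
    prediction_b="Lyon",
    input="What is the capital of France?",
)
print(result)  # e.g. {'reasoning': '...', 'value': 'A', 'score': 1}
```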
langchain/evaluation/criteria/eval_chain.py

@@ -1,8 +1,9 @@
 from __future__ import annotations
 
 import re
+from collections.abc import Mapping
 from enum import Enum
-from typing import Any, Dict, List, Mapping, Optional, Union
+from typing import Any, Optional, Union
 
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.language_models import BaseLanguageModel
@@ -68,7 +69,7 @@ class CriteriaResultOutputParser(BaseOutputParser[dict]):
     def _type(self) -> str:
         return "criteria_result"
 
-    def parse(self, text: str) -> Dict[str, Any]:
+    def parse(self, text: str) -> dict[str, Any]:
         """Parse the output text.
 
         Args:
@@ -121,7 +122,7 @@ CRITERIA_TYPE = Union[
 
 def resolve_criteria(
     criteria: Optional[Union[CRITERIA_TYPE, str]],
-) -> Dict[str, str]:
+) -> dict[str, str]:
     """Resolve the criteria to evaluate.
 
     Parameters
@@ -164,7 +165,7 @@ def resolve_criteria(
     return criteria_
 
 
-class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):  # type: ignore[override]
+class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
     """LLM Chain for evaluating runs against criteria.
 
     Parameters
@@ -285,7 +286,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): # type: ignor
     def resolve_criteria(
         cls,
         criteria: Optional[Union[CRITERIA_TYPE, str]],
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         """Resolve the criteria to evaluate.
 
         Parameters
@@ -404,8 +405,8 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): # type: ignor
         reference: Optional[str] = None,
         input: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
@@ -459,8 +460,8 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): # type: ignor
         reference: Optional[str] = None,
         input: Optional[str] = None,
         callbacks: Callbacks = None,
-        tags: Optional[List[str]] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: Optional[list[str]] = None,
+        metadata: Optional[dict[str, Any]] = None,
         include_run_info: bool = False,
         **kwargs: Any,
     ) -> dict:
@@ -508,7 +509,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): # type: ignor
         return self._prepare_output(result)
 
 
-class LabeledCriteriaEvalChain(CriteriaEvalChain):  # type: ignore[override]
+class LabeledCriteriaEvalChain(CriteriaEvalChain):
     """Criteria evaluation chain that requires references."""
 
     @classmethod
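Same story for the criteria evaluators: the type: ignore[override] comments go away and Dict/List annotations become builtins. A smoke-test sketch under the same assumptions (fake model in place of a real grader; the trailing Y is the verdict token CriteriaResultOutputParser.parse looks for):

```python
from langchain_core.language_models import FakeListChatModel

from langchain.evaluation.criteria import CriteriaEvalChain

# Fake grader whose canned response ends in the "Y" verdict token.
grader = FakeListChatModel(responses=["The submission is short and direct.\nY"])
chain = CriteriaEvalChain.from_llm(llm=grader, criteria="conciseness")

result = chain.evaluate_strings(
    prediction="Paris.",
    input="What is the capital of France?",
)
print(result)  # e.g. {'reasoning': '...', 'value': 'Y', 'score': 1}
```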