langchain 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release.


This version of langchain might be problematic.

Files changed (197)
  1. langchain/__init__.py +1 -0
  2. langchain/_api/module_import.py +2 -2
  3. langchain/agents/__init__.py +5 -4
  4. langchain/agents/agent.py +272 -50
  5. langchain/agents/agent_iterator.py +20 -0
  6. langchain/agents/agent_toolkits/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  10. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  11. langchain/agents/agent_types.py +1 -0
  12. langchain/agents/chat/base.py +37 -1
  13. langchain/agents/chat/output_parser.py +14 -0
  14. langchain/agents/conversational/base.py +38 -6
  15. langchain/agents/conversational/output_parser.py +10 -0
  16. langchain/agents/conversational_chat/base.py +42 -3
  17. langchain/agents/format_scratchpad/__init__.py +1 -0
  18. langchain/agents/format_scratchpad/log.py +12 -1
  19. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  20. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  21. langchain/agents/format_scratchpad/tools.py +11 -7
  22. langchain/agents/initialize.py +15 -7
  23. langchain/agents/json_chat/base.py +9 -3
  24. langchain/agents/loading.py +7 -0
  25. langchain/agents/mrkl/base.py +39 -10
  26. langchain/agents/mrkl/output_parser.py +12 -0
  27. langchain/agents/openai_assistant/base.py +37 -14
  28. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  29. langchain/agents/openai_functions_agent/base.py +61 -10
  30. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  31. langchain/agents/openai_tools/base.py +3 -0
  32. langchain/agents/output_parsers/__init__.py +1 -0
  33. langchain/agents/react/base.py +1 -0
  34. langchain/agents/self_ask_with_search/base.py +1 -0
  35. langchain/agents/structured_chat/output_parser.py +3 -3
  36. langchain/agents/tool_calling_agent/base.py +13 -3
  37. langchain/agents/tools.py +3 -0
  38. langchain/agents/utils.py +9 -1
  39. langchain/base_language.py +1 -0
  40. langchain/callbacks/__init__.py +1 -0
  41. langchain/callbacks/base.py +1 -0
  42. langchain/callbacks/streaming_stdout.py +1 -0
  43. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  44. langchain/callbacks/tracers/evaluation.py +1 -0
  45. langchain/chains/api/base.py +5 -2
  46. langchain/chains/base.py +1 -1
  47. langchain/chains/combine_documents/base.py +59 -0
  48. langchain/chains/combine_documents/map_reduce.py +4 -2
  49. langchain/chains/combine_documents/map_rerank.py +5 -3
  50. langchain/chains/combine_documents/refine.py +4 -2
  51. langchain/chains/combine_documents/stuff.py +1 -0
  52. langchain/chains/constitutional_ai/base.py +1 -0
  53. langchain/chains/constitutional_ai/models.py +1 -0
  54. langchain/chains/constitutional_ai/principles.py +1 -0
  55. langchain/chains/conversation/base.py +81 -1
  56. langchain/chains/conversational_retrieval/base.py +2 -1
  57. langchain/chains/elasticsearch_database/base.py +2 -1
  58. langchain/chains/hyde/base.py +1 -0
  59. langchain/chains/llm.py +4 -2
  60. langchain/chains/llm_checker/base.py +4 -3
  61. langchain/chains/llm_math/base.py +1 -0
  62. langchain/chains/loading.py +2 -1
  63. langchain/chains/mapreduce.py +1 -0
  64. langchain/chains/moderation.py +1 -1
  65. langchain/chains/natbot/base.py +1 -0
  66. langchain/chains/openai_functions/base.py +1 -0
  67. langchain/chains/openai_functions/extraction.py +6 -6
  68. langchain/chains/openai_tools/extraction.py +3 -3
  69. langchain/chains/qa_generation/base.py +47 -1
  70. langchain/chains/qa_with_sources/__init__.py +1 -0
  71. langchain/chains/qa_with_sources/loading.py +1 -0
  72. langchain/chains/qa_with_sources/vector_db.py +1 -1
  73. langchain/chains/query_constructor/base.py +1 -0
  74. langchain/chains/query_constructor/ir.py +1 -0
  75. langchain/chains/question_answering/chain.py +1 -0
  76. langchain/chains/retrieval_qa/base.py +3 -2
  77. langchain/chains/router/base.py +1 -0
  78. langchain/chains/router/llm_router.py +2 -1
  79. langchain/chains/router/multi_prompt.py +1 -0
  80. langchain/chains/router/multi_retrieval_qa.py +1 -0
  81. langchain/chains/sequential.py +2 -1
  82. langchain/chains/structured_output/base.py +12 -12
  83. langchain/chains/summarize/chain.py +1 -0
  84. langchain/chains/transform.py +4 -3
  85. langchain/chat_models/__init__.py +1 -0
  86. langchain/chat_models/base.py +2 -2
  87. langchain/docstore/__init__.py +1 -0
  88. langchain/document_loaders/__init__.py +1 -0
  89. langchain/document_transformers/__init__.py +1 -0
  90. langchain/embeddings/__init__.py +0 -1
  91. langchain/evaluation/__init__.py +2 -1
  92. langchain/evaluation/agents/__init__.py +1 -0
  93. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  94. langchain/evaluation/comparison/__init__.py +1 -0
  95. langchain/evaluation/comparison/eval_chain.py +1 -0
  96. langchain/evaluation/comparison/prompt.py +1 -0
  97. langchain/evaluation/embedding_distance/__init__.py +1 -0
  98. langchain/evaluation/embedding_distance/base.py +1 -0
  99. langchain/evaluation/loading.py +1 -0
  100. langchain/evaluation/parsing/base.py +1 -0
  101. langchain/evaluation/qa/__init__.py +1 -0
  102. langchain/evaluation/qa/eval_chain.py +1 -0
  103. langchain/evaluation/qa/generate_chain.py +1 -0
  104. langchain/evaluation/schema.py +1 -0
  105. langchain/evaluation/scoring/__init__.py +1 -0
  106. langchain/evaluation/scoring/eval_chain.py +1 -0
  107. langchain/evaluation/scoring/prompt.py +1 -0
  108. langchain/evaluation/string_distance/__init__.py +1 -0
  109. langchain/example_generator.py +1 -0
  110. langchain/formatting.py +1 -0
  111. langchain/globals/__init__.py +1 -0
  112. langchain/graphs/__init__.py +1 -0
  113. langchain/indexes/__init__.py +1 -0
  114. langchain/indexes/_sql_record_manager.py +9 -5
  115. langchain/indexes/graph.py +1 -0
  116. langchain/indexes/prompts/__init__.py +1 -0
  117. langchain/input.py +1 -0
  118. langchain/llms/__init__.py +1 -0
  119. langchain/load/__init__.py +1 -0
  120. langchain/memory/__init__.py +5 -0
  121. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  122. langchain/output_parsers/__init__.py +1 -0
  123. langchain/output_parsers/combining.py +1 -1
  124. langchain/output_parsers/enum.py +7 -3
  125. langchain/output_parsers/fix.py +57 -16
  126. langchain/output_parsers/pandas_dataframe.py +1 -1
  127. langchain/output_parsers/regex.py +1 -1
  128. langchain/output_parsers/regex_dict.py +1 -1
  129. langchain/output_parsers/retry.py +76 -29
  130. langchain/output_parsers/structured.py +3 -3
  131. langchain/output_parsers/yaml.py +4 -0
  132. langchain/prompts/__init__.py +1 -0
  133. langchain/prompts/example_selector/__init__.py +1 -0
  134. langchain/python.py +1 -0
  135. langchain/requests.py +1 -0
  136. langchain/retrievers/__init__.py +1 -0
  137. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  138. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  139. langchain/retrievers/ensemble.py +18 -3
  140. langchain/retrievers/multi_query.py +2 -1
  141. langchain/retrievers/re_phraser.py +2 -1
  142. langchain/retrievers/self_query/base.py +9 -8
  143. langchain/schema/__init__.py +1 -0
  144. langchain/schema/runnable/__init__.py +1 -0
  145. langchain/serpapi.py +1 -0
  146. langchain/smith/__init__.py +6 -5
  147. langchain/smith/evaluation/__init__.py +0 -1
  148. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  149. langchain/sql_database.py +1 -0
  150. langchain/storage/__init__.py +1 -0
  151. langchain/storage/_lc_store.py +1 -0
  152. langchain/storage/in_memory.py +1 -0
  153. langchain/text_splitter.py +1 -0
  154. langchain/tools/__init__.py +1 -0
  155. langchain/tools/amadeus/__init__.py +1 -0
  156. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  157. langchain/tools/bing_search/__init__.py +1 -0
  158. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  159. langchain/tools/ddg_search/__init__.py +1 -0
  160. langchain/tools/edenai/__init__.py +1 -0
  161. langchain/tools/eleven_labs/__init__.py +1 -0
  162. langchain/tools/file_management/__init__.py +1 -0
  163. langchain/tools/github/__init__.py +1 -1
  164. langchain/tools/gitlab/__init__.py +1 -1
  165. langchain/tools/gmail/__init__.py +1 -0
  166. langchain/tools/golden_query/__init__.py +1 -0
  167. langchain/tools/google_cloud/__init__.py +1 -0
  168. langchain/tools/google_finance/__init__.py +1 -0
  169. langchain/tools/google_jobs/__init__.py +1 -0
  170. langchain/tools/google_lens/__init__.py +1 -0
  171. langchain/tools/google_places/__init__.py +1 -0
  172. langchain/tools/google_scholar/__init__.py +1 -0
  173. langchain/tools/google_search/__init__.py +1 -0
  174. langchain/tools/google_trends/__init__.py +1 -0
  175. langchain/tools/human/__init__.py +1 -0
  176. langchain/tools/memorize/__init__.py +1 -0
  177. langchain/tools/metaphor_search/__init__.py +1 -0
  178. langchain/tools/multion/__init__.py +1 -0
  179. langchain/tools/office365/__init__.py +1 -0
  180. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  181. langchain/tools/openweathermap/__init__.py +1 -0
  182. langchain/tools/playwright/__init__.py +1 -0
  183. langchain/tools/shell/__init__.py +1 -0
  184. langchain/tools/slack/__init__.py +1 -0
  185. langchain/tools/sql_database/prompt.py +1 -0
  186. langchain/tools/steamship_image_generation/__init__.py +1 -0
  187. langchain/tools/tavily_search/__init__.py +1 -0
  188. langchain/tools/wolfram_alpha/__init__.py +1 -0
  189. langchain/tools/zapier/__init__.py +1 -0
  190. langchain/utilities/__init__.py +1 -0
  191. langchain/utilities/python.py +1 -0
  192. langchain/vectorstores/__init__.py +1 -0
  193. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/METADATA +3 -4
  194. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/RECORD +197 -196
  195. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/LICENSE +0 -0
  196. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/WHEEL +0 -0
  197. {langchain-0.2.5.dist-info → langchain-0.2.7.dist-info}/entry_points.txt +0 -0
@@ -61,7 +61,7 @@ class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
  ) -> List[Document]:
  raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
 
- @root_validator()
+ @root_validator(pre=True)
  def raise_deprecation(cls, values: Dict) -> Dict:
  warnings.warn(
  "`VectorDBQAWithSourcesChain` is deprecated - "
@@ -1,4 +1,5 @@
  """LLM Chain for turning a user text query into a structured query."""
+
  from __future__ import annotations
 
  import json
@@ -1,4 +1,5 @@
  """Internal representation of a structured query language."""
+
  from langchain_core.structured_query import (
  Comparator,
  Comparison,
@@ -1,4 +1,5 @@
  """Load question answering chains."""
+
  from typing import Any, Mapping, Optional, Protocol
 
  from langchain_core.callbacks import BaseCallbackManager, Callbacks
@@ -1,4 +1,5 @@
  """Chain for question-answering against a vector database."""
+
  from __future__ import annotations
 
  import inspect
@@ -284,7 +285,7 @@ class VectorDBQA(BaseRetrievalQA):
  search_kwargs: Dict[str, Any] = Field(default_factory=dict)
  """Extra search args."""
 
- @root_validator()
+ @root_validator(pre=True)
  def raise_deprecation(cls, values: Dict) -> Dict:
  warnings.warn(
  "`VectorDBQA` is deprecated - "
@@ -292,7 +293,7 @@ class VectorDBQA(BaseRetrievalQA):
  )
  return values
 
- @root_validator()
+ @root_validator(pre=True)
  def validate_search_type(cls, values: Dict) -> Dict:
  """Validate search type."""
  if "search_type" in values:
@@ -1,4 +1,5 @@
  """Base classes for chain routing."""
+
  from __future__ import annotations
 
  from abc import ABC
@@ -1,4 +1,5 @@
  """Base classes for LLM-powered router chains."""
+
  from __future__ import annotations
 
  from typing import Any, Dict, List, Optional, Type, cast
@@ -24,7 +25,7 @@ class LLMRouterChain(RouterChain):
  llm_chain: LLMChain
  """LLM chain used to perform routing"""
 
- @root_validator()
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_prompt(cls, values: dict) -> dict:
  prompt = values["llm_chain"].prompt
  if prompt.output_parser is None:
@@ -1,4 +1,5 @@
  """Use a single chain to route an input to one of multiple llm chains."""
+
  from __future__ import annotations
 
  from typing import Any, Dict, List, Optional
@@ -1,4 +1,5 @@
  """Use a single chain to route an input to one of multiple retrieval qa chains."""
+
  from __future__ import annotations
 
  from typing import Any, Dict, List, Mapping, Optional
@@ -1,4 +1,5 @@
  """Chain pipeline where the outputs of one step feed directly into next."""
+
  from typing import Any, Dict, List, Optional
 
  from langchain_core.callbacks import (
@@ -152,7 +153,7 @@ class SimpleSequentialChain(Chain):
  """
  return [self.output_key]
 
- @root_validator()
+ @root_validator(pre=False, skip_on_failure=True)
  def validate_chains(cls, values: Dict) -> Dict:
  """Validate that chains are all single input/output."""
  for chain in values["chains"]:
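A recurring change across these hunks: bare `@root_validator()` decorators gain explicit arguments. Deprecation warnings become `@root_validator(pre=True)` (run on the raw input before field validation) and cross-field checks become `@root_validator(pre=False, skip_on_failure=True)` (run after field validation, and skipped if it failed). A minimal sketch of the two variants on a hypothetical model, using the `langchain_core.pydantic_v1` shim that langchain 0.2.x builds on:

```python
from langchain_core.pydantic_v1 import BaseModel, root_validator


class ExampleConfig(BaseModel):  # hypothetical model, not part of this diff
    search_type: str = "similarity"

    @root_validator(pre=True)
    def raise_deprecation(cls, values: dict) -> dict:
        # pre=True: receives the raw input dict before field validation,
        # so the warning fires even if other fields are invalid.
        print("ExampleConfig is deprecated")
        return values

    @root_validator(pre=False, skip_on_failure=True)
    def validate_search_type(cls, values: dict) -> dict:
        # pre=False + skip_on_failure=True: runs after field validation and is
        # skipped when validation failed, so the keys below are safe to index.
        if values["search_type"] not in ("similarity", "mmr"):
            raise ValueError("search_type must be 'similarity' or 'mmr'")
        return values
```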
@@ -32,13 +32,13 @@ from langchain_core.utils.function_calling import (
  "LangChain has introduced a method called `with_structured_output` that "
  "is available on ChatModels capable of tool calling. "
  "You can read more about the method here: "
- "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+ "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
  "Please follow our extraction use case documentation for more guidelines "
  "on how to do information extraction with LLMs. "
- "https://python.langchain.com/docs/use_cases/extraction/. "
+ "<https://python.langchain.com/docs/use_cases/extraction/>. "
  "If you notice other issues, please provide "
  "feedback here: "
- "https://github.com/langchain-ai/langchain/discussions/18154"
+ "<https://github.com/langchain-ai/langchain/discussions/18154>"
  ),
  removal="0.3.0",
  alternative=(
@@ -150,13 +150,13 @@ def create_openai_fn_runnable(
  "LangChain has introduced a method called `with_structured_output` that "
  "is available on ChatModels capable of tool calling. "
  "You can read more about the method here: "
- "https://python.langchain.com/docs/modules/model_io/chat/structured_output/ "
+ "<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>."
  "Please follow our extraction use case documentation for more guidelines "
  "on how to do information extraction with LLMs. "
- "https://python.langchain.com/docs/use_cases/extraction/. "
+ "<https://python.langchain.com/docs/use_cases/extraction/>. "
  "If you notice other issues, please provide "
  "feedback here: "
- "https://github.com/langchain-ai/langchain/discussions/18154"
+ "<https://github.com/langchain-ai/langchain/discussions/18154>"
  ),
  removal="0.3.0",
  alternative=(
@@ -466,9 +466,9 @@ def _get_openai_tool_output_parser(
  first_tool_only: bool = False,
  ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
  if isinstance(tool, type) and issubclass(tool, BaseModel):
- output_parser: Union[
- BaseOutputParser, BaseGenerationOutputParser
- ] = PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
+ output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
+ PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
+ )
  else:
  key_name = convert_to_openai_tool(tool)["function"]["name"]
  output_parser = JsonOutputKeyToolsParser(
@@ -500,9 +500,9 @@ def get_openai_output_parser(
  }
  else:
  pydantic_schema = functions[0]
- output_parser: Union[
- BaseOutputParser, BaseGenerationOutputParser
- ] = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
+ output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
+ PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
+ )
  else:
  output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)
  return output_parser
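The string edits above only wrap URLs in angle brackets, but they sit inside the deprecation notice for these OpenAI-functions helpers, which are slated for removal in 0.3.0 in favor of `with_structured_output`. A minimal sketch of the suggested replacement (the `ChatOpenAI` model name and the `Person` schema are illustrative, and the `langchain-openai` partner package is assumed to be installed):

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI  # assumed partner package


class Person(BaseModel):
    """Schema the chat model is asked to populate."""

    name: str = Field(description="the person's name")
    age: int = Field(description="age in years")


llm = ChatOpenAI(model="gpt-4o-mini")  # any tool-calling chat model should work
structured_llm = llm.with_structured_output(Person)
person = structured_llm.invoke("Anna is 29 years old.")  # -> Person(name='Anna', age=29)
```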
@@ -1,4 +1,5 @@
  """Load summarizing chains."""
+
  from typing import Any, Mapping, Optional, Protocol
 
  from langchain_core.callbacks import Callbacks
@@ -1,4 +1,5 @@
  """Chain that runs an arbitrary python function."""
+
  import functools
  import logging
  from typing import Any, Awaitable, Callable, Dict, List, Optional
@@ -31,9 +32,9 @@ class TransformChain(Chain):
  """The keys returned by the transform's output dictionary."""
  transform_cb: Callable[[Dict[str, str]], Dict[str, str]] = Field(alias="transform")
  """The transform function."""
- atransform_cb: Optional[
- Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]
- ] = Field(None, alias="atransform")
+ atransform_cb: Optional[Callable[[Dict[str, Any]], Awaitable[Dict[str, Any]]]] = (
+ Field(None, alias="atransform")
+ )
  """The async coroutine transform function."""
 
  @staticmethod
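The `TransformChain` hunk is purely a formatting change, but it shows the chain's two hooks: a synchronous callable bound to the `transform` alias and an optional coroutine bound to `atransform`. A small usage sketch (the uppercasing transform is an arbitrary example):

```python
from langchain.chains import TransformChain


def to_upper(inputs: dict) -> dict:
    # Synchronous transform: maps the input dict to the output dict.
    return {"shout": inputs["text"].upper()}


async def ato_upper(inputs: dict) -> dict:
    # Optional async counterpart, bound via the `atransform` alias.
    return {"shout": inputs["text"].upper()}


chain = TransformChain(
    input_variables=["text"],
    output_variables=["shout"],
    transform=to_upper,
    atransform=ato_upper,
)
print(chain.invoke({"text": "hello"}))  # typically {'text': 'hello', 'shout': 'HELLO'}
```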
@@ -16,6 +16,7 @@ an interface where "chat messages" are the inputs and outputs.
 
  AIMessage, BaseMessage, HumanMessage
  """ # noqa: E501
+
  import warnings
 
  from langchain_core._api import LangChainDeprecationWarning
@@ -94,7 +94,7 @@ def init_chat_model(
  _check_pkg("langchain_anthropic")
  from langchain_anthropic import ChatAnthropic
 
- return ChatAnthropic(model=model, **kwargs)
+ return ChatAnthropic(model=model, **kwargs) # type: ignore[call-arg]
  elif model_provider == "azure_openai":
  _check_pkg("langchain_openai")
  from langchain_openai import AzureChatOpenAI
@@ -134,7 +134,7 @@ def init_chat_model(
  _check_pkg("langchain_mistralai")
  from langchain_mistralai import ChatMistralAI
 
- return ChatMistralAI(model=model, **kwargs)
+ return ChatMistralAI(model=model, **kwargs) # type: ignore[call-arg]
  elif model_provider == "huggingface":
  _check_pkg("langchain_huggingface")
  from langchain_huggingface import ChatHuggingFace
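The two hunks above only add `# type: ignore[call-arg]` comments, but they live inside `init_chat_model`, the provider-dispatching factory this file defines. A minimal usage sketch (model names are illustrative and the matching partner packages, e.g. `langchain-anthropic` or `langchain-openai`, must be installed):

```python
from langchain.chat_models import init_chat_model

# Explicit provider: dispatches to langchain_anthropic.ChatAnthropic and
# forwards extra kwargs such as temperature to that class.
claude = init_chat_model(
    "claude-3-haiku-20240307", model_provider="anthropic", temperature=0
)

# Well-known prefixes ("gpt-...", "claude-...") let the provider be inferred.
gpt = init_chat_model("gpt-4o-mini")

print(claude.invoke("Say hi in one word.").content)
```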
@@ -14,6 +14,7 @@ The **Docstore** is a simplified version of the Document Loader.
 
  Document, AddableMixin
  """
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -14,6 +14,7 @@
 
  Document, <name>TextSplitter
  """
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -14,6 +14,7 @@
 
  Document
  """ # noqa: E501
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -10,7 +10,6 @@ from different APIs and services.
  Embeddings --> <name>Embeddings # Examples: OpenAIEmbeddings, HuggingFaceEmbeddings
  """
 
-
  import logging
  from typing import TYPE_CHECKING, Any
 
@@ -38,7 +38,7 @@ name of the dataset to load.
  - Comparing the output of two models: :class:`PairwiseStringEvalChain <langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain>` or :class:`LabeledPairwiseStringEvalChain <langchain.evaluation.comparison.eval_chain.LabeledPairwiseStringEvalChain>` when there is additionally a reference label.
  - Judging the efficacy of an agent's tool usage: :class:`TrajectoryEvalChain <langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain>`
  - Checking whether an output complies with a set of criteria: :class:`CriteriaEvalChain <langchain.evaluation.criteria.eval_chain.CriteriaEvalChain>` or :class:`LabeledCriteriaEvalChain <langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain>` when there is additionally a reference label.
- - Computing semantic difference between a prediction and reference: :class:`EmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.EmbeddingDistanceEvalChain>` or between two predictions: :class:`PairwiseEmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.PairwiseEmbeddingDistanceEvalChain>`
+ - Computing semantic difference between a prediction and reference: :class:`EmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.EmbeddingDistanceEvalChain>` or between two predictions: :class:`PairwiseEmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.PairwiseEmbeddingDistanceEvalChain>`
  - Measuring the string distance between a prediction and reference :class:`StringDistanceEvalChain <langchain.evaluation.string_distance.base.StringDistanceEvalChain>` or between two predictions :class:`PairwiseStringDistanceEvalChain <langchain.evaluation.string_distance.base.PairwiseStringDistanceEvalChain>`
 
  **Low-level API**
@@ -52,6 +52,7 @@ These evaluators implement one of the following interfaces:
  These interfaces enable easier composability and usage within a higher level evaluation framework.
 
  """ # noqa: E501
+
  from langchain.evaluation.agents import TrajectoryEvalChain
  from langchain.evaluation.comparison import (
  LabeledPairwiseStringEvalChain,
@@ -1,4 +1,5 @@
  """Chains for evaluating ReAct style agents."""
+
  from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
 
  __all__ = ["TrajectoryEvalChain"]
@@ -1,4 +1,5 @@
  """Prompt for trajectory evaluation chain."""
+
  # flake8: noqa
  from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
 
@@ -27,6 +27,7 @@ Example:
  # . " by explaining what the formula means.\\n[[B]]"
  # }
  """
+
  from langchain.evaluation.comparison.eval_chain import (
  LabeledPairwiseStringEvalChain,
  PairwiseStringEvalChain,
@@ -1,4 +1,5 @@
  """Base classes for comparing the output of two models."""
+
  from __future__ import annotations
 
  import logging
@@ -4,6 +4,7 @@ This prompt is used to compare two responses and evaluate which one best follows
  and answers the question. The prompt is based on the paper from
  Zheng, et. al. https://arxiv.org/abs/2306.05685
  """
+
  # flake8: noqa
  from langchain_core.prompts.chat import ChatPromptTemplate
 
@@ -1,4 +1,5 @@
  """Evaluators that measure embedding distances."""
+
  from langchain.evaluation.embedding_distance.base import (
  EmbeddingDistance,
  EmbeddingDistanceEvalChain,
@@ -1,4 +1,5 @@
  """A chain for comparing the output of two models using embeddings."""
+
  from enum import Enum
  from typing import Any, Dict, List, Optional
 
@@ -1,4 +1,5 @@
  """Loading datasets and evaluators."""
+
  from typing import Any, Dict, List, Optional, Sequence, Type, Union
 
  from langchain_core.language_models import BaseLanguageModel
@@ -1,4 +1,5 @@
  """Evaluators for parsing strings."""
+
  import json
  from operator import eq
  from typing import Any, Callable, Optional, Union, cast
@@ -1,4 +1,5 @@
  """Chains and utils related to evaluating question answering functionality."""
+
  from langchain.evaluation.qa.eval_chain import (
  ContextQAEvalChain,
  CotQAEvalChain,
@@ -1,4 +1,5 @@
  """LLM Chains for evaluating question answering."""
+
  from __future__ import annotations
 
  import re
@@ -1,4 +1,5 @@
  """LLM Chain for generating examples for question answering."""
+
  from __future__ import annotations
 
  from typing import Any
@@ -1,4 +1,5 @@
  """Interfaces to be implemented by general evaluators."""
+
  from __future__ import annotations
 
  import logging
@@ -22,6 +22,7 @@ Example:
  # "However, it does not provide an explanation of what the formula means."
  # }
  """
+
  from langchain.evaluation.scoring.eval_chain import (
  LabeledScoreStringEvalChain,
  ScoreStringEvalChain,
@@ -1,4 +1,5 @@
  """Base classes for scoring the output of a model on a scale of 1-10."""
+
  from __future__ import annotations
 
  import logging
@@ -4,6 +4,7 @@ This prompt is used to score the responses and evaluate how it follows the instr
  and answers the question. The prompt is based on the paper from
  Zheng, et. al. https://arxiv.org/abs/2306.05685
  """
+
  # flake8: noqa
  from langchain_core.prompts.chat import ChatPromptTemplate
 
@@ -1,4 +1,5 @@
  """String distance evaluators."""
+
  from langchain.evaluation.string_distance.base import (
  PairwiseStringDistanceEvalChain,
  StringDistance,
@@ -1,4 +1,5 @@
  """Keep here for backwards compatibility."""
+
  from langchain.chains.example_generator import generate_example
 
  __all__ = ["generate_example"]
langchain/formatting.py CHANGED
@@ -1,4 +1,5 @@
  """DEPRECATED: Kept for backwards compatibility."""
+
  from langchain_core.utils.formatting import StrictFormatter, formatter
 
  __all__ = ["StrictFormatter", "formatter"]
@@ -1,4 +1,5 @@
  """Global values and configuration that apply to all of LangChain."""
+
  import warnings
  from typing import TYPE_CHECKING, Optional
 
@@ -1,4 +1,5 @@
  """**Graphs** provide a natural language interface to graph databases."""
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -11,6 +11,7 @@ Importantly, Index keeps on working even if the content being written is derived
  via a set of transformations from some source content (e.g., indexing children
  documents that were derived from parent documents by chunking.)
  """
+
  from typing import TYPE_CHECKING, Any
 
  from langchain_core.indexing.api import IndexingResult, aindex, index
@@ -13,6 +13,7 @@ allow it to work with a variety of SQL as a backend.
  * Keys can be listed based on the updated at field.
  * Keys can be deleted.
  """
+
  import contextlib
  import decimal
  import uuid
@@ -20,9 +21,7 @@ from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Sequenc
 
  from langchain_core.indexing import RecordManager
  from sqlalchemy import (
- URL,
  Column,
- Engine,
  Float,
  Index,
  String,
@@ -33,14 +32,19 @@ from sqlalchemy import (
  select,
  text,
  )
+ from sqlalchemy.engine import URL, Engine
  from sqlalchemy.ext.asyncio import (
  AsyncEngine,
  AsyncSession,
- async_sessionmaker,
  create_async_engine,
  )
- from sqlalchemy.ext.declarative import declarative_base
- from sqlalchemy.orm import Query, Session, sessionmaker
+ from sqlalchemy.orm import Query, Session, declarative_base, sessionmaker
+
+ try:
+ from sqlalchemy.ext.asyncio import async_sessionmaker
+ except ImportError:
+ # dummy for sqlalchemy < 2
+ async_sessionmaker = type("async_sessionmaker", (type,), {}) # type: ignore
 
  Base = declarative_base()
 
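The `_sql_record_manager.py` hunks above move `URL` and `Engine` to `sqlalchemy.engine`, pull `declarative_base` from `sqlalchemy.orm`, and guard `async_sessionmaker` behind a try/except so the module imports under both SQLAlchemy 1.4 and 2.x. A standalone sketch of that compatibility pattern (not the actual module):

```python
# Version-tolerant SQLAlchemy imports: these paths exist in both 1.4 and 2.x.
from sqlalchemy.engine import URL, Engine
from sqlalchemy.orm import Session, declarative_base, sessionmaker

try:
    # async_sessionmaker was added in SQLAlchemy 2.0.
    from sqlalchemy.ext.asyncio import async_sessionmaker
except ImportError:
    # Placeholder so annotations mentioning async_sessionmaker still resolve on 1.4.
    async_sessionmaker = type("async_sessionmaker", (type,), {})  # type: ignore

Base = declarative_base()
```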
@@ -1,4 +1,5 @@
  """**Graphs** provide a natural language interface to graph databases."""
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -1,4 +1,5 @@
  """Relevant prompts for constructing indexes."""
+
  from langchain_core._api import warn_deprecated
 
  warn_deprecated(
langchain/input.py CHANGED
@@ -1,4 +1,5 @@
  """DEPRECATED: Kept for backwards compatibility."""
+
  from langchain_core.utils.input import (
  get_bolded_text,
  get_color_mapping,
@@ -17,6 +17,7 @@ access to the large language model (**LLM**) APIs and services.
  CallbackManager, AsyncCallbackManager,
  AIMessage, BaseMessage
  """ # noqa: E501
+
  import warnings
  from typing import Any, Callable, Dict, Type
 
@@ -1,4 +1,5 @@
  """Serialization and deserialization."""
+
  from langchain_core.load.dump import dumpd, dumps
  from langchain_core.load.load import load, loads
 
@@ -26,6 +26,7 @@
 
  AIMessage, BaseMessage, HumanMessage
  """ # noqa: E501
+
  from typing import TYPE_CHECKING, Any
 
  from langchain._api import create_importer
@@ -48,6 +49,9 @@ from langchain.memory.summary import ConversationSummaryMemory
  from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
  from langchain.memory.token_buffer import ConversationTokenBufferMemory
  from langchain.memory.vectorstore import VectorStoreRetrieverMemory
+ from langchain.memory.vectorstore_token_buffer_memory import (
+ ConversationVectorStoreTokenBufferMemory, # avoid circular import
+ )
 
  if TYPE_CHECKING:
  from langchain_community.chat_message_histories import (
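The only new module in this release is `langchain/memory/vectorstore_token_buffer_memory.py`; the hunk above imports its `ConversationVectorStoreTokenBufferMemory`, and the `__all__` hunk below re-exports it. A hedged usage sketch, assuming the class combines a token-limited buffer with a vector-store retriever for overflowed history; the FAISS store, embedding model, and parameter names are illustrative, not taken from this diff:

```python
from langchain.memory import ConversationVectorStoreTokenBufferMemory
from langchain_community.vectorstores import FAISS         # assumed available
from langchain_openai import ChatOpenAI, OpenAIEmbeddings  # assumed available

# Turns that no longer fit in the token buffer are pushed into the vector
# store and retrieved back by similarity when relevant to the next input.
vectorstore = FAISS.from_texts(["placeholder"], OpenAIEmbeddings())
memory = ConversationVectorStoreTokenBufferMemory(
    llm=ChatOpenAI(model="gpt-4o-mini"),
    retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
    max_token_limit=1000,
    return_messages=True,
)

memory.save_context({"input": "Hi, I'm Anna."}, {"output": "Hello Anna!"})
print(memory.load_memory_variables({"input": "What is my name?"}))
```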
@@ -122,6 +126,7 @@ __all__ = [
  "ConversationSummaryBufferMemory",
  "ConversationSummaryMemory",
  "ConversationTokenBufferMemory",
+ "ConversationVectorStoreTokenBufferMemory",
  "CosmosDBChatMessageHistory",
  "DynamoDBChatMessageHistory",
  "ElasticsearchChatMessageHistory",