langchain 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic. Click here for more details.

Files changed (182) hide show
  1. langchain/__init__.py +1 -0
  2. langchain/agents/__init__.py +5 -4
  3. langchain/agents/agent.py +272 -50
  4. langchain/agents/agent_iterator.py +20 -0
  5. langchain/agents/agent_toolkits/__init__.py +1 -0
  6. langchain/agents/agent_toolkits/file_management/__init__.py +1 -0
  7. langchain/agents/agent_toolkits/playwright/__init__.py +1 -0
  8. langchain/agents/agent_toolkits/vectorstore/base.py +1 -0
  9. langchain/agents/agent_toolkits/vectorstore/toolkit.py +1 -0
  10. langchain/agents/agent_types.py +1 -0
  11. langchain/agents/chat/base.py +37 -1
  12. langchain/agents/chat/output_parser.py +14 -0
  13. langchain/agents/conversational/base.py +38 -6
  14. langchain/agents/conversational/output_parser.py +10 -0
  15. langchain/agents/conversational_chat/base.py +42 -3
  16. langchain/agents/format_scratchpad/__init__.py +1 -0
  17. langchain/agents/format_scratchpad/log.py +12 -1
  18. langchain/agents/format_scratchpad/log_to_messages.py +10 -1
  19. langchain/agents/format_scratchpad/openai_functions.py +10 -5
  20. langchain/agents/format_scratchpad/tools.py +11 -7
  21. langchain/agents/initialize.py +15 -7
  22. langchain/agents/json_chat/base.py +6 -0
  23. langchain/agents/loading.py +7 -0
  24. langchain/agents/mrkl/base.py +39 -10
  25. langchain/agents/mrkl/output_parser.py +12 -0
  26. langchain/agents/openai_assistant/base.py +37 -14
  27. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +32 -4
  28. langchain/agents/openai_functions_agent/base.py +61 -10
  29. langchain/agents/openai_functions_multi_agent/base.py +22 -7
  30. langchain/agents/openai_tools/base.py +3 -0
  31. langchain/agents/output_parsers/__init__.py +1 -0
  32. langchain/agents/react/base.py +1 -0
  33. langchain/agents/self_ask_with_search/base.py +1 -0
  34. langchain/agents/structured_chat/output_parser.py +3 -3
  35. langchain/agents/tools.py +3 -0
  36. langchain/agents/utils.py +9 -1
  37. langchain/base_language.py +1 -0
  38. langchain/callbacks/__init__.py +1 -0
  39. langchain/callbacks/base.py +1 -0
  40. langchain/callbacks/streaming_stdout.py +1 -0
  41. langchain/callbacks/streaming_stdout_final_only.py +1 -0
  42. langchain/callbacks/tracers/evaluation.py +1 -0
  43. langchain/chains/api/base.py +5 -2
  44. langchain/chains/base.py +1 -1
  45. langchain/chains/combine_documents/base.py +59 -0
  46. langchain/chains/combine_documents/map_reduce.py +4 -2
  47. langchain/chains/combine_documents/map_rerank.py +5 -3
  48. langchain/chains/combine_documents/refine.py +4 -2
  49. langchain/chains/combine_documents/stuff.py +9 -4
  50. langchain/chains/constitutional_ai/base.py +1 -0
  51. langchain/chains/constitutional_ai/models.py +1 -0
  52. langchain/chains/constitutional_ai/principles.py +1 -0
  53. langchain/chains/conversation/base.py +81 -1
  54. langchain/chains/conversational_retrieval/base.py +2 -1
  55. langchain/chains/elasticsearch_database/base.py +2 -1
  56. langchain/chains/hyde/base.py +1 -0
  57. langchain/chains/llm.py +1 -0
  58. langchain/chains/llm_checker/base.py +4 -3
  59. langchain/chains/llm_math/base.py +1 -0
  60. langchain/chains/loading.py +2 -1
  61. langchain/chains/mapreduce.py +1 -0
  62. langchain/chains/moderation.py +1 -1
  63. langchain/chains/natbot/base.py +1 -0
  64. langchain/chains/openai_functions/base.py +1 -0
  65. langchain/chains/qa_generation/base.py +47 -1
  66. langchain/chains/qa_with_sources/__init__.py +1 -0
  67. langchain/chains/qa_with_sources/loading.py +1 -0
  68. langchain/chains/qa_with_sources/vector_db.py +1 -1
  69. langchain/chains/query_constructor/base.py +1 -0
  70. langchain/chains/query_constructor/ir.py +1 -0
  71. langchain/chains/question_answering/chain.py +1 -0
  72. langchain/chains/retrieval_qa/base.py +3 -2
  73. langchain/chains/router/base.py +1 -0
  74. langchain/chains/router/llm_router.py +2 -1
  75. langchain/chains/router/multi_prompt.py +1 -0
  76. langchain/chains/router/multi_retrieval_qa.py +1 -0
  77. langchain/chains/sequential.py +2 -1
  78. langchain/chains/structured_output/base.py +6 -6
  79. langchain/chains/summarize/chain.py +1 -0
  80. langchain/chains/transform.py +4 -3
  81. langchain/chat_models/__init__.py +1 -0
  82. langchain/chat_models/base.py +607 -9
  83. langchain/docstore/__init__.py +1 -0
  84. langchain/document_loaders/__init__.py +1 -0
  85. langchain/document_transformers/__init__.py +1 -0
  86. langchain/embeddings/__init__.py +0 -1
  87. langchain/evaluation/__init__.py +2 -1
  88. langchain/evaluation/agents/__init__.py +1 -0
  89. langchain/evaluation/agents/trajectory_eval_prompt.py +1 -0
  90. langchain/evaluation/comparison/__init__.py +1 -0
  91. langchain/evaluation/comparison/eval_chain.py +1 -0
  92. langchain/evaluation/comparison/prompt.py +1 -0
  93. langchain/evaluation/embedding_distance/__init__.py +1 -0
  94. langchain/evaluation/embedding_distance/base.py +1 -0
  95. langchain/evaluation/loading.py +1 -0
  96. langchain/evaluation/parsing/base.py +1 -0
  97. langchain/evaluation/qa/__init__.py +1 -0
  98. langchain/evaluation/qa/eval_chain.py +1 -0
  99. langchain/evaluation/qa/generate_chain.py +1 -0
  100. langchain/evaluation/schema.py +1 -0
  101. langchain/evaluation/scoring/__init__.py +1 -0
  102. langchain/evaluation/scoring/eval_chain.py +1 -0
  103. langchain/evaluation/scoring/prompt.py +1 -0
  104. langchain/evaluation/string_distance/__init__.py +1 -0
  105. langchain/example_generator.py +1 -0
  106. langchain/formatting.py +1 -0
  107. langchain/globals/__init__.py +1 -0
  108. langchain/graphs/__init__.py +1 -0
  109. langchain/indexes/__init__.py +1 -0
  110. langchain/indexes/_sql_record_manager.py +1 -2
  111. langchain/indexes/graph.py +1 -0
  112. langchain/indexes/prompts/__init__.py +1 -0
  113. langchain/input.py +1 -0
  114. langchain/llms/__init__.py +1 -0
  115. langchain/load/__init__.py +1 -0
  116. langchain/memory/__init__.py +5 -0
  117. langchain/memory/vectorstore_token_buffer_memory.py +184 -0
  118. langchain/output_parsers/__init__.py +1 -0
  119. langchain/prompts/__init__.py +1 -0
  120. langchain/prompts/example_selector/__init__.py +1 -0
  121. langchain/python.py +1 -0
  122. langchain/requests.py +1 -0
  123. langchain/retrievers/__init__.py +1 -0
  124. langchain/retrievers/document_compressors/chain_extract.py +1 -0
  125. langchain/retrievers/document_compressors/chain_filter.py +1 -0
  126. langchain/retrievers/ensemble.py +1 -0
  127. langchain/retrievers/self_query/base.py +7 -7
  128. langchain/schema/__init__.py +1 -0
  129. langchain/schema/runnable/__init__.py +1 -0
  130. langchain/serpapi.py +1 -0
  131. langchain/smith/__init__.py +6 -5
  132. langchain/smith/evaluation/__init__.py +0 -1
  133. langchain/smith/evaluation/string_run_evaluator.py +1 -0
  134. langchain/sql_database.py +1 -0
  135. langchain/storage/__init__.py +1 -0
  136. langchain/storage/_lc_store.py +1 -0
  137. langchain/storage/in_memory.py +1 -0
  138. langchain/text_splitter.py +1 -0
  139. langchain/tools/__init__.py +1 -0
  140. langchain/tools/amadeus/__init__.py +1 -0
  141. langchain/tools/azure_cognitive_services/__init__.py +1 -0
  142. langchain/tools/bing_search/__init__.py +1 -0
  143. langchain/tools/dataforseo_api_search/__init__.py +1 -0
  144. langchain/tools/ddg_search/__init__.py +1 -0
  145. langchain/tools/edenai/__init__.py +1 -0
  146. langchain/tools/eleven_labs/__init__.py +1 -0
  147. langchain/tools/file_management/__init__.py +1 -0
  148. langchain/tools/github/__init__.py +1 -1
  149. langchain/tools/gitlab/__init__.py +1 -1
  150. langchain/tools/gmail/__init__.py +1 -0
  151. langchain/tools/golden_query/__init__.py +1 -0
  152. langchain/tools/google_cloud/__init__.py +1 -0
  153. langchain/tools/google_finance/__init__.py +1 -0
  154. langchain/tools/google_jobs/__init__.py +1 -0
  155. langchain/tools/google_lens/__init__.py +1 -0
  156. langchain/tools/google_places/__init__.py +1 -0
  157. langchain/tools/google_scholar/__init__.py +1 -0
  158. langchain/tools/google_search/__init__.py +1 -0
  159. langchain/tools/google_trends/__init__.py +1 -0
  160. langchain/tools/human/__init__.py +1 -0
  161. langchain/tools/memorize/__init__.py +1 -0
  162. langchain/tools/metaphor_search/__init__.py +1 -0
  163. langchain/tools/multion/__init__.py +1 -0
  164. langchain/tools/office365/__init__.py +1 -0
  165. langchain/tools/openapi/utils/openapi_utils.py +1 -0
  166. langchain/tools/openweathermap/__init__.py +1 -0
  167. langchain/tools/playwright/__init__.py +1 -0
  168. langchain/tools/shell/__init__.py +1 -0
  169. langchain/tools/slack/__init__.py +1 -0
  170. langchain/tools/sql_database/prompt.py +1 -0
  171. langchain/tools/steamship_image_generation/__init__.py +1 -0
  172. langchain/tools/tavily_search/__init__.py +1 -0
  173. langchain/tools/wolfram_alpha/__init__.py +1 -0
  174. langchain/tools/zapier/__init__.py +1 -0
  175. langchain/utilities/__init__.py +1 -0
  176. langchain/utilities/python.py +1 -0
  177. langchain/vectorstores/__init__.py +1 -0
  178. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/METADATA +2 -3
  179. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/RECORD +182 -181
  180. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/LICENSE +0 -0
  181. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/WHEEL +0 -0
  182. {langchain-0.2.6.dist-info → langchain-0.2.8.dist-info}/entry_points.txt +0 -0
@@ -14,6 +14,7 @@
14
14
 
15
15
  Document
16
16
  """ # noqa: E501
17
+
17
18
  from typing import TYPE_CHECKING, Any
18
19
 
19
20
  from langchain._api import create_importer
@@ -10,7 +10,6 @@ from different APIs and services.
10
10
  Embeddings --> <name>Embeddings # Examples: OpenAIEmbeddings, HuggingFaceEmbeddings
11
11
  """
12
12
 
13
-
14
13
  import logging
15
14
  from typing import TYPE_CHECKING, Any
16
15
 
@@ -38,7 +38,7 @@ name of the dataset to load.
38
38
  - Comparing the output of two models: :class:`PairwiseStringEvalChain <langchain.evaluation.comparison.eval_chain.PairwiseStringEvalChain>` or :class:`LabeledPairwiseStringEvalChain <langchain.evaluation.comparison.eval_chain.LabeledPairwiseStringEvalChain>` when there is additionally a reference label.
39
39
  - Judging the efficacy of an agent's tool usage: :class:`TrajectoryEvalChain <langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain>`
40
40
  - Checking whether an output complies with a set of criteria: :class:`CriteriaEvalChain <langchain.evaluation.criteria.eval_chain.CriteriaEvalChain>` or :class:`LabeledCriteriaEvalChain <langchain.evaluation.criteria.eval_chain.LabeledCriteriaEvalChain>` when there is additionally a reference label.
41
- - Computing semantic difference between a prediction and reference: :class:`EmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.EmbeddingDistanceEvalChain>` or between two predictions: :class:`PairwiseEmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.PairwiseEmbeddingDistanceEvalChain>`
41
+ - Computing semantic difference between a prediction and reference: :class:`EmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.EmbeddingDistanceEvalChain>` or between two predictions: :class:`PairwiseEmbeddingDistanceEvalChain <langchain.evaluation.embedding_distance.base.PairwiseEmbeddingDistanceEvalChain>`
42
42
  - Measuring the string distance between a prediction and reference :class:`StringDistanceEvalChain <langchain.evaluation.string_distance.base.StringDistanceEvalChain>` or between two predictions :class:`PairwiseStringDistanceEvalChain <langchain.evaluation.string_distance.base.PairwiseStringDistanceEvalChain>`
43
43
 
44
44
  **Low-level API**
@@ -52,6 +52,7 @@ These evaluators implement one of the following interfaces:
52
52
  These interfaces enable easier composability and usage within a higher level evaluation framework.
53
53
 
54
54
  """ # noqa: E501
55
+
55
56
  from langchain.evaluation.agents import TrajectoryEvalChain
56
57
  from langchain.evaluation.comparison import (
57
58
  LabeledPairwiseStringEvalChain,
@@ -1,4 +1,5 @@
1
1
  """Chains for evaluating ReAct style agents."""
2
+
2
3
  from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
3
4
 
4
5
  __all__ = ["TrajectoryEvalChain"]
@@ -1,4 +1,5 @@
1
1
  """Prompt for trajectory evaluation chain."""
2
+
2
3
  # flake8: noqa
3
4
  from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
4
5
 
@@ -27,6 +27,7 @@ Example:
27
27
  # . " by explaining what the formula means.\\n[[B]]"
28
28
  # }
29
29
  """
30
+
30
31
  from langchain.evaluation.comparison.eval_chain import (
31
32
  LabeledPairwiseStringEvalChain,
32
33
  PairwiseStringEvalChain,
@@ -1,4 +1,5 @@
1
1
  """Base classes for comparing the output of two models."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  import logging
@@ -4,6 +4,7 @@ This prompt is used to compare two responses and evaluate which one best follows
4
4
  and answers the question. The prompt is based on the paper from
5
5
  Zheng, et. al. https://arxiv.org/abs/2306.05685
6
6
  """
7
+
7
8
  # flake8: noqa
8
9
  from langchain_core.prompts.chat import ChatPromptTemplate
9
10
 
@@ -1,4 +1,5 @@
1
1
  """Evaluators that measure embedding distances."""
2
+
2
3
  from langchain.evaluation.embedding_distance.base import (
3
4
  EmbeddingDistance,
4
5
  EmbeddingDistanceEvalChain,
@@ -1,4 +1,5 @@
1
1
  """A chain for comparing the output of two models using embeddings."""
2
+
2
3
  from enum import Enum
3
4
  from typing import Any, Dict, List, Optional
4
5
 
@@ -1,4 +1,5 @@
1
1
  """Loading datasets and evaluators."""
2
+
2
3
  from typing import Any, Dict, List, Optional, Sequence, Type, Union
3
4
 
4
5
  from langchain_core.language_models import BaseLanguageModel
@@ -1,4 +1,5 @@
1
1
  """Evaluators for parsing strings."""
2
+
2
3
  import json
3
4
  from operator import eq
4
5
  from typing import Any, Callable, Optional, Union, cast
@@ -1,4 +1,5 @@
1
1
  """Chains and utils related to evaluating question answering functionality."""
2
+
2
3
  from langchain.evaluation.qa.eval_chain import (
3
4
  ContextQAEvalChain,
4
5
  CotQAEvalChain,
@@ -1,4 +1,5 @@
1
1
  """LLM Chains for evaluating question answering."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  import re
@@ -1,4 +1,5 @@
1
1
  """LLM Chain for generating examples for question answering."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  from typing import Any
@@ -1,4 +1,5 @@
1
1
  """Interfaces to be implemented by general evaluators."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  import logging
@@ -22,6 +22,7 @@ Example:
22
22
  # "However, it does not provide an explanation of what the formula means."
23
23
  # }
24
24
  """
25
+
25
26
  from langchain.evaluation.scoring.eval_chain import (
26
27
  LabeledScoreStringEvalChain,
27
28
  ScoreStringEvalChain,
@@ -1,4 +1,5 @@
1
1
  """Base classes for scoring the output of a model on a scale of 1-10."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  import logging
@@ -4,6 +4,7 @@ This prompt is used to score the responses and evaluate how it follows the instr
4
4
  and answers the question. The prompt is based on the paper from
5
5
  Zheng, et. al. https://arxiv.org/abs/2306.05685
6
6
  """
7
+
7
8
  # flake8: noqa
8
9
  from langchain_core.prompts.chat import ChatPromptTemplate
9
10
 
@@ -1,4 +1,5 @@
1
1
  """String distance evaluators."""
2
+
2
3
  from langchain.evaluation.string_distance.base import (
3
4
  PairwiseStringDistanceEvalChain,
4
5
  StringDistance,
@@ -1,4 +1,5 @@
1
1
  """Keep here for backwards compatibility."""
2
+
2
3
  from langchain.chains.example_generator import generate_example
3
4
 
4
5
  __all__ = ["generate_example"]
langchain/formatting.py CHANGED
@@ -1,4 +1,5 @@
1
1
  """DEPRECATED: Kept for backwards compatibility."""
2
+
2
3
  from langchain_core.utils.formatting import StrictFormatter, formatter
3
4
 
4
5
  __all__ = ["StrictFormatter", "formatter"]
@@ -1,4 +1,5 @@
1
1
  """Global values and configuration that apply to all of LangChain."""
2
+
2
3
  import warnings
3
4
  from typing import TYPE_CHECKING, Optional
4
5
 
@@ -1,4 +1,5 @@
1
1
  """**Graphs** provide a natural language interface to graph databases."""
2
+
2
3
  from typing import TYPE_CHECKING, Any
3
4
 
4
5
  from langchain._api import create_importer
@@ -11,6 +11,7 @@ Importantly, Index keeps on working even if the content being written is derived
11
11
  via a set of transformations from some source content (e.g., indexing children
12
12
  documents that were derived from parent documents by chunking.)
13
13
  """
14
+
14
15
  from typing import TYPE_CHECKING, Any
15
16
 
16
17
  from langchain_core.indexing.api import IndexingResult, aindex, index
@@ -38,8 +38,7 @@ from sqlalchemy.ext.asyncio import (
38
38
  AsyncSession,
39
39
  create_async_engine,
40
40
  )
41
- from sqlalchemy.ext.declarative import declarative_base
42
- from sqlalchemy.orm import Query, Session, sessionmaker
41
+ from sqlalchemy.orm import Query, Session, declarative_base, sessionmaker
43
42
 
44
43
  try:
45
44
  from sqlalchemy.ext.asyncio import async_sessionmaker
@@ -1,4 +1,5 @@
1
1
  """**Graphs** provide a natural language interface to graph databases."""
2
+
2
3
  from typing import TYPE_CHECKING, Any
3
4
 
4
5
  from langchain._api import create_importer
@@ -1,4 +1,5 @@
1
1
  """Relevant prompts for constructing indexes."""
2
+
2
3
  from langchain_core._api import warn_deprecated
3
4
 
4
5
  warn_deprecated(
langchain/input.py CHANGED
@@ -1,4 +1,5 @@
1
1
  """DEPRECATED: Kept for backwards compatibility."""
2
+
2
3
  from langchain_core.utils.input import (
3
4
  get_bolded_text,
4
5
  get_color_mapping,
@@ -17,6 +17,7 @@ access to the large language model (**LLM**) APIs and services.
17
17
  CallbackManager, AsyncCallbackManager,
18
18
  AIMessage, BaseMessage
19
19
  """ # noqa: E501
20
+
20
21
  import warnings
21
22
  from typing import Any, Callable, Dict, Type
22
23
 
@@ -1,4 +1,5 @@
1
1
  """Serialization and deserialization."""
2
+
2
3
  from langchain_core.load.dump import dumpd, dumps
3
4
  from langchain_core.load.load import load, loads
4
5
 
@@ -26,6 +26,7 @@
26
26
 
27
27
  AIMessage, BaseMessage, HumanMessage
28
28
  """ # noqa: E501
29
+
29
30
  from typing import TYPE_CHECKING, Any
30
31
 
31
32
  from langchain._api import create_importer
@@ -48,6 +49,9 @@ from langchain.memory.summary import ConversationSummaryMemory
48
49
  from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
49
50
  from langchain.memory.token_buffer import ConversationTokenBufferMemory
50
51
  from langchain.memory.vectorstore import VectorStoreRetrieverMemory
52
+ from langchain.memory.vectorstore_token_buffer_memory import (
53
+ ConversationVectorStoreTokenBufferMemory, # avoid circular import
54
+ )
51
55
 
52
56
  if TYPE_CHECKING:
53
57
  from langchain_community.chat_message_histories import (
@@ -122,6 +126,7 @@ __all__ = [
122
126
  "ConversationSummaryBufferMemory",
123
127
  "ConversationSummaryMemory",
124
128
  "ConversationTokenBufferMemory",
129
+ "ConversationVectorStoreTokenBufferMemory",
125
130
  "CosmosDBChatMessageHistory",
126
131
  "DynamoDBChatMessageHistory",
127
132
  "ElasticsearchChatMessageHistory",
@@ -0,0 +1,184 @@
1
+ """
2
+ Class for a conversation memory buffer with older messages stored in a vectorstore.
3
+
4
+ This implements a conversation memory in which the messages are stored in a memory
5
+ buffer up to a specified token limit. When the limit is exceeded, older messages are
6
+ saved to a vectorstore backing database. The vectorstore can be made persistent across
7
+ sessions.
8
+ """
9
+
10
+ import warnings
11
+ from datetime import datetime
12
+ from typing import Any, Dict, List
13
+
14
+ from langchain_core.messages import BaseMessage
15
+ from langchain_core.prompts.chat import SystemMessagePromptTemplate
16
+ from langchain_core.pydantic_v1 import Field, PrivateAttr
17
+ from langchain_core.vectorstores import VectorStoreRetriever
18
+
19
+ from langchain.memory import ConversationTokenBufferMemory, VectorStoreRetrieverMemory
20
+ from langchain.memory.chat_memory import BaseChatMemory
21
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
22
+
23
+ DEFAULT_HISTORY_TEMPLATE = """
24
+ Current date and time: {current_time}.
25
+
26
+ Potentially relevant timestamped excerpts of previous conversations (you
27
+ do not need to use these if irrelevant):
28
+ {previous_history}
29
+
30
+ """
31
+
32
+ TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S %Z"
33
+
34
+
35
+ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
36
+ """Conversation chat memory with token limit and vectordb backing.
37
+
38
+ load_memory_variables() will return a dict with the key "history".
39
+ It contains background information retrieved from the vector store
40
+ plus recent lines of the current conversation.
41
+
42
+ To help the LLM understand the part of the conversation stored in the
43
+ vectorstore, each interaction is timestamped and the current date and
44
+ time is also provided in the history. A side effect of this is that the
45
+ LLM will have access to the current date and time.
46
+
47
+ Initialization arguments:
48
+
49
+ This class accepts all the initialization arguments of
50
+ ConversationTokenBufferMemory, such as `llm`. In addition, it
51
+ accepts the following additional arguments
52
+
53
+ retriever: (required) A VectorStoreRetriever object to use
54
+ as the vector backing store
55
+
56
+ split_chunk_size: (optional, 1000) Token chunk split size
57
+ for long messages generated by the AI
58
+
59
+ previous_history_template: (optional) Template used to format
60
+ the contents of the prompt history
61
+
62
+
63
+ Example using ChromaDB:
64
+
65
+ .. code-block:: python
66
+
67
+ from langchain.memory.token_buffer_vectorstore_memory import (
68
+ ConversationVectorStoreTokenBufferMemory
69
+ )
70
+ from langchain_community.vectorstores import Chroma
71
+ from langchain_community.embeddings import HuggingFaceInstructEmbeddings
72
+ from langchain_openai import OpenAI
73
+
74
+ embedder = HuggingFaceInstructEmbeddings(
75
+ query_instruction="Represent the query for retrieval: "
76
+ )
77
+ chroma = Chroma(collection_name="demo",
78
+ embedding_function=embedder,
79
+ collection_metadata={"hnsw:space": "cosine"},
80
+ )
81
+
82
+ retriever = chroma.as_retriever(
83
+ search_type="similarity_score_threshold",
84
+ search_kwargs={
85
+ 'k': 5,
86
+ 'score_threshold': 0.75,
87
+ },
88
+ )
89
+
90
+ conversation_memory = ConversationVectorStoreTokenBufferMemory(
91
+ return_messages=True,
92
+ llm=OpenAI(),
93
+ retriever=retriever,
94
+ max_token_limit = 1000,
95
+ )
96
+
97
+ conversation_memory.save_context({"Human": "Hi there"},
98
+ {"AI": "Nice to meet you!"}
99
+ )
100
+ conversation_memory.save_context({"Human": "Nice day isn't it?"},
101
+ {"AI": "I love Wednesdays."}
102
+ )
103
+ conversation_memory.load_memory_variables({"input": "What time is it?"})
104
+
105
+ """
106
+
107
+ retriever: VectorStoreRetriever = Field(exclude=True)
108
+ memory_key: str = "history"
109
+ previous_history_template: str = DEFAULT_HISTORY_TEMPLATE
110
+ split_chunk_size: int = 1000
111
+
112
+ _memory_retriever: VectorStoreRetrieverMemory = PrivateAttr(default=None)
113
+ _timestamps: List[datetime] = PrivateAttr(default_factory=list)
114
+
115
+ @property
116
+ def memory_retriever(self) -> VectorStoreRetrieverMemory:
117
+ """Return a memory retriever from the passed retriever object."""
118
+ if self._memory_retriever is not None:
119
+ return self._memory_retriever
120
+ self._memory_retriever = VectorStoreRetrieverMemory(retriever=self.retriever)
121
+ return self._memory_retriever
122
+
123
+ def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
124
+ """Return history and memory buffer."""
125
+ try:
126
+ with warnings.catch_warnings():
127
+ warnings.simplefilter("ignore")
128
+ memory_variables = self.memory_retriever.load_memory_variables(inputs)
129
+ previous_history = memory_variables[self.memory_retriever.memory_key]
130
+ except AssertionError: # happens when db is empty
131
+ previous_history = ""
132
+ current_history = super().load_memory_variables(inputs)
133
+ template = SystemMessagePromptTemplate.from_template(
134
+ self.previous_history_template
135
+ )
136
+ messages = [
137
+ template.format(
138
+ previous_history=previous_history,
139
+ current_time=datetime.now().astimezone().strftime(TIMESTAMP_FORMAT),
140
+ )
141
+ ]
142
+ messages.extend(current_history[self.memory_key])
143
+ return {self.memory_key: messages}
144
+
145
+ def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
146
+ """Save context from this conversation to buffer. Pruned."""
147
+ BaseChatMemory.save_context(self, inputs, outputs)
148
+ self._timestamps.append(datetime.now().astimezone())
149
+ # Prune buffer if it exceeds max token limit
150
+ buffer = self.chat_memory.messages
151
+ curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
152
+ if curr_buffer_length > self.max_token_limit:
153
+ while curr_buffer_length > self.max_token_limit:
154
+ self._pop_and_store_interaction(buffer)
155
+ curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
156
+
157
+ def save_remainder(self) -> None:
158
+ """
159
+ Save the remainder of the conversation buffer to the vector store.
160
+
161
+ This is useful if you have made the vectorstore persistent, in which
162
+ case this can be called before the end of the session to store the
163
+ remainder of the conversation.
164
+ """
165
+ buffer = self.chat_memory.messages
166
+ while len(buffer) > 0:
167
+ self._pop_and_store_interaction(buffer)
168
+
169
+ def _pop_and_store_interaction(self, buffer: List[BaseMessage]) -> None:
170
+ input = buffer.pop(0)
171
+ output = buffer.pop(0)
172
+ timestamp = self._timestamps.pop(0).strftime(TIMESTAMP_FORMAT)
173
+ # Split AI output into smaller chunks to avoid creating documents
174
+ # that will overflow the context window
175
+ ai_chunks = self._split_long_ai_text(str(output.content))
176
+ for index, chunk in enumerate(ai_chunks):
177
+ self.memory_retriever.save_context(
178
+ {"Human": f"<{timestamp}/00> {str(input.content)}"},
179
+ {"AI": f"<{timestamp}/{index:02}> {chunk}"},
180
+ )
181
+
182
+ def _split_long_ai_text(self, text: str) -> List[str]:
183
+ splitter = RecursiveCharacterTextSplitter(chunk_size=self.split_chunk_size)
184
+ return [chunk.page_content for chunk in splitter.create_documents([text])]
@@ -12,6 +12,7 @@
12
12
 
13
13
  Serializable, Generation, PromptValue
14
14
  """ # noqa: E501
15
+
15
16
  from typing import TYPE_CHECKING, Any
16
17
 
17
18
  from langchain_core.output_parsers import (
@@ -27,6 +27,7 @@ from multiple components. Prompt classes and functions make constructing
27
27
  ChatPromptValue
28
28
 
29
29
  """ # noqa: E501
30
+
30
31
  from typing import TYPE_CHECKING, Any
31
32
 
32
33
  from langchain_core.example_selectors import (
@@ -1,4 +1,5 @@
1
1
  """Logic for selecting examples to include in prompts."""
2
+
2
3
  from typing import TYPE_CHECKING, Any
3
4
 
4
5
  from langchain_core.example_selectors.length_based import (
langchain/python.py CHANGED
@@ -1,4 +1,5 @@
1
1
  """For backwards compatibility."""
2
+
2
3
  from typing import Any
3
4
 
4
5
  from langchain._api import create_importer
langchain/requests.py CHANGED
@@ -1,4 +1,5 @@
1
1
  """DEPRECATED: Kept for backwards compatibility."""
2
+
2
3
  from typing import TYPE_CHECKING, Any
3
4
 
4
5
  from langchain._api import create_importer
@@ -17,6 +17,7 @@ the backbone of a retriever, but there are other types of retrievers as well.
17
17
  Document, Serializable, Callbacks,
18
18
  CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
19
19
  """
20
+
20
21
  from typing import TYPE_CHECKING, Any
21
22
 
22
23
  from langchain._api.module_import import create_importer
@@ -1,4 +1,5 @@
1
1
  """DocumentFilter that uses an LLM chain to extract the relevant parts of documents."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  import asyncio
@@ -1,4 +1,5 @@
1
1
  """Filter that uses an LLM to drop documents that aren't relevant to the query."""
2
+
2
3
  from typing import Any, Callable, Dict, Optional, Sequence
3
4
 
4
5
  from langchain_core.callbacks.manager import Callbacks
@@ -2,6 +2,7 @@
2
2
  Ensemble retriever that ensemble the results of
3
3
  multiple retrievers by using weighted Reciprocal Rank Fusion
4
4
  """
5
+
5
6
  import asyncio
6
7
  from collections import defaultdict
7
8
  from collections.abc import Hashable
@@ -169,7 +169,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
169
169
  return ChromaTranslator()
170
170
 
171
171
  try:
172
- from langchain_postgres import PGVector
172
+ from langchain_postgres import PGVector # type: ignore[no-redef]
173
173
  from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator
174
174
  except ImportError:
175
175
  pass
@@ -310,16 +310,16 @@ class SelfQueryRetriever(BaseRetriever):
310
310
  "allowed_comparators" not in chain_kwargs
311
311
  and structured_query_translator.allowed_comparators is not None
312
312
  ):
313
- chain_kwargs[
314
- "allowed_comparators"
315
- ] = structured_query_translator.allowed_comparators
313
+ chain_kwargs["allowed_comparators"] = (
314
+ structured_query_translator.allowed_comparators
315
+ )
316
316
  if (
317
317
  "allowed_operators" not in chain_kwargs
318
318
  and structured_query_translator.allowed_operators is not None
319
319
  ):
320
- chain_kwargs[
321
- "allowed_operators"
322
- ] = structured_query_translator.allowed_operators
320
+ chain_kwargs["allowed_operators"] = (
321
+ structured_query_translator.allowed_operators
322
+ )
323
323
  query_constructor = load_query_constructor_runnable(
324
324
  llm,
325
325
  document_contents,
@@ -1,4 +1,5 @@
1
1
  """**Schemas** are the LangChain Base Classes and Interfaces."""
2
+
2
3
  from langchain_core.agents import AgentAction, AgentFinish
3
4
  from langchain_core.caches import BaseCache
4
5
  from langchain_core.chat_history import BaseChatMessageHistory
@@ -14,6 +14,7 @@ creating more responsive UX.
14
14
 
15
15
  This module contains schema and implementation of LangChain Runnables primitives.
16
16
  """
17
+
17
18
  from langchain_core.runnables.base import (
18
19
  Runnable,
19
20
  RunnableBinding,
langchain/serpapi.py CHANGED
@@ -1,4 +1,5 @@
1
1
  """For backwards compatibility."""
2
+
2
3
  from typing import TYPE_CHECKING, Any
3
4
 
4
5
  from langchain._api import create_importer
@@ -54,19 +54,19 @@ or LangSmith's `RunEvaluator` classes.
54
54
  from langchain.evaluation import StringEvaluator
55
55
 
56
56
  class MyStringEvaluator(StringEvaluator):
57
-
57
+
58
58
  @property
59
59
  def requires_input(self) -> bool:
60
60
  return False
61
-
61
+
62
62
  @property
63
63
  def requires_reference(self) -> bool:
64
64
  return True
65
-
65
+
66
66
  @property
67
67
  def evaluation_name(self) -> str:
68
68
  return "exact_match"
69
-
69
+
70
70
  def _evaluate_strings(self, prediction, reference=None, input=None, **kwargs) -> dict:
71
71
  return {"score": prediction == reference}
72
72
 
@@ -80,7 +80,7 @@ or LangSmith's `RunEvaluator` classes.
80
80
  "<my_dataset_name>",
81
81
  construct_chain,
82
82
  evaluation=evaluation_config,
83
- )
83
+ )
84
84
 
85
85
  **Primary Functions**
86
86
 
@@ -88,6 +88,7 @@ or LangSmith's `RunEvaluator` classes.
88
88
  - :func:`run_on_dataset <langchain.smith.evaluation.runner_utils.run_on_dataset>`: Function to evaluate a chain, agent, or other LangChain component over a dataset.
89
89
  - :class:`RunEvalConfig <langchain.smith.evaluation.config.RunEvalConfig>`: Class representing the configuration for running evaluation. You can select evaluators by :class:`EvaluatorType <langchain.evaluation.schema.EvaluatorType>` or config, or you can pass in `custom_evaluators`
90
90
  """ # noqa: E501
91
+
91
92
  from langchain.smith.evaluation import (
92
93
  RunEvalConfig,
93
94
  arun_on_dataset,
@@ -51,7 +51,6 @@ For more information on the LangSmith API, see the `LangSmith API documentation
51
51
 
52
52
  """ # noqa: E501
53
53
 
54
-
55
54
  from langchain.smith.evaluation.config import RunEvalConfig
56
55
  from langchain.smith.evaluation.runner_utils import (
57
56
  InputFormatError,
@@ -1,4 +1,5 @@
1
1
  """Run evaluator wrapper for string evaluators."""
2
+
2
3
  from __future__ import annotations
3
4
 
4
5
  from abc import abstractmethod