langchain 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +0 -24
- langchain/agents/react/agent.py +1 -1
- langchain/chains/conversational_retrieval/base.py +5 -2
- langchain/chains/openai_functions/qa_with_structure.py +6 -5
- langchain/chains/structured_output/base.py +4 -3
- langchain/evaluation/loading.py +1 -1
- langchain/memory/summary.py +12 -0
- langchain/memory/summary_buffer.py +46 -0
- langchain/memory/vectorstore_token_buffer_memory.py +2 -2
- langchain/output_parsers/fix.py +19 -10
- langchain/output_parsers/retry.py +22 -9
- langchain/retrievers/contextual_compression.py +2 -2
- langchain/retrievers/document_compressors/__init__.py +4 -0
- langchain/retrievers/document_compressors/listwise_rerank.py +137 -0
- langchain/retrievers/multi_query.py +1 -1
- langchain/retrievers/parent_document_retriever.py +41 -19
- {langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/METADATA +4 -2
- {langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/RECORD +22 -21
- /langchain/{globals/__init__.py → globals.py} +0 -0
- {langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/LICENSE +0 -0
- {langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/WHEEL +0 -0
- {langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/entry_points.txt +0 -0
langchain/agents/agent.py
CHANGED
@@ -1146,30 +1146,6 @@ class AgentExecutor(Chain):
         )
         return values
 
-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_return_direct_tool(cls, values: Dict) -> Dict:
-        """Validate that tools are compatible with agent.
-
-        Args:
-            values: Values to validate.
-
-        Returns:
-            Dict: Validated values.
-
-        Raises:
-            ValueError: If tools that have `return_direct=True` are not allowed.
-        """
-        agent = values["agent"]
-        tools = values["tools"]
-        if isinstance(agent, BaseMultiActionAgent):
-            for tool in tools:
-                if tool.return_direct:
-                    raise ValueError(
-                        "Tools that have `return_direct=True` are not allowed "
-                        "in multi-action agents"
-                    )
-        return values
-
     @root_validator(pre=True)
     def validate_runnable_agent(cls, values: Dict) -> Dict:
         """Convert runnable to agent if passed in.
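Net effect of the removal above: `AgentExecutor` no longer rejects tools with `return_direct=True` when the agent is a multi-action agent. A minimal sketch of the changed behavior (the agent itself is elided, since any `BaseMultiActionAgent` would do):

# Sketch only: shows the validation that was removed, not a full working agent.
from langchain_core.tools import tool

@tool(return_direct=True)
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

# 0.2.9:  AgentExecutor(agent=<some BaseMultiActionAgent>, tools=[add]) raised
#         ValueError("Tools that have `return_direct=True` are not allowed ...")
# 0.2.11: the same construction is accepted.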
langchain/agents/react/agent.py
CHANGED
@@ -71,7 +71,7 @@ def create_react_agent(
                 "input": "what's my name?",
                 # Notice that chat_history is a string
                 # since this prompt is aimed at LLMs, not chat models
-                "chat_history": "Human: My name is Bob\nAI: Hello Bob!",
+                "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
             }
         )
 
langchain/chains/conversational_retrieval/base.py
CHANGED
@@ -42,8 +42,11 @@ def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
     buffer = ""
     for dialogue_turn in chat_history:
         if isinstance(dialogue_turn, BaseMessage):
-            role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ")
-            buffer += f"\n{role_prefix}{dialogue_turn.content}"
+            if len(dialogue_turn.content) > 0:
+                role_prefix = _ROLE_MAP.get(
+                    dialogue_turn.type, f"{dialogue_turn.type}: "
+                )
+                buffer += f"\n{role_prefix}{dialogue_turn.content}"
         elif isinstance(dialogue_turn, tuple):
             human = "Human: " + dialogue_turn[0]
             ai = "Assistant: " + dialogue_turn[1]
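The added guard changes how empty messages are rendered: a `BaseMessage` with empty content (for example, an AI message that only carried tool calls) no longer emits a bare role prefix into the stringified history. Illustratively:

# Illustration of the guard above; _get_chat_history is private, so this only
# sketches the before/after rendering.
from langchain_core.messages import AIMessage, HumanMessage

history = [HumanMessage(content="hi"), AIMessage(content="")]
# 0.2.9:  "\nHuman: hi\n<role prefix>"   (dangling prefix for the empty message)
# 0.2.11: "\nHuman: hi"                  (the empty message is skipped)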
langchain/chains/openai_functions/qa_with_structure.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, Type, Union
+from typing import Any, List, Optional, Type, Union, cast
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
@@ -10,6 +10,7 @@ from langchain_core.output_parsers.openai_functions import (
 from langchain_core.prompts import PromptTemplate
 from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -45,7 +46,7 @@ def create_qa_with_structure_chain(
 
     """
     if output_parser == "pydantic":
-        if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
+        if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
             raise ValueError(
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
@@ -60,10 +61,10 @@ def create_qa_with_structure_chain(
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
-    if isinstance(schema, type) and issubclass(schema, BaseModel):
-        schema_dict = schema.schema()
+    if isinstance(schema, type) and is_basemodel_subclass(schema):
+        schema_dict = cast(dict, schema.schema())
     else:
-        schema_dict = schema
+        schema_dict = cast(dict, schema)
     function = {
         "name": schema_dict["title"],
         "description": schema_dict["description"],
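This file and `structured_output/base.py` below make the same substitution: `is_basemodel_subclass` accepts both pydantic v1 and v2 model classes, where a plain `issubclass(..., BaseModel)` check only matched models built on the one `BaseModel` it was compared against. A quick sketch, assuming pydantic 2.x is installed:

from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel as BaseModelV2

class AnswerV1(BaseModelV1):
    answer: str

class AnswerV2(BaseModelV2):
    answer: str

# Both styles of schema now pass the check in create_qa_with_structure_chain.
assert is_basemodel_subclass(AnswerV1)
assert is_basemodel_subclass(AnswerV2)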
langchain/chains/structured_output/base.py
CHANGED
@@ -24,6 +24,7 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_function,
     convert_to_openai_tool,
 )
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 
 @deprecated(
@@ -465,7 +466,7 @@ def _get_openai_tool_output_parser(
     *,
     first_tool_only: bool = False,
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
-    if isinstance(tool, type) and issubclass(tool, BaseModel):
+    if isinstance(tool, type) and is_basemodel_subclass(tool):
         output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
             PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
         )
@@ -493,7 +494,7 @@ def get_openai_output_parser(
         not a Pydantic class, then the output parser will automatically extract
         only the function arguments and not the function name.
     """
-    if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
+    if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
         if len(functions) > 1:
             pydantic_schema: Union[Dict, Type[BaseModel]] = {
                 convert_to_openai_function(fn)["name"]: fn for fn in functions
@@ -516,7 +517,7 @@ def _create_openai_json_runnable(
     output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
 ) -> Runnable:
     """"""
-    if isinstance(output_schema, type) and issubclass(output_schema, BaseModel):
+    if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
             pydantic_object=output_schema,  # type: ignore
         )
langchain/evaluation/loading.py
CHANGED
langchain/memory/summary.py
CHANGED
@@ -34,6 +34,18 @@ class SummarizerMixin(BaseModel):
         chain = LLMChain(llm=self.llm, prompt=self.prompt)
         return chain.predict(summary=existing_summary, new_lines=new_lines)
 
+    async def apredict_new_summary(
+        self, messages: List[BaseMessage], existing_summary: str
+    ) -> str:
+        new_lines = get_buffer_string(
+            messages,
+            human_prefix=self.human_prefix,
+            ai_prefix=self.ai_prefix,
+        )
+
+        chain = LLMChain(llm=self.llm, prompt=self.prompt)
+        return await chain.apredict(summary=existing_summary, new_lines=new_lines)
+
 
 class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
     """Conversation summarizer to chat memory."""
langchain/memory/summary_buffer.py
CHANGED
@@ -19,6 +19,11 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         """String buffer of memory."""
         return self.load_memory_variables({})[self.memory_key]
 
+    async def abuffer(self) -> Union[str, List[BaseMessage]]:
+        """Async memory buffer."""
+        memory_variables = await self.aload_memory_variables({})
+        return memory_variables[self.memory_key]
+
     @property
     def memory_variables(self) -> List[str]:
         """Will always return list of memory variables.
@@ -43,6 +48,22 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         )
         return {self.memory_key: final_buffer}
 
+    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        """Asynchronously return key-value pairs given the text input to the chain."""
+        buffer = await self.chat_memory.aget_messages()
+        if self.moving_summary_buffer != "":
+            first_messages: List[BaseMessage] = [
+                self.summary_message_cls(content=self.moving_summary_buffer)
+            ]
+            buffer = first_messages + buffer
+        if self.return_messages:
+            final_buffer: Any = buffer
+        else:
+            final_buffer = get_buffer_string(
+                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
+            )
+        return {self.memory_key: final_buffer}
+
     @root_validator()
     def validate_prompt_input_variables(cls, values: Dict) -> Dict:
         """Validate that prompt input variables are consistent."""
@@ -60,6 +81,13 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         super().save_context(inputs, outputs)
         self.prune()
 
+    async def asave_context(
+        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+    ) -> None:
+        """Asynchronously save context from this conversation to buffer."""
+        await super().asave_context(inputs, outputs)
+        await self.aprune()
+
     def prune(self) -> None:
         """Prune buffer if it exceeds max token limit"""
         buffer = self.chat_memory.messages
@@ -73,7 +101,25 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
             pruned_memory, self.moving_summary_buffer
         )
 
+    async def aprune(self) -> None:
+        """Asynchronously prune buffer if it exceeds max token limit"""
+        buffer = self.chat_memory.messages
+        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+        if curr_buffer_length > self.max_token_limit:
+            pruned_memory = []
+            while curr_buffer_length > self.max_token_limit:
+                pruned_memory.append(buffer.pop(0))
+                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+            self.moving_summary_buffer = await self.apredict_new_summary(
+                pruned_memory, self.moving_summary_buffer
+            )
+
     def clear(self) -> None:
         """Clear memory contents."""
         super().clear()
         self.moving_summary_buffer = ""
+
+    async def aclear(self) -> None:
+        """Asynchronously clear memory contents."""
+        await super().aclear()
+        self.moving_summary_buffer = ""
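Together, summary.py and summary_buffer.py give `ConversationSummaryBufferMemory` a complete async surface (`apredict_new_summary`, `abuffer`, `aload_memory_variables`, `asave_context`, `aprune`, `aclear`). A minimal usage sketch, assuming `langchain-openai` is installed; the model name is an illustrative choice:

import asyncio

from langchain.memory import ConversationSummaryBufferMemory
from langchain_openai import ChatOpenAI

async def main() -> None:
    memory = ConversationSummaryBufferMemory(
        llm=ChatOpenAI(model="gpt-4o-mini"),  # illustrative model choice
        max_token_limit=40,
    )
    # New in 0.2.11: async save triggers aprune(), which summarizes overflow
    # messages via apredict_new_summary() instead of the blocking path.
    await memory.asave_context({"input": "hi, I'm Bob"}, {"output": "hello Bob!"})
    print(await memory.aload_memory_variables({}))

asyncio.run(main())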
langchain/memory/vectorstore_token_buffer_memory.py
CHANGED
@@ -1,7 +1,7 @@
 """
 Class for a conversation memory buffer with older messages stored in a vectorstore .
 
-This
+This implements a conversation memory in which the messages are stored in a memory
 buffer up to a specified token limit. When the limit is exceeded, older messages are
 saved to a vectorstore backing database. The vectorstore can be made persistent across
 sessions.
@@ -67,7 +67,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         from langchain.memory.token_buffer_vectorstore_memory import (
             ConversationVectorStoreTokenBufferMemory
         )
-        from langchain_community.vectorstores import Chroma
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import HuggingFaceInstructEmbeddings
         from langchain_openai import OpenAI
langchain/output_parsers/fix.py
CHANGED
@@ -7,12 +7,19 @@ from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import BasePromptTemplate
 from langchain_core.runnables import RunnableSerializable
+from typing_extensions import TypedDict
 
 from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
 
 T = TypeVar("T")
 
 
+class OutputFixingParserRetryChainInput(TypedDict, total=False):
+    instructions: str
+    completion: str
+    error: str
+
+
 class OutputFixingParser(BaseOutputParser[T]):
     """Wrap a parser and try to fix parsing errors."""
 
@@ -23,7 +30,9 @@ class OutputFixingParser(BaseOutputParser[T]):
     parser: BaseOutputParser[T]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[RunnableSerializable, Any]
+    retry_chain: Union[
+        RunnableSerializable[OutputFixingParserRetryChainInput, str], Any
+    ]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
     """The maximum number of times to retry the parse."""
@@ -73,16 +82,16 @@ class OutputFixingParser(BaseOutputParser[T]):
                 try:
                     completion = self.retry_chain.invoke(
                         dict(
-                            instructions=self.parser.get_format_instructions(),
-
+                            instructions=self.parser.get_format_instructions(),
+                            completion=completion,
                             error=repr(e),
                         )
                     )
                 except (NotImplementedError, AttributeError):
-                    # Case: self.parser does not have get_format_instructions
+                    # Case: self.parser does not have get_format_instructions
                     completion = self.retry_chain.invoke(
                         dict(
-
+                            completion=completion,
                             error=repr(e),
                         )
                     )
@@ -102,7 +111,7 @@ class OutputFixingParser(BaseOutputParser[T]):
                 retries += 1
                 if self.legacy and hasattr(self.retry_chain, "arun"):
                     completion = await self.retry_chain.arun(
-                        instructions=self.parser.get_format_instructions(),
+                        instructions=self.parser.get_format_instructions(),
                         completion=completion,
                         error=repr(e),
                     )
@@ -110,16 +119,16 @@ class OutputFixingParser(BaseOutputParser[T]):
                 try:
                     completion = await self.retry_chain.ainvoke(
                         dict(
-                            instructions=self.parser.get_format_instructions(),
-
+                            instructions=self.parser.get_format_instructions(),
+                            completion=completion,
                             error=repr(e),
                         )
                     )
                 except (NotImplementedError, AttributeError):
-                    # Case: self.parser does not have get_format_instructions
+                    # Case: self.parser does not have get_format_instructions
                     completion = await self.retry_chain.ainvoke(
                         dict(
-
+                            completion=completion,
                             error=repr(e),
                         )
                     )
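The new `OutputFixingParserRetryChainInput` TypedDict documents the keys the retry chain receives (`instructions`, `completion`, `error`, matching `NAIVE_FIX_PROMPT`). A short usage sketch, assuming `langchain-openai` is installed:

from langchain.output_parsers import OutputFixingParser
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI

class Person(BaseModel):
    name: str
    age: int

parser = PydanticOutputParser(pydantic_object=Person)
fixing_parser = OutputFixingParser.from_llm(llm=ChatOpenAI(), parser=parser)
# A malformed completion is fed back through the retry chain as
# {"instructions": ..., "completion": ..., "error": ...}, up to max_retries times.
result = fixing_parser.parse("{'name': 'Ada', 'age': 36}")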
langchain/output_parsers/retry.py
CHANGED
@@ -8,6 +8,7 @@ from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompt_values import PromptValue
 from langchain_core.prompts import BasePromptTemplate, PromptTemplate
 from langchain_core.runnables import RunnableSerializable
+from typing_extensions import TypedDict
 
 NAIVE_COMPLETION_RETRY = """Prompt:
 {prompt}
@@ -34,6 +35,17 @@ NAIVE_RETRY_WITH_ERROR_PROMPT = PromptTemplate.from_template(
 T = TypeVar("T")
 
 
+class RetryOutputParserRetryChainInput(TypedDict):
+    prompt: str
+    completion: str
+
+
+class RetryWithErrorOutputParserRetryChainInput(TypedDict):
+    prompt: str
+    completion: str
+    error: str
+
+
 class RetryOutputParser(BaseOutputParser[T]):
     """Wrap a parser and try to fix parsing errors.
 
@@ -44,7 +56,7 @@ class RetryOutputParser(BaseOutputParser[T]):
     parser: BaseOutputParser[T]
     """The parser to use to parse the output."""
     # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[RunnableSerializable, Any]
+    retry_chain: Union[RunnableSerializable[RetryOutputParserRetryChainInput, str], Any]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
     """The maximum number of times to retry the parse."""
@@ -97,13 +109,12 @@ class RetryOutputParser(BaseOutputParser[T]):
                     completion = self.retry_chain.run(
                         prompt=prompt_value.to_string(),
                         completion=completion,
-                        error=repr(e),
                     )
                 else:
                     completion = self.retry_chain.invoke(
                         dict(
                             prompt=prompt_value.to_string(),
-
+                            completion=completion,
                         )
                     )
 
@@ -139,7 +150,7 @@ class RetryOutputParser(BaseOutputParser[T]):
                 completion = await self.retry_chain.ainvoke(
                     dict(
                         prompt=prompt_value.to_string(),
-
+                        completion=completion,
                     )
                 )
 
@@ -174,8 +185,10 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
 
     parser: BaseOutputParser[T]
     """The parser to use to parse the output."""
-    # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
-    retry_chain: Union[RunnableSerializable, Any]
+    # Should be an LLMChain but we want to avoid top-level imports from langchain.chains
+    retry_chain: Union[
+        RunnableSerializable[RetryWithErrorOutputParserRetryChainInput, str], Any
+    ]
     """The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
     max_retries: int = 1
     """The maximum number of times to retry the parse."""
@@ -204,7 +217,7 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
         chain = prompt | llm
         return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
 
-    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
+    def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
         retries = 0
 
         while retries <= self.max_retries:
@@ -224,7 +237,7 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
                 else:
                     completion = self.retry_chain.invoke(
                         dict(
-
+                            completion=completion,
                             prompt=prompt_value.to_string(),
                             error=repr(e),
                         )
@@ -253,7 +266,7 @@ class RetryWithErrorOutputParser(BaseOutputParser[T]):
                 completion = await self.retry_chain.ainvoke(
                     dict(
                         prompt=prompt_value.to_string(),
-
+                        completion=completion,
                         error=repr(e),
                     )
                 )
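Same pattern here: `RetryOutputParser`'s chain input is typed as prompt/completion, and `RetryWithErrorOutputParser`'s additionally carries the error string. Unlike `OutputFixingParser`, both need the original prompt value at parse time; a sketch, again assuming `langchain-openai`:

from langchain.output_parsers import RetryWithErrorOutputParser
from langchain_core.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI

class Action(BaseModel):
    action: str
    action_input: str

parser = PydanticOutputParser(pydantic_object=Action)
retry_parser = RetryWithErrorOutputParser.from_llm(llm=ChatOpenAI(), parser=parser)

prompt_value = PromptTemplate.from_template("{query}").format_prompt(
    query="Pick an action"
)
result = retry_parser.parse_with_prompt("badly formatted output", prompt_value)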
langchain/retrievers/contextual_compression.py
CHANGED
@@ -5,7 +5,7 @@ from langchain_core.callbacks import (
     CallbackManagerForRetrieverRun,
 )
 from langchain_core.documents import Document
-from langchain_core.retrievers import BaseRetriever
+from langchain_core.retrievers import BaseRetriever, RetrieverLike
 
 from langchain.retrievers.document_compressors.base import (
     BaseDocumentCompressor,
@@ -18,7 +18,7 @@ class ContextualCompressionRetriever(BaseRetriever):
     base_compressor: BaseDocumentCompressor
     """Compressor for compressing retrieved documents."""
 
-    base_retriever: BaseRetriever
+    base_retriever: RetrieverLike
     """Base Retriever to use for getting relevant documents."""
 
     class Config:
langchain/retrievers/document_compressors/__init__.py
CHANGED
@@ -15,6 +15,9 @@ from langchain.retrievers.document_compressors.cross_encoder_rerank import (
 from langchain.retrievers.document_compressors.embeddings_filter import (
     EmbeddingsFilter,
 )
+from langchain.retrievers.document_compressors.listwise_rerank import (
+    LLMListwiseRerank,
+)
 
 _module_lookup = {
     "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
@@ -31,6 +34,7 @@ def __getattr__(name: str) -> Any:
 __all__ = [
     "DocumentCompressorPipeline",
     "EmbeddingsFilter",
+    "LLMListwiseRerank",
     "LLMChainExtractor",
     "LLMChainFilter",
     "CohereRerank",
langchain/retrievers/document_compressors/listwise_rerank.py
ADDED
@@ -0,0 +1,137 @@
+"""Filter that uses an LLM to rerank documents listwise and select top-k."""
+
+from typing import Any, Dict, List, Optional, Sequence
+
+from langchain_core.callbacks import Callbacks
+from langchain_core.documents import BaseDocumentCompressor, Document
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
+from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
+
+_default_system_tmpl = """{context}
+
+Sort the Documents by their relevance to the Query."""
+_DEFAULT_PROMPT = ChatPromptTemplate.from_messages(
+    [("system", _default_system_tmpl), ("human", "{query}")],
+)
+
+
+def _get_prompt_input(input_: dict) -> Dict[str, Any]:
+    """Return the compression chain input."""
+    documents = input_["documents"]
+    context = ""
+    for index, doc in enumerate(documents):
+        context += f"Document ID: {index}\n```{doc.page_content}```\n\n"
+    context += f"Documents = [Document ID: 0, ..., Document ID: {len(documents) - 1}]"
+    return {"query": input_["query"], "context": context}
+
+
+def _parse_ranking(results: dict) -> List[Document]:
+    ranking = results["ranking"]
+    docs = results["documents"]
+    return [docs[i] for i in ranking.ranked_document_ids]
+
+
+class LLMListwiseRerank(BaseDocumentCompressor):
+    """Document compressor that uses `Zero-Shot Listwise Document Reranking`.
+
+    Adapted from: https://arxiv.org/pdf/2305.02156.pdf
+
+    ``LLMListwiseRerank`` uses a language model to rerank a list of documents based on
+    their relevance to a query.
+
+    **NOTE**: requires that underlying model implement ``with_structured_output``.
+
+    Example usage:
+        .. code-block:: python
+
+            from langchain.retrievers.document_compressors.listwise_rerank import (
+                LLMListwiseRerank,
+            )
+            from langchain_core.documents import Document
+            from langchain_openai import ChatOpenAI
+
+            documents = [
+                Document("Sally is my friend from school"),
+                Document("Steve is my friend from home"),
+                Document("I didn't always like yogurt"),
+                Document("I wonder why it's called football"),
+                Document("Where's waldo"),
+            ]
+
+            reranker = LLMListwiseRerank.from_llm(
+                llm=ChatOpenAI(model="gpt-3.5-turbo"), top_n=3
+            )
+            compressed_docs = reranker.compress_documents(documents, "Who is steve")
+            assert len(compressed_docs) == 3
+            assert "Steve" in compressed_docs[0].page_content
+    """
+
+    reranker: Runnable[Dict, List[Document]]
+    """LLM-based reranker to use for filtering documents. Expected to take in a dict
+    with 'documents: Sequence[Document]' and 'query: str' keys and output a
+    List[Document]."""
+
+    top_n: int = 3
+    """Number of documents to return."""
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def compress_documents(
+        self,
+        documents: Sequence[Document],
+        query: str,
+        callbacks: Optional[Callbacks] = None,
+    ) -> Sequence[Document]:
+        """Filter down documents based on their relevance to the query."""
+        results = self.reranker.invoke(
+            {"documents": documents, "query": query}, config={"callbacks": callbacks}
+        )
+        return results[: self.top_n]
+
+    @classmethod
+    def from_llm(
+        cls,
+        llm: BaseLanguageModel,
+        *,
+        prompt: Optional[BasePromptTemplate] = None,
+        **kwargs: Any,
+    ) -> "LLMListwiseRerank":
+        """Create a LLMListwiseRerank document compressor from a language model.
+
+        Args:
+            llm: The language model to use for filtering. **Must implement
+                BaseLanguageModel.with_structured_output().**
+            prompt: The prompt to use for the filter.
+            **kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            A LLMListwiseRerank document compressor that uses the given language model.
+        """
+
+        if llm.with_structured_output == BaseLanguageModel.with_structured_output:
+            raise ValueError(
+                f"llm of type {type(llm)} does not implement `with_structured_output`."
+            )
+
+        class RankDocuments(BaseModel):
+            """Rank the documents by their relevance to the user question.
+            Rank from most to least relevant."""
+
+            ranked_document_ids: List[int] = Field(
+                ...,
+                description=(
+                    "The integer IDs of the documents, sorted from most to least "
+                    "relevant to the user question."
+                ),
+            )
+
+        _prompt = prompt if prompt is not None else _DEFAULT_PROMPT
+        reranker = RunnablePassthrough.assign(
+            ranking=RunnableLambda(_get_prompt_input)
+            | _prompt
+            | llm.with_structured_output(RankDocuments)
+        ) | RunnableLambda(_parse_ranking)
+        return cls(reranker=reranker, **kwargs)
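A hedged end-to-end sketch of the new compressor, wired through `ContextualCompressionRetriever` (whose `base_retriever` is now typed as `RetrieverLike`, per the change above); the vectorstore and models are illustrative choices, not the only options:

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMListwiseRerank
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore  # assumes a recent langchain-core
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

vectorstore = InMemoryVectorStore.from_documents(
    [
        Document(page_content="Steve is my friend from home"),
        Document(page_content="I wonder why it's called football"),
    ],
    OpenAIEmbeddings(),
)
retriever = ContextualCompressionRetriever(
    base_compressor=LLMListwiseRerank.from_llm(llm=ChatOpenAI(), top_n=1),
    base_retriever=vectorstore.as_retriever(),
)
docs = retriever.invoke("Who is Steve?")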
langchain/retrievers/parent_document_retriever.py
CHANGED
@@ -1,5 +1,5 @@
 import uuid
-from typing import Any, List, Optional, Sequence
+from typing import Any, List, Optional, Sequence, Tuple
 
 from langchain_core.documents import Document
 from langchain_text_splitters import TextSplitter
@@ -31,8 +31,8 @@ class ParentDocumentRetriever(MultiVectorRetriever):
 
     .. code-block:: python
 
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import OpenAIEmbeddings
-        from langchain_community.vectorstores import Chroma
         from langchain_text_splitters import RecursiveCharacterTextSplitter
         from langchain.storage import InMemoryStore
 
@@ -69,27 +69,12 @@ class ParentDocumentRetriever(MultiVectorRetriever):
         metadata.
     """
 
-    def add_documents(
+    def _split_docs_for_adding(
         self,
         documents: List[Document],
         ids: Optional[List[str]] = None,
         add_to_docstore: bool = True,
-        **kwargs: Any,
-    ) -> None:
-        """Adds documents to the docstore and vectorstores.
-
-        Args:
-            documents: List of documents to add
-            ids: Optional list of ids for documents. If provided should be the same
-                length as the list of documents. Can be provided if parent documents
-                are already in the document store and you don't want to re-add
-                to the docstore. If not provided, random UUIDs will be used as
-                ids.
-            add_to_docstore: Boolean of whether to add documents to docstore.
-                This can be false if and only if `ids` are provided. You may want
-                to set this to False if the documents are already in the docstore
-                and you don't want to re-add them.
-        """
+    ) -> Tuple[List[Document], List[Tuple[str, Document]]]:
         if self.parent_splitter is not None:
             documents = self.parent_splitter.split_documents(documents)
         if ids is None:
@@ -120,6 +105,43 @@ class ParentDocumentRetriever(MultiVectorRetriever):
             _doc.metadata[self.id_key] = _id
             docs.extend(sub_docs)
             full_docs.append((_id, doc))
+
+        return docs, full_docs
+
+    def add_documents(
+        self,
+        documents: List[Document],
+        ids: Optional[List[str]] = None,
+        add_to_docstore: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        """Adds documents to the docstore and vectorstores.
+
+        Args:
+            documents: List of documents to add
+            ids: Optional list of ids for documents. If provided should be the same
+                length as the list of documents. Can be provided if parent documents
+                are already in the document store and you don't want to re-add
+                to the docstore. If not provided, random UUIDs will be used as
+                ids.
+            add_to_docstore: Boolean of whether to add documents to docstore.
+                This can be false if and only if `ids` are provided. You may want
+                to set this to False if the documents are already in the docstore
+                and you don't want to re-add them.
+        """
+        docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore)
         self.vectorstore.add_documents(docs, **kwargs)
         if add_to_docstore:
             self.docstore.mset(full_docs)
+
+    async def aadd_documents(
+        self,
+        documents: List[Document],
+        ids: Optional[List[str]] = None,
+        add_to_docstore: bool = True,
+        **kwargs: Any,
+    ) -> None:
+        docs, full_docs = self._split_docs_for_adding(documents, ids, add_to_docstore)
+        await self.vectorstore.aadd_documents(docs, **kwargs)
+        if add_to_docstore:
+            await self.docstore.amset(full_docs)
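The refactor factors the splitting logic out of `add_documents` into `_split_docs_for_adding`, so the new `aadd_documents` can reuse it and await the vectorstore/docstore writes. A hedged sketch (splitter sizes and the in-memory stores are illustrative):

import asyncio

from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore  # illustrative backing store
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

retriever = ParentDocumentRetriever(
    vectorstore=InMemoryVectorStore(OpenAIEmbeddings()),
    docstore=InMemoryStore(),
    child_splitter=RecursiveCharacterTextSplitter(chunk_size=400),
)

# New in 0.2.11: same split as add_documents, then awaits aadd_documents/amset.
asyncio.run(retriever.aadd_documents([Document(page_content="a long parent document ...")]))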
{langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.2.9
+Version: 0.2.11
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -15,7 +15,7 @@ Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.2.
+Requires-Dist: langchain-core (>=0.2.23,<0.3.0)
 Requires-Dist: langchain-text-splitters (>=0.2.0,<0.3.0)
 Requires-Dist: langsmith (>=0.1.17,<0.2.0)
 Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
@@ -24,6 +24,8 @@ Requires-Dist: pydantic (>=1,<3)
 Requires-Dist: requests (>=2,<3)
 Requires-Dist: tenacity (>=8.1.0,<9.0.0,!=8.4.0)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
+Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
 Description-Content-Type: text/markdown
 
 # 🦜️🔗 LangChain
{langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/RECORD
CHANGED
@@ -7,7 +7,7 @@ langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
 langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
 langchain/agents/__init__.py,sha256=dm8fJzo_wRX_Fz6XF-o8Uuduy5G5vE2B52RWBdzUIz8,6242
-langchain/agents/agent.py,sha256=
+langchain/agents/agent.py,sha256=JDpb8_nKV_0Yt0rIsvFTl6zdr38REBggMGfftuBk09k,61641
 langchain/agents/agent_iterator.py,sha256=rmyKOHFHStwjqsQDDyNnFaDr43G32fpMCks7rOz4wq0,16438
 langchain/agents/agent_toolkits/__init__.py,sha256=xgjLQ6eP0npPAJJr0O1lM-dZwtP0XR2fJOovJFIgNuo,7365
 langchain/agents/agent_toolkits/ainetwork/__init__.py,sha256=henfKntuAEjG1KoN-Hk1IHy3fFGCYPWLEuZtF2bIdZI,25
@@ -129,7 +129,7 @@ langchain/agents/output_parsers/self_ask.py,sha256=-4_-hQbKB1ichR5odEyeYUV-wIdLm
 langchain/agents/output_parsers/tools.py,sha256=9hRlUsJVmS0VmFzEKVYfg5AeusynB2lw4Xi4uYns5JM,3753
 langchain/agents/output_parsers/xml.py,sha256=2MjxW4nAM4sZN-in3K40_K5DBx6cI2Erb0TZbpSoZIY,1658
 langchain/agents/react/__init__.py,sha256=9RIjjaUDfWnoMEMpV57JQ0CwZZC5Soh357QdKpVIM-4,76
-langchain/agents/react/agent.py,sha256=
+langchain/agents/react/agent.py,sha256=pKKK3x4lKfMjiOZDF8T3_Q-PTRyCwKUBGrBhAAazmgw,5096
 langchain/agents/react/base.py,sha256=eCUikQQ688hp1s3h0Nqlz4ueaIVrpfRQe50TNai3I4Y,5799
 langchain/agents/react/output_parser.py,sha256=bEL3U3mxYGK7_7Lm4GlOq8JKQTgyHFQQIEVUUZjV1qs,1231
 langchain/agents/react/textworld_prompt.py,sha256=b9WDM8pFmqrfAWJ8n6zkxlPlxQI5oHljZ1R9g5y6cRE,1906
@@ -232,7 +232,7 @@ langchain/chains/conversation/base.py,sha256=057I9fvxKk8QxIqFPgFGuLfUH--7Pj37Y6t
 langchain/chains/conversation/memory.py,sha256=KoKmk5FjPEkioolvmFxcJgRr2wRdWIe1LNBHCtGgUKo,1396
 langchain/chains/conversation/prompt.py,sha256=84xC4dy8yNiCSICT4b6UvZdQXpPifMVw1hf7WnFAVkw,913
 langchain/chains/conversational_retrieval/__init__.py,sha256=hq7jx-kmg3s8qLYnV7gPmzVIPcGqW69H6cXIjklvGjY,49
-langchain/chains/conversational_retrieval/base.py,sha256=
+langchain/chains/conversational_retrieval/base.py,sha256=pdFiox5XV1NLZ6hlgEtR9TwCqTjKIom-jBk-eJ_YRcE,21097
 langchain/chains/conversational_retrieval/prompts.py,sha256=kJITwauXq7dYKnSBoL2EcDTqAnJZlWF_GzJ9C55ZEv8,720
 langchain/chains/elasticsearch_database/__init__.py,sha256=B3Zxy8mxTb4bfMGHC__26BFkvT_6bPisS4rPIFiFWdU,126
 langchain/chains/elasticsearch_database/base.py,sha256=25eDmw6PUfyGWtPueMDWwBz3jwYNl9TAWVaHBB8ZlwY,8319
@@ -290,7 +290,7 @@ langchain/chains/openai_functions/base.py,sha256=UpCGcUYxz93G8MMdqq1LcHZ74Y_MnS9
 langchain/chains/openai_functions/citation_fuzzy_match.py,sha256=bHkYOhTgEG1wIhdC06WL6Wmnc_s-KctLs1cC5PLxwgQ,3558
 langchain/chains/openai_functions/extraction.py,sha256=NTP9lKKeHcqQC_PQtAJLKTsvzAmAdoqPYbyEPlVFUxw,7319
 langchain/chains/openai_functions/openapi.py,sha256=512z96_-iv73_LAd-doxWyjbh2tSrlmLKs0YJY2mfV8,11869
-langchain/chains/openai_functions/qa_with_structure.py,sha256=
+langchain/chains/openai_functions/qa_with_structure.py,sha256=SIhc7FeSlpPr5BQ9-jUmru_uMCl66LF47Ldx_eeNJ3I,4053
 langchain/chains/openai_functions/tagging.py,sha256=nbvW29Cb4tHTz1kQciQa8Upti01brRbhGgC2Mqou2V0,2663
 langchain/chains/openai_functions/utils.py,sha256=GDhYjszQGut1UcJ-dyPvkwiT8gHOV0IejRuIfN7_fhw,1255
 langchain/chains/openai_tools/__init__.py,sha256=xX0If1Nx_ocEOI56EGxCI0v0RZ1_VUegzyODAj0RLVU,134
@@ -336,7 +336,7 @@ langchain/chains/sql_database/__init__.py,sha256=jQotWN4EWMD98Jk-f7rqh5YtbXbP9XX
 langchain/chains/sql_database/prompt.py,sha256=W0xFqVZ18PzxmutnIBJrocXus8_QBByrKtxg8CjGaYw,15458
 langchain/chains/sql_database/query.py,sha256=h-QP5ESatTFj8t7sGsHppXSchy3ZGL1U1afza-Lo8fc,5421
 langchain/chains/structured_output/__init__.py,sha256=-6nFe-gznavFc3XCMv8XkEzuXoto2rI8Q-bcruVPOR8,204
-langchain/chains/structured_output/base.py,sha256=
+langchain/chains/structured_output/base.py,sha256=dOZMme8WBJDgFEXe-TJ34SKi2zR25ZeYUnNqnCAqXZw,25611
 langchain/chains/summarize/__init__.py,sha256=mg1lKtH_x-oJ5qvKY6OD7g9kkqbjMVbL3l3OhfozSQM,151
 langchain/chains/summarize/chain.py,sha256=QA3EgTnT067OLm5waUv_3oiI1mS3KD_uvFkHlns-Jxo,6193
 langchain/chains/summarize/map_reduce_prompt.py,sha256=HZSitW2_WhJINN-_YJCzU6zJXbPuMr5zFek31AzutuQ,238
@@ -638,7 +638,7 @@ langchain/evaluation/embedding_distance/__init__.py,sha256=YLtGUI4ZMxjsn2Q0dGZ-R
 langchain/evaluation/embedding_distance/base.py,sha256=9HmLuuwpEbrdlSqQKvUsBb3jT9d0IyL1mE7cvpbk2dg,17096
 langchain/evaluation/exact_match/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/evaluation/exact_match/base.py,sha256=BykyjgKQ94391eDODzn3m1RXao9ZSXtc9wiww_fysXI,2751
-langchain/evaluation/loading.py,sha256=
+langchain/evaluation/loading.py,sha256=1zUtEao_F9292O0fNHl8i93bw1V94RDsFwXZTWe4-pA,7296
 langchain/evaluation/parsing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/evaluation/parsing/base.py,sha256=oshaVFsY9ggIgOZX_3Xe-x7LPSRaQejmqLRT-nUvSVI,5242
 langchain/evaluation/parsing/json_distance.py,sha256=00h1wUNQyvjQiXi2OWlKb50Hcn_X55w4kndM1L38cAM,3662
@@ -658,7 +658,7 @@ langchain/evaluation/string_distance/__init__.py,sha256=qAz9Z709ocAi_Yd9nbkKnFt1
 langchain/evaluation/string_distance/base.py,sha256=IbCDJxkzBAoAwSsSKRCDc2wPEzEyNv_duIA50ZKOato,14019
 langchain/example_generator.py,sha256=q_JvQKn2pgJOHcBeFc851GpaR4seOZXDe9TISAJheEY,142
 langchain/formatting.py,sha256=4s5AwApo_6t2pVfoFXOgFU9sNNdpVDD44B4ryOwJMJo,168
-langchain/globals/__init__.py,sha256=
+langchain/globals.py,sha256=SUMrEo_KlpODNBDj4JZDILhbxTK_GGDEYmUQVQ-Hzus,7436
 langchain/graphs/__init__.py,sha256=l12tO5owB32RcKbu5O8rtOK0qLVjGee9JjX3RUVT54Q,1528
 langchain/graphs/arangodb_graph.py,sha256=3Gu4bnS0q27AUEuUnoK2asz67iU8KpJktQ2uJvJ-iy0,796
 langchain/graphs/falkordb_graph.py,sha256=PdrxQC9Tl0txQtDTFNk2qR9m5L0apWPwq-SWq3lxGMc,618
@@ -804,12 +804,12 @@ langchain/memory/motorhead_memory.py,sha256=OXjtlAQi1ioRXdM3GVcYmReynkKn8Vm1e5Tr
 langchain/memory/prompt.py,sha256=r8vxZSRydSOWJzRszStN0Wky4n3fyM_QJ2XoKMsP3JA,8181
 langchain/memory/readonly.py,sha256=IbZFbyuPo_bHEzyACQcLIcOPpczoX5CLfM_n0YllYjw,792
 langchain/memory/simple.py,sha256=7El81OHJA0HBqwJ-AZDTQFPfB7B5NEsmY_fEOrwD0XA,761
-langchain/memory/summary.py,sha256=
-langchain/memory/summary_buffer.py,sha256=
+langchain/memory/summary.py,sha256=arzVYYAkWWRwhLVZmkX3ap1mrpmrnPYqmDFp3o56RYs,3801
+langchain/memory/summary_buffer.py,sha256=f_oaPGczQVHpMQ4oRiR7e2ZGG4ehdQ-9h_GzjEj3TaE,5068
 langchain/memory/token_buffer.py,sha256=E1N7bWSkAmi-7V7F-7iRl-BADStnplp-zwtUndjXBMM,2144
 langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
 langchain/memory/vectorstore.py,sha256=SMt1iqtqTm3rcecWqwEmCcX5l-r_JVggKpuf4faUIGI,3875
-langchain/memory/vectorstore_token_buffer_memory.py,sha256=
+langchain/memory/vectorstore_token_buffer_memory.py,sha256=uB7N-3KHSpbzeS2TTnxIzMqRIfsgki4w8uyNKkE-cWw,7620
 langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
 langchain/model_laboratory.py,sha256=IaJzVG_SbFX7W6ODriqqme-Q5x0MB18j4Bhg1Y-fWLo,3278
 langchain/output_parsers/__init__.py,sha256=A9fDuB-lYuOIN8QbDx-fULqSwugB7saLRKD23gdaIl4,2720
@@ -818,7 +818,7 @@ langchain/output_parsers/combining.py,sha256=dYNXviLuZBe4OV1hyTXJy7EJ5mFtmc6KAX5
 langchain/output_parsers/datetime.py,sha256=zxhwax0YxVahE3CCHMXTqjpyzQcffgZ9J0NA0qLL0_8,1974
 langchain/output_parsers/enum.py,sha256=1oGbXB7ujsAdUSkYQG8XV4gBq4pNqSOCdu3ANt0lYpo,1287
 langchain/output_parsers/ernie_functions.py,sha256=86DsYlAGncjRalnmw5ZGwhH80lP2ms6zaw8PJGC3m3Q,1427
-langchain/output_parsers/fix.py,sha256=
+langchain/output_parsers/fix.py,sha256=ByGx4IbiVNu9pxo0NHc5Pvf1cUIYhE5LHhHgUZX2i18,5515
 langchain/output_parsers/format_instructions.py,sha256=y5oSpjwzgmvYRNhfe0JmKHHdFZZP65L2snJI6xcMXEY,3958
 langchain/output_parsers/json.py,sha256=2FJL7uLd7pHgvpQm-r5XDyt9S1ZZ9mlJUW8ilQAQ0k4,340
 langchain/output_parsers/list.py,sha256=D35r0U51Xy5wHn-VcWxr97Ftul4UqszmyLetDi4syYQ,310
@@ -831,7 +831,7 @@ langchain/output_parsers/pydantic.py,sha256=uxbrfdyPnZxfdDvmuDr3QOmBFMwML3SfMDEm
 langchain/output_parsers/rail_parser.py,sha256=iHmX3ux2jE2k0MsLqe5XCrJ1eQOBBfZtRbRzQoYPTfU,691
 langchain/output_parsers/regex.py,sha256=TAkxKzxRQQ810LuXbxYatwLZgsYhoVwez3j5e2P55bA,1230
 langchain/output_parsers/regex_dict.py,sha256=UK6iL4Hx-q6UlPNEGLAnbh7_8-IwtXY2V1-_KicG1Z8,1725
-langchain/output_parsers/retry.py,sha256=
+langchain/output_parsers/retry.py,sha256=FpbfPwC9kqwbEqHE9MmF6UJH0rakEt1OxSwMdLxNuyA,10295
 langchain/output_parsers/structured.py,sha256=YdoqEl1FXanSNVtXZapYPKgiz7VfudzXvBXYQvwr4vo,3165
 langchain/output_parsers/xml.py,sha256=WDHazWjxO-nDAzxkBJrd1tGINVrzo4mH2-Qgqtz9Y2w,93
 langchain/output_parsers/yaml.py,sha256=4JLARJgFf-B2eikneVk3hDtCo9WQdlmPCHOMIpOgcAw,2269
@@ -863,10 +863,10 @@ langchain/retrievers/bm25.py,sha256=L3Pq77NNfV0YDlMkU-ODvJN8ksi1SROQ-vYpPqN5gHs,
 langchain/retrievers/chaindesk.py,sha256=e3oHctHNecz14jz70sMw0_YrFjeWXv7Q04r--DnxWq4,641
 langchain/retrievers/chatgpt_plugin_retriever.py,sha256=Pds7FgWv-e6u43noFsO3v2YV8Y6FUjdkmYs5zjl79Nk,653
 langchain/retrievers/cohere_rag_retriever.py,sha256=YMhx_AmBHUDw6-_cQtnESl0WKjtRmjvbDNQvZs3iYm4,641
-langchain/retrievers/contextual_compression.py,sha256=
+langchain/retrievers/contextual_compression.py,sha256=I8VHaS2DHoFvAHotix9GjOm5933PpaVbU--qVP7UXDQ,2305
 langchain/retrievers/databerry.py,sha256=uMTLwG-QWCaORSPeFshi105VvXCizjF6551XHXXjzcE,661
 langchain/retrievers/docarray.py,sha256=5BHkTy7uI5HUFi-k9qS6ZYxMyGdKbAwxhKqpz3cNCTM,791
-langchain/retrievers/document_compressors/__init__.py,sha256=
+langchain/retrievers/document_compressors/__init__.py,sha256=H0xp8dSYIEYZWdAEQN_zY4DX6gx3kepw9jTC_gUSZyk,1263
 langchain/retrievers/document_compressors/base.py,sha256=z2jNHsojFntSdK3XOKtEL-jqWioGOQkYxzZnT8iNwp4,2942
 langchain/retrievers/document_compressors/chain_extract.py,sha256=FeE67K9dSyiHQnP2Y0CMkO9seRBmb3oGSBr521kjxyM,4216
 langchain/retrievers/document_compressors/chain_extract_prompt.py,sha256=FezN4Fk0tRcRFcD1Nf1r2SUyUt49yQKzdcV_iCQj6rE,366
@@ -877,6 +877,7 @@ langchain/retrievers/document_compressors/cross_encoder.py,sha256=_Z7SoPSfOUSk-r
 langchain/retrievers/document_compressors/cross_encoder_rerank.py,sha256=Rbs1y8Tw-vtKVyS93pLHMyjLgQ3w52SktjjCC2pPWuA,1597
 langchain/retrievers/document_compressors/embeddings_filter.py,sha256=znq71R5Qync5JgQsbSDFpPnhPxGR2YiU0WIDGJn-EVQ,5211
 langchain/retrievers/document_compressors/flashrank_rerank.py,sha256=Eo86fJ_T2IbEEeCkI_5rb3Ao4gsdenv-_Ukt33MuMko,709
+langchain/retrievers/document_compressors/listwise_rerank.py,sha256=yo7kptthbmhsW5d4AozHxLqv9_-_E5WCO1WajH025-0,5117
 langchain/retrievers/elastic_search_bm25.py,sha256=eRboOkRQj-_E53gUQIZzxQ1bX0-uEMv7LAQSD7K7Qf8,665
 langchain/retrievers/embedchain.py,sha256=IUnhr3QK7IJ4IMHZDrTBpZuVQ1kyxhG-bAjmOMXb5eA,644
 langchain/retrievers/ensemble.py,sha256=q_-E44ww1NkfxV8h1DiS9sGCsgeTTRp00bnMWe5vD7k,10513
@@ -889,10 +890,10 @@ langchain/retrievers/llama_index.py,sha256=TKuU8atpKcsoRuaK_iU5HLFOjHN8e3FxCe61s
 langchain/retrievers/merger_retriever.py,sha256=uzwpkarGfgByXbqCFYNHXL-mczqfTgJI--9Y6EmY63g,3601
 langchain/retrievers/metal.py,sha256=E9KmySjhmpq_kZhDhOLS8sH4KpbOnWUodR4-3Kd2E30,629
 langchain/retrievers/milvus.py,sha256=f_vi-uodWcS5PyYq-8QD8S7Bx1t_uVswQtqG2D35XnE,796
-langchain/retrievers/multi_query.py,sha256=
+langchain/retrievers/multi_query.py,sha256=q57Ju4GCJh0lKxGOd_Y5WhEWZEc2gz4sY5qqo19t1xc,7091
 langchain/retrievers/multi_vector.py,sha256=rb5gDEAzhzHURJ-VfKGnvq7erZ-xWklnk8RQCBTNsds,4731
 langchain/retrievers/outline.py,sha256=uNuqhoHkfDx73ZEYbHbFjVmJfW-eAdLUzyC9EuoV608,635
-langchain/retrievers/parent_document_retriever.py,sha256=
+langchain/retrievers/parent_document_retriever.py,sha256=oKVp_s5ROske6O0E25yZPOjGA0xmvTGLobmWw_AHgGE,5990
 langchain/retrievers/pinecone_hybrid_search.py,sha256=oEbmHdKIZ86H1O8GhzNC1KVfKb_xAJdRJXpODMY6X3Y,674
 langchain/retrievers/pubmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
 langchain/retrievers/pupmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
@@ -1334,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.2.9.dist-info/LICENSE,sha256=
-langchain-0.2.9.dist-info/METADATA,sha256=
-langchain-0.2.9.dist-info/WHEEL,sha256=
-langchain-0.2.9.dist-info/entry_points.txt,sha256=
-langchain-0.2.9.dist-info/RECORD,,
+langchain-0.2.11.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.2.11.dist-info/METADATA,sha256=CfzWGm_XdcIyDAK8jpMLQfugb4KZIKLLr7WRS7KHqjk,7074
+langchain-0.2.11.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain-0.2.11.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.2.11.dist-info/RECORD,,
/langchain/{globals/__init__.py → globals.py}
File without changes
{langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/LICENSE
File without changes
{langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/WHEEL
File without changes
{langchain-0.2.9.dist-info → langchain-0.2.11.dist-info}/entry_points.txt
File without changes