langchain 0.2.10__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +0 -24
- langchain/agents/react/agent.py +1 -1
- langchain/chains/conversational_retrieval/base.py +5 -2
- langchain/chains/openai_functions/qa_with_structure.py +6 -5
- langchain/chains/structured_output/base.py +4 -3
- langchain/memory/summary.py +12 -0
- langchain/memory/summary_buffer.py +46 -0
- langchain/memory/vectorstore_token_buffer_memory.py +1 -1
- langchain/retrievers/contextual_compression.py +2 -2
- langchain/retrievers/parent_document_retriever.py +1 -1
- {langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/METADATA +4 -2
- {langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/RECORD +16 -16
- /langchain/{globals/__init__.py → globals.py} +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/LICENSE +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/WHEEL +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/entry_points.txt +0 -0
langchain/agents/agent.py
CHANGED
```diff
@@ -1146,30 +1146,6 @@ class AgentExecutor(Chain):
         )
         return values
 
-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_return_direct_tool(cls, values: Dict) -> Dict:
-        """Validate that tools are compatible with agent.
-
-        Args:
-            values: Values to validate.
-
-        Returns:
-            Dict: Validated values.
-
-        Raises:
-            ValueError: If tools that have `return_direct=True` are not allowed.
-        """
-        agent = values["agent"]
-        tools = values["tools"]
-        if isinstance(agent, BaseMultiActionAgent):
-            for tool in tools:
-                if tool.return_direct:
-                    raise ValueError(
-                        "Tools that have `return_direct=True` are not allowed "
-                        "in multi-action agents"
-                    )
-        return values
-
     @root_validator(pre=True)
     def validate_runnable_agent(cls, values: Dict) -> Dict:
         """Convert runnable to agent if passed in.
```
langchain/agents/react/agent.py
CHANGED
```diff
@@ -71,7 +71,7 @@ def create_react_agent(
                 "input": "what's my name?",
                 # Notice that chat_history is a string
                 # since this prompt is aimed at LLMs, not chat models
-                "chat_history": "Human: My name is Bob\nAI: Hello Bob!",
+                "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
             }
         )
 
```
langchain/chains/conversational_retrieval/base.py
CHANGED

```diff
@@ -42,8 +42,11 @@ def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
     buffer = ""
     for dialogue_turn in chat_history:
         if isinstance(dialogue_turn, BaseMessage):
-            role_prefix = _ROLE_MAP.get(dialogue_turn.type, f"{dialogue_turn.type}: ")
-            buffer += f"\n{role_prefix}{dialogue_turn.content}"
+            if len(dialogue_turn.content) > 0:
+                role_prefix = _ROLE_MAP.get(
+                    dialogue_turn.type, f"{dialogue_turn.type}: "
+                )
+                buffer += f"\n{role_prefix}{dialogue_turn.content}"
         elif isinstance(dialogue_turn, tuple):
             human = "Human: " + dialogue_turn[0]
             ai = "Assistant: " + dialogue_turn[1]
```
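The rewritten branch only appends messages whose content is non-empty (for example, AI turns that carry nothing but tool calls), so the formatted history no longer contains bare role prefixes. An illustrative re-implementation of the new behaviour (the role map below is an assumption mirroring the "Human:"/"Assistant:" prefixes used for tuple turns; it is not the module's private `_ROLE_MAP`):

```python
from typing import List

from langchain_core.messages import AIMessage, BaseMessage, HumanMessage

ROLE_MAP = {"human": "Human: ", "ai": "Assistant: "}  # assumed prefixes for this sketch


def format_history(messages: List[BaseMessage]) -> str:
    buffer = ""
    for msg in messages:
        if len(msg.content) > 0:  # empty turns are now skipped
            prefix = ROLE_MAP.get(msg.type, f"{msg.type}: ")
            buffer += f"\n{prefix}{msg.content}"
    return buffer


history = [HumanMessage(content="Hi"), AIMessage(content=""), AIMessage(content="Hello!")]
print(format_history(history))  # "\nHuman: Hi\nAssistant: Hello!" — no dangling "Assistant: "
```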
langchain/chains/openai_functions/qa_with_structure.py
CHANGED

```diff
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, Type, Union
+from typing import Any, List, Optional, Type, Union, cast
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
@@ -10,6 +10,7 @@ from langchain_core.output_parsers.openai_functions import (
 from langchain_core.prompts import PromptTemplate
 from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -45,7 +46,7 @@ def create_qa_with_structure_chain(
 
     """
     if output_parser == "pydantic":
-        if not (isinstance(schema, type) and issubclass(schema, BaseModel)):
+        if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
             raise ValueError(
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
@@ -60,10 +61,10 @@ def create_qa_with_structure_chain(
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
-    if isinstance(schema, type) and issubclass(schema, BaseModel):
-        schema_dict = schema.schema()
+    if isinstance(schema, type) and is_basemodel_subclass(schema):
+        schema_dict = cast(dict, schema.schema())
     else:
-        schema_dict = schema
+        schema_dict = cast(dict, schema)
     function = {
         "name": schema_dict["title"],
         "description": schema_dict["description"],
```
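Both this file and `structured_output/base.py` below swap `issubclass(x, BaseModel)` checks for `langchain_core.utils.pydantic.is_basemodel_subclass`, which recognises both pydantic v1 and v2 model classes. A quick sketch of the difference, assuming pydantic 2.x is installed (the `Person*` models are made up):

```python
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel as BaseModelV2      # pydantic v2 namespace
from pydantic.v1 import BaseModel as BaseModelV1   # v1 compatibility namespace


class PersonV2(BaseModelV2):
    name: str


class PersonV1(BaseModelV1):
    name: str


print(is_basemodel_subclass(PersonV2))    # True
print(is_basemodel_subclass(PersonV1))    # True
print(issubclass(PersonV2, BaseModelV1))  # False — the kind of check these hunks replace
```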
langchain/chains/structured_output/base.py
CHANGED

```diff
@@ -24,6 +24,7 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_function,
     convert_to_openai_tool,
 )
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 
 @deprecated(
@@ -465,7 +466,7 @@ def _get_openai_tool_output_parser(
     *,
     first_tool_only: bool = False,
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
-    if isinstance(tool, type) and issubclass(tool, BaseModel):
+    if isinstance(tool, type) and is_basemodel_subclass(tool):
         output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
             PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
         )
@@ -493,7 +494,7 @@ def get_openai_output_parser(
         not a Pydantic class, then the output parser will automatically extract
         only the function arguments and not the function name.
     """
-    if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):
+    if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
         if len(functions) > 1:
             pydantic_schema: Union[Dict, Type[BaseModel]] = {
                 convert_to_openai_function(fn)["name"]: fn for fn in functions
@@ -516,7 +517,7 @@ def _create_openai_json_runnable(
     output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
 ) -> Runnable:
     """"""
-    if isinstance(output_schema, type) and issubclass(output_schema, BaseModel):
+    if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
             pydantic_object=output_schema, # type: ignore
         )
```
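The same relaxation applies to `get_openai_output_parser` and the private helpers above: passing a pydantic v2 class now selects the pydantic-aware parser instead of falling through to the plain JSON path. A hedged sketch of calling it with a made-up model, importing from the module shown in this diff:

```python
# Sketch only: the Joke model is invented for illustration.
from langchain_core.pydantic_v1 import BaseModel, Field

from langchain.chains.structured_output.base import get_openai_output_parser


class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")


parser = get_openai_output_parser([Joke])
print(type(parser).__name__)  # a pydantic-based output parser is chosen for BaseModel classes
```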
langchain/memory/summary.py
CHANGED
```diff
@@ -34,6 +34,18 @@ class SummarizerMixin(BaseModel):
         chain = LLMChain(llm=self.llm, prompt=self.prompt)
         return chain.predict(summary=existing_summary, new_lines=new_lines)
 
+    async def apredict_new_summary(
+        self, messages: List[BaseMessage], existing_summary: str
+    ) -> str:
+        new_lines = get_buffer_string(
+            messages,
+            human_prefix=self.human_prefix,
+            ai_prefix=self.ai_prefix,
+        )
+
+        chain = LLMChain(llm=self.llm, prompt=self.prompt)
+        return await chain.apredict(summary=existing_summary, new_lines=new_lines)
+
 
 class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
     """Conversation summarizer to chat memory."""
```
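`SummarizerMixin` gains an async counterpart to `predict_new_summary`, available on every memory class that mixes it in. A minimal sketch of awaiting it through `ConversationSummaryMemory`; the `ChatOpenAI` model (and the API key it needs) is an assumption, not part of the diff:

```python
import asyncio

from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI

from langchain.memory import ConversationSummaryMemory


async def main() -> None:
    memory = ConversationSummaryMemory(llm=ChatOpenAI(model="gpt-4o-mini"))
    summary = await memory.apredict_new_summary(
        messages=[HumanMessage(content="My name is Bob"), AIMessage(content="Hello Bob!")],
        existing_summary="",
    )
    print(summary)


asyncio.run(main())
```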
langchain/memory/summary_buffer.py
CHANGED

```diff
@@ -19,6 +19,11 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         """String buffer of memory."""
         return self.load_memory_variables({})[self.memory_key]
 
+    async def abuffer(self) -> Union[str, List[BaseMessage]]:
+        """Async memory buffer."""
+        memory_variables = await self.aload_memory_variables({})
+        return memory_variables[self.memory_key]
+
     @property
     def memory_variables(self) -> List[str]:
         """Will always return list of memory variables.
@@ -43,6 +48,22 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         )
         return {self.memory_key: final_buffer}
 
+    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        """Asynchronously return key-value pairs given the text input to the chain."""
+        buffer = await self.chat_memory.aget_messages()
+        if self.moving_summary_buffer != "":
+            first_messages: List[BaseMessage] = [
+                self.summary_message_cls(content=self.moving_summary_buffer)
+            ]
+            buffer = first_messages + buffer
+        if self.return_messages:
+            final_buffer: Any = buffer
+        else:
+            final_buffer = get_buffer_string(
+                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
+            )
+        return {self.memory_key: final_buffer}
+
     @root_validator()
     def validate_prompt_input_variables(cls, values: Dict) -> Dict:
         """Validate that prompt input variables are consistent."""
@@ -60,6 +81,13 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         super().save_context(inputs, outputs)
         self.prune()
 
+    async def asave_context(
+        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+    ) -> None:
+        """Asynchronously save context from this conversation to buffer."""
+        await super().asave_context(inputs, outputs)
+        await self.aprune()
+
     def prune(self) -> None:
         """Prune buffer if it exceeds max token limit"""
         buffer = self.chat_memory.messages
@@ -73,7 +101,25 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
             pruned_memory, self.moving_summary_buffer
         )
 
+    async def aprune(self) -> None:
+        """Asynchronously prune buffer if it exceeds max token limit"""
+        buffer = self.chat_memory.messages
+        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+        if curr_buffer_length > self.max_token_limit:
+            pruned_memory = []
+            while curr_buffer_length > self.max_token_limit:
+                pruned_memory.append(buffer.pop(0))
+                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+            self.moving_summary_buffer = await self.apredict_new_summary(
+                pruned_memory, self.moving_summary_buffer
+            )
+
     def clear(self) -> None:
         """Clear memory contents."""
         super().clear()
         self.moving_summary_buffer = ""
+
+    async def aclear(self) -> None:
+        """Asynchronously clear memory contents."""
+        await super().aclear()
+        self.moving_summary_buffer = ""
```
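`ConversationSummaryBufferMemory` now has a full async surface (`abuffer`, `aload_memory_variables`, `asave_context`, `aprune`, `aclear`). A usage sketch; the model choice and the tiny `max_token_limit` (to force summarisation quickly) are assumptions:

```python
import asyncio

from langchain_openai import ChatOpenAI

from langchain.memory import ConversationSummaryBufferMemory


async def main() -> None:
    memory = ConversationSummaryBufferMemory(
        llm=ChatOpenAI(model="gpt-4o-mini"), max_token_limit=40
    )
    # asave_context stores the turn, then aprune() summarises overflow via the
    # new apredict_new_summary() once the buffer exceeds max_token_limit.
    await memory.asave_context({"input": "Hi, I'm Bob"}, {"output": "Hello Bob! How can I help?"})
    await memory.asave_context({"input": "What's my name?"}, {"output": "Your name is Bob."})
    print(await memory.aload_memory_variables({}))
    print(await memory.abuffer())
    await memory.aclear()


asyncio.run(main())
```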
langchain/memory/vectorstore_token_buffer_memory.py
CHANGED

```diff
@@ -67,7 +67,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         from langchain.memory.token_buffer_vectorstore_memory import (
             ConversationVectorStoreTokenBufferMemory
         )
-        from langchain_community.vectorstores import Chroma
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import HuggingFaceInstructEmbeddings
         from langchain_openai import OpenAI
 
```
langchain/retrievers/contextual_compression.py
CHANGED

```diff
@@ -5,7 +5,7 @@ from langchain_core.callbacks import (
     CallbackManagerForRetrieverRun,
 )
 from langchain_core.documents import Document
-from langchain_core.retrievers import BaseRetriever
+from langchain_core.retrievers import BaseRetriever, RetrieverLike
 
 from langchain.retrievers.document_compressors.base import (
     BaseDocumentCompressor,
@@ -18,7 +18,7 @@ class ContextualCompressionRetriever(BaseRetriever):
     base_compressor: BaseDocumentCompressor
     """Compressor for compressing retrieved documents."""
 
-    base_retriever: BaseRetriever
+    base_retriever: RetrieverLike
     """Base Retriever to use for getting relevant documents."""
 
     class Config:
```
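`base_retriever` is now annotated as `RetrieverLike` (any runnable that maps a query to a list of documents) rather than requiring a `BaseRetriever` subclass. A hedged sketch of what that permits; the toy corpus, the `RunnableLambda` "retriever", and the fake-embedding compressor are stand-ins chosen only to keep the example self-contained:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.runnables import RunnableLambda

from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import EmbeddingsFilter

CORPUS = [
    Document(page_content="LangChain helps build LLM applications."),
    Document(page_content="Chroma is a vector database."),
]

# A toy "retriever": any document sharing a word with the query is returned.
toy_retriever = RunnableLambda(
    lambda query: [
        d for d in CORPUS
        if set(query.lower().split()) & set(d.page_content.lower().split())
    ]
)

retriever = ContextualCompressionRetriever(
    base_compressor=EmbeddingsFilter(embeddings=DeterministicFakeEmbedding(size=128)),
    base_retriever=toy_retriever,  # accepted now that the field is RetrieverLike
)
print(retriever.invoke("vector database"))
```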
langchain/retrievers/parent_document_retriever.py
CHANGED

```diff
@@ -31,8 +31,8 @@ class ParentDocumentRetriever(MultiVectorRetriever):
 
     .. code-block:: python
 
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import OpenAIEmbeddings
-        from langchain_community.vectorstores import Chroma
         from langchain_text_splitters import RecursiveCharacterTextSplitter
         from langchain.storage import InMemoryStore
 
```
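Both docstring examples above (here and in `vectorstore_token_buffer_memory.py`) move from `langchain_community.vectorstores.Chroma` to the `langchain-chroma` partner package. A sketch of the updated import, assuming `langchain-chroma` and `langchain-openai` are installed (the collection name and embedding model are placeholders):

```python
from langchain_chroma import Chroma  # replaces langchain_community.vectorstores.Chroma
from langchain_openai import OpenAIEmbeddings

vectorstore = Chroma(
    collection_name="full_documents",
    embedding_function=OpenAIEmbeddings(),
)
```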
{langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.2.10
+Version: 0.2.11
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -15,7 +15,7 @@ Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.2.
+Requires-Dist: langchain-core (>=0.2.23,<0.3.0)
 Requires-Dist: langchain-text-splitters (>=0.2.0,<0.3.0)
 Requires-Dist: langsmith (>=0.1.17,<0.2.0)
 Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
@@ -24,6 +24,8 @@ Requires-Dist: pydantic (>=1,<3)
 Requires-Dist: requests (>=2,<3)
 Requires-Dist: tenacity (>=8.1.0,<9.0.0,!=8.4.0)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
+Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
 Description-Content-Type: text/markdown
 
 # 🦜️🔗 LangChain
```
{langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/RECORD
CHANGED

```diff
@@ -7,7 +7,7 @@ langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
 langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
 langchain/agents/__init__.py,sha256=dm8fJzo_wRX_Fz6XF-o8Uuduy5G5vE2B52RWBdzUIz8,6242
-langchain/agents/agent.py,sha256=
+langchain/agents/agent.py,sha256=JDpb8_nKV_0Yt0rIsvFTl6zdr38REBggMGfftuBk09k,61641
 langchain/agents/agent_iterator.py,sha256=rmyKOHFHStwjqsQDDyNnFaDr43G32fpMCks7rOz4wq0,16438
 langchain/agents/agent_toolkits/__init__.py,sha256=xgjLQ6eP0npPAJJr0O1lM-dZwtP0XR2fJOovJFIgNuo,7365
 langchain/agents/agent_toolkits/ainetwork/__init__.py,sha256=henfKntuAEjG1KoN-Hk1IHy3fFGCYPWLEuZtF2bIdZI,25
@@ -129,7 +129,7 @@ langchain/agents/output_parsers/self_ask.py,sha256=-4_-hQbKB1ichR5odEyeYUV-wIdLm
 langchain/agents/output_parsers/tools.py,sha256=9hRlUsJVmS0VmFzEKVYfg5AeusynB2lw4Xi4uYns5JM,3753
 langchain/agents/output_parsers/xml.py,sha256=2MjxW4nAM4sZN-in3K40_K5DBx6cI2Erb0TZbpSoZIY,1658
 langchain/agents/react/__init__.py,sha256=9RIjjaUDfWnoMEMpV57JQ0CwZZC5Soh357QdKpVIM-4,76
-langchain/agents/react/agent.py,sha256=
+langchain/agents/react/agent.py,sha256=pKKK3x4lKfMjiOZDF8T3_Q-PTRyCwKUBGrBhAAazmgw,5096
 langchain/agents/react/base.py,sha256=eCUikQQ688hp1s3h0Nqlz4ueaIVrpfRQe50TNai3I4Y,5799
 langchain/agents/react/output_parser.py,sha256=bEL3U3mxYGK7_7Lm4GlOq8JKQTgyHFQQIEVUUZjV1qs,1231
 langchain/agents/react/textworld_prompt.py,sha256=b9WDM8pFmqrfAWJ8n6zkxlPlxQI5oHljZ1R9g5y6cRE,1906
@@ -232,7 +232,7 @@ langchain/chains/conversation/base.py,sha256=057I9fvxKk8QxIqFPgFGuLfUH--7Pj37Y6t
 langchain/chains/conversation/memory.py,sha256=KoKmk5FjPEkioolvmFxcJgRr2wRdWIe1LNBHCtGgUKo,1396
 langchain/chains/conversation/prompt.py,sha256=84xC4dy8yNiCSICT4b6UvZdQXpPifMVw1hf7WnFAVkw,913
 langchain/chains/conversational_retrieval/__init__.py,sha256=hq7jx-kmg3s8qLYnV7gPmzVIPcGqW69H6cXIjklvGjY,49
-langchain/chains/conversational_retrieval/base.py,sha256=
+langchain/chains/conversational_retrieval/base.py,sha256=pdFiox5XV1NLZ6hlgEtR9TwCqTjKIom-jBk-eJ_YRcE,21097
 langchain/chains/conversational_retrieval/prompts.py,sha256=kJITwauXq7dYKnSBoL2EcDTqAnJZlWF_GzJ9C55ZEv8,720
 langchain/chains/elasticsearch_database/__init__.py,sha256=B3Zxy8mxTb4bfMGHC__26BFkvT_6bPisS4rPIFiFWdU,126
 langchain/chains/elasticsearch_database/base.py,sha256=25eDmw6PUfyGWtPueMDWwBz3jwYNl9TAWVaHBB8ZlwY,8319
@@ -290,7 +290,7 @@ langchain/chains/openai_functions/base.py,sha256=UpCGcUYxz93G8MMdqq1LcHZ74Y_MnS9
 langchain/chains/openai_functions/citation_fuzzy_match.py,sha256=bHkYOhTgEG1wIhdC06WL6Wmnc_s-KctLs1cC5PLxwgQ,3558
 langchain/chains/openai_functions/extraction.py,sha256=NTP9lKKeHcqQC_PQtAJLKTsvzAmAdoqPYbyEPlVFUxw,7319
 langchain/chains/openai_functions/openapi.py,sha256=512z96_-iv73_LAd-doxWyjbh2tSrlmLKs0YJY2mfV8,11869
-langchain/chains/openai_functions/qa_with_structure.py,sha256=
+langchain/chains/openai_functions/qa_with_structure.py,sha256=SIhc7FeSlpPr5BQ9-jUmru_uMCl66LF47Ldx_eeNJ3I,4053
 langchain/chains/openai_functions/tagging.py,sha256=nbvW29Cb4tHTz1kQciQa8Upti01brRbhGgC2Mqou2V0,2663
 langchain/chains/openai_functions/utils.py,sha256=GDhYjszQGut1UcJ-dyPvkwiT8gHOV0IejRuIfN7_fhw,1255
 langchain/chains/openai_tools/__init__.py,sha256=xX0If1Nx_ocEOI56EGxCI0v0RZ1_VUegzyODAj0RLVU,134
@@ -336,7 +336,7 @@ langchain/chains/sql_database/__init__.py,sha256=jQotWN4EWMD98Jk-f7rqh5YtbXbP9XX
 langchain/chains/sql_database/prompt.py,sha256=W0xFqVZ18PzxmutnIBJrocXus8_QBByrKtxg8CjGaYw,15458
 langchain/chains/sql_database/query.py,sha256=h-QP5ESatTFj8t7sGsHppXSchy3ZGL1U1afza-Lo8fc,5421
 langchain/chains/structured_output/__init__.py,sha256=-6nFe-gznavFc3XCMv8XkEzuXoto2rI8Q-bcruVPOR8,204
-langchain/chains/structured_output/base.py,sha256=
+langchain/chains/structured_output/base.py,sha256=dOZMme8WBJDgFEXe-TJ34SKi2zR25ZeYUnNqnCAqXZw,25611
 langchain/chains/summarize/__init__.py,sha256=mg1lKtH_x-oJ5qvKY6OD7g9kkqbjMVbL3l3OhfozSQM,151
 langchain/chains/summarize/chain.py,sha256=QA3EgTnT067OLm5waUv_3oiI1mS3KD_uvFkHlns-Jxo,6193
 langchain/chains/summarize/map_reduce_prompt.py,sha256=HZSitW2_WhJINN-_YJCzU6zJXbPuMr5zFek31AzutuQ,238
@@ -658,7 +658,7 @@ langchain/evaluation/string_distance/__init__.py,sha256=qAz9Z709ocAi_Yd9nbkKnFt1
 langchain/evaluation/string_distance/base.py,sha256=IbCDJxkzBAoAwSsSKRCDc2wPEzEyNv_duIA50ZKOato,14019
 langchain/example_generator.py,sha256=q_JvQKn2pgJOHcBeFc851GpaR4seOZXDe9TISAJheEY,142
 langchain/formatting.py,sha256=4s5AwApo_6t2pVfoFXOgFU9sNNdpVDD44B4ryOwJMJo,168
-langchain/globals
+langchain/globals.py,sha256=SUMrEo_KlpODNBDj4JZDILhbxTK_GGDEYmUQVQ-Hzus,7436
 langchain/graphs/__init__.py,sha256=l12tO5owB32RcKbu5O8rtOK0qLVjGee9JjX3RUVT54Q,1528
 langchain/graphs/arangodb_graph.py,sha256=3Gu4bnS0q27AUEuUnoK2asz67iU8KpJktQ2uJvJ-iy0,796
 langchain/graphs/falkordb_graph.py,sha256=PdrxQC9Tl0txQtDTFNk2qR9m5L0apWPwq-SWq3lxGMc,618
@@ -804,12 +804,12 @@ langchain/memory/motorhead_memory.py,sha256=OXjtlAQi1ioRXdM3GVcYmReynkKn8Vm1e5Tr
 langchain/memory/prompt.py,sha256=r8vxZSRydSOWJzRszStN0Wky4n3fyM_QJ2XoKMsP3JA,8181
 langchain/memory/readonly.py,sha256=IbZFbyuPo_bHEzyACQcLIcOPpczoX5CLfM_n0YllYjw,792
 langchain/memory/simple.py,sha256=7El81OHJA0HBqwJ-AZDTQFPfB7B5NEsmY_fEOrwD0XA,761
-langchain/memory/summary.py,sha256=
-langchain/memory/summary_buffer.py,sha256=
+langchain/memory/summary.py,sha256=arzVYYAkWWRwhLVZmkX3ap1mrpmrnPYqmDFp3o56RYs,3801
+langchain/memory/summary_buffer.py,sha256=f_oaPGczQVHpMQ4oRiR7e2ZGG4ehdQ-9h_GzjEj3TaE,5068
 langchain/memory/token_buffer.py,sha256=E1N7bWSkAmi-7V7F-7iRl-BADStnplp-zwtUndjXBMM,2144
 langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
 langchain/memory/vectorstore.py,sha256=SMt1iqtqTm3rcecWqwEmCcX5l-r_JVggKpuf4faUIGI,3875
-langchain/memory/vectorstore_token_buffer_memory.py,sha256=
+langchain/memory/vectorstore_token_buffer_memory.py,sha256=uB7N-3KHSpbzeS2TTnxIzMqRIfsgki4w8uyNKkE-cWw,7620
 langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
 langchain/model_laboratory.py,sha256=IaJzVG_SbFX7W6ODriqqme-Q5x0MB18j4Bhg1Y-fWLo,3278
 langchain/output_parsers/__init__.py,sha256=A9fDuB-lYuOIN8QbDx-fULqSwugB7saLRKD23gdaIl4,2720
@@ -863,7 +863,7 @@ langchain/retrievers/bm25.py,sha256=L3Pq77NNfV0YDlMkU-ODvJN8ksi1SROQ-vYpPqN5gHs,
 langchain/retrievers/chaindesk.py,sha256=e3oHctHNecz14jz70sMw0_YrFjeWXv7Q04r--DnxWq4,641
 langchain/retrievers/chatgpt_plugin_retriever.py,sha256=Pds7FgWv-e6u43noFsO3v2YV8Y6FUjdkmYs5zjl79Nk,653
 langchain/retrievers/cohere_rag_retriever.py,sha256=YMhx_AmBHUDw6-_cQtnESl0WKjtRmjvbDNQvZs3iYm4,641
-langchain/retrievers/contextual_compression.py,sha256=
+langchain/retrievers/contextual_compression.py,sha256=I8VHaS2DHoFvAHotix9GjOm5933PpaVbU--qVP7UXDQ,2305
 langchain/retrievers/databerry.py,sha256=uMTLwG-QWCaORSPeFshi105VvXCizjF6551XHXXjzcE,661
 langchain/retrievers/docarray.py,sha256=5BHkTy7uI5HUFi-k9qS6ZYxMyGdKbAwxhKqpz3cNCTM,791
 langchain/retrievers/document_compressors/__init__.py,sha256=H0xp8dSYIEYZWdAEQN_zY4DX6gx3kepw9jTC_gUSZyk,1263
@@ -893,7 +893,7 @@ langchain/retrievers/milvus.py,sha256=f_vi-uodWcS5PyYq-8QD8S7Bx1t_uVswQtqG2D35Xn
 langchain/retrievers/multi_query.py,sha256=q57Ju4GCJh0lKxGOd_Y5WhEWZEc2gz4sY5qqo19t1xc,7091
 langchain/retrievers/multi_vector.py,sha256=rb5gDEAzhzHURJ-VfKGnvq7erZ-xWklnk8RQCBTNsds,4731
 langchain/retrievers/outline.py,sha256=uNuqhoHkfDx73ZEYbHbFjVmJfW-eAdLUzyC9EuoV608,635
-langchain/retrievers/parent_document_retriever.py,sha256=
+langchain/retrievers/parent_document_retriever.py,sha256=oKVp_s5ROske6O0E25yZPOjGA0xmvTGLobmWw_AHgGE,5990
 langchain/retrievers/pinecone_hybrid_search.py,sha256=oEbmHdKIZ86H1O8GhzNC1KVfKb_xAJdRJXpODMY6X3Y,674
 langchain/retrievers/pubmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
 langchain/retrievers/pupmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
+langchain-0.2.11.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.2.11.dist-info/METADATA,sha256=CfzWGm_XdcIyDAK8jpMLQfugb4KZIKLLr7WRS7KHqjk,7074
+langchain-0.2.11.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain-0.2.11.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.2.11.dist-info/RECORD,,
```
/langchain/{globals/__init__.py → globals.py}
File without changes

{langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/LICENSE
File without changes

{langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/WHEEL
File without changes

{langchain-0.2.10.dist-info → langchain-0.2.11.dist-info}/entry_points.txt
File without changes