langchain 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/agent.py +0 -24
- langchain/agents/chat/output_parser.py +2 -2
- langchain/agents/output_parsers/react_json_single_input.py +2 -2
- langchain/agents/react/agent.py +1 -1
- langchain/agents/structured_chat/output_parser.py +2 -2
- langchain/chains/conversational_retrieval/base.py +5 -2
- langchain/chains/openai_functions/qa_with_structure.py +6 -5
- langchain/chains/structured_output/base.py +4 -3
- langchain/chat_models/base.py +28 -3
- langchain/evaluation/agents/trajectory_eval_chain.py +1 -2
- langchain/evaluation/embedding_distance/base.py +3 -2
- langchain/evaluation/string_distance/base.py +3 -2
- langchain/memory/buffer.py +2 -2
- langchain/memory/summary.py +15 -2
- langchain/memory/summary_buffer.py +48 -2
- langchain/memory/vectorstore_token_buffer_memory.py +1 -1
- langchain/output_parsers/combining.py +2 -2
- langchain/output_parsers/enum.py +2 -2
- langchain/output_parsers/fix.py +4 -5
- langchain/output_parsers/retry.py +2 -2
- langchain/retrievers/contextual_compression.py +2 -2
- langchain/retrievers/document_compressors/embeddings_filter.py +3 -2
- langchain/retrievers/parent_document_retriever.py +1 -1
- langchain/retrievers/self_query/base.py +10 -0
- {langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/METADATA +4 -2
- {langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/RECORD +30 -30
- /langchain/{globals/__init__.py → globals.py} +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/LICENSE +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/WHEEL +0 -0
- {langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/entry_points.txt +0 -0
langchain/agents/agent.py
CHANGED
@@ -1146,30 +1146,6 @@ class AgentExecutor(Chain):
                 )
         return values
 
-    @root_validator(pre=False, skip_on_failure=True)
-    def validate_return_direct_tool(cls, values: Dict) -> Dict:
-        """Validate that tools are compatible with agent.
-
-        Args:
-            values: Values to validate.
-
-        Returns:
-            Dict: Validated values.
-
-        Raises:
-            ValueError: If tools that have `return_direct=True` are not allowed.
-        """
-        agent = values["agent"]
-        tools = values["tools"]
-        if isinstance(agent, BaseMultiActionAgent):
-            for tool in tools:
-                if tool.return_direct:
-                    raise ValueError(
-                        "Tools that have `return_direct=True` are not allowed "
-                        "in multi-action agents"
-                    )
-        return values
-
     @root_validator(pre=True)
     def validate_runnable_agent(cls, values: Dict) -> Dict:
         """Convert runnable to agent if passed in.
langchain/agents/chat/output_parser.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Union
+from typing import Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -17,7 +17,7 @@ class ChatOutputParser(AgentOutputParser):
     format_instructions: str = FORMAT_INSTRUCTIONS
     """Default formatting instructions"""
 
-    pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
+    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
langchain/agents/output_parsers/react_json_single_input.py
CHANGED
@@ -1,6 +1,6 @@
 import json
 import re
-from typing import Union
+from typing import Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -42,7 +42,7 @@ class ReActJsonSingleInputOutputParser(AgentOutputParser):
 
     """
 
-    pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
+    pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
langchain/agents/react/agent.py
CHANGED
@@ -71,7 +71,7 @@ def create_react_agent(
             "input": "what's my name?",
             # Notice that chat_history is a string
             # since this prompt is aimed at LLMs, not chat models
-            "chat_history": "Human: My name is Bob
+            "chat_history": "Human: My name is Bob\\nAI: Hello Bob!",
         }
     )
 
langchain/agents/structured_chat/output_parser.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import logging
 import re
-from typing import Optional, Union
+from typing import Optional, Pattern, Union
 
 from langchain_core.agents import AgentAction, AgentFinish
 from langchain_core.exceptions import OutputParserException
@@ -23,7 +23,7 @@ class StructuredChatOutputParser(AgentOutputParser):
     format_instructions: str = FORMAT_INSTRUCTIONS
     """Default formatting instructions"""
 
-    pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
+    pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
     """Regex pattern to parse the output."""
 
     def get_format_instructions(self) -> str:
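The three output-parser changes above are the same fix: the compiled regex gains an explicit Pattern annotation, so it is declared as a typed model field rather than a bare class attribute. A minimal sketch of the idiom (the class name is illustrative, not from the package):

    import re
    from typing import Pattern

    from langchain_core.pydantic_v1 import BaseModel


    class MyParser(BaseModel):
        # Annotated with Pattern, the compiled regex becomes a declared pydantic
        # field with a default value instead of an untyped class attribute.
        pattern: Pattern = re.compile(r"`{3}(?:json)?\n(.*?)`{3}", re.DOTALL)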
langchain/chains/conversational_retrieval/base.py
CHANGED
@@ -42,8 +42,11 @@ def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
     buffer = ""
     for dialogue_turn in chat_history:
         if isinstance(dialogue_turn, BaseMessage):
-
-
+            if len(dialogue_turn.content) > 0:
+                role_prefix = _ROLE_MAP.get(
+                    dialogue_turn.type, f"{dialogue_turn.type}: "
+                )
+                buffer += f"\n{role_prefix}{dialogue_turn.content}"
         elif isinstance(dialogue_turn, tuple):
             human = "Human: " + dialogue_turn[0]
             ai = "Assistant: " + dialogue_turn[1]
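In effect, chat-history serialization now skips messages whose content is empty (for example, AI turns that carried only tool calls). A small sketch of the behavior, calling the module-private helper directly for illustration only:

    from langchain_core.messages import AIMessage, HumanMessage

    from langchain.chains.conversational_retrieval.base import _get_chat_history

    history = [
        HumanMessage(content="What is LangChain?"),
        AIMessage(content=""),  # empty content, e.g. a tool-call-only turn
        AIMessage(content="A framework for building LLM applications."),
    ]
    # The empty-content message no longer contributes a bare role prefix to the buffer.
    print(_get_chat_history(history))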
langchain/chains/openai_functions/qa_with_structure.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional, Type, Union
+from typing import Any, List, Optional, Type, Union, cast
 
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import HumanMessage, SystemMessage
@@ -10,6 +10,7 @@ from langchain_core.output_parsers.openai_functions import (
 from langchain_core.prompts import PromptTemplate
 from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.openai_functions.utils import get_llm_kwargs
@@ -45,7 +46,7 @@ def create_qa_with_structure_chain(
 
     """
     if output_parser == "pydantic":
-        if not (isinstance(schema, type) and
+        if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
            raise ValueError(
                "Must provide a pydantic class for schema when output_parser is "
                "'pydantic'."
@@ -60,10 +61,10 @@ def create_qa_with_structure_chain(
            f"Got unexpected output_parser: {output_parser}. "
            f"Should be one of `pydantic` or `base`."
        )
-    if isinstance(schema, type) and
-        schema_dict = schema.schema()
+    if isinstance(schema, type) and is_basemodel_subclass(schema):
+        schema_dict = cast(dict, schema.schema())
     else:
-        schema_dict = schema
+        schema_dict = cast(dict, schema)
     function = {
         "name": schema_dict["title"],
         "description": schema_dict["description"],
langchain/chains/structured_output/base.py
CHANGED
@@ -24,6 +24,7 @@ from langchain_core.utils.function_calling import (
     convert_to_openai_function,
     convert_to_openai_tool,
 )
+from langchain_core.utils.pydantic import is_basemodel_subclass
 
 
 @deprecated(
@@ -465,7 +466,7 @@ def _get_openai_tool_output_parser(
     *,
     first_tool_only: bool = False,
 ) -> Union[BaseOutputParser, BaseGenerationOutputParser]:
-    if isinstance(tool, type) and
+    if isinstance(tool, type) and is_basemodel_subclass(tool):
         output_parser: Union[BaseOutputParser, BaseGenerationOutputParser] = (
             PydanticToolsParser(tools=[tool], first_tool_only=first_tool_only)
         )
@@ -493,7 +494,7 @@ def get_openai_output_parser(
         not a Pydantic class, then the output parser will automatically extract
         only the function arguments and not the function name.
     """
-    if isinstance(functions[0], type) and
+    if isinstance(functions[0], type) and is_basemodel_subclass(functions[0]):
         if len(functions) > 1:
             pydantic_schema: Union[Dict, Type[BaseModel]] = {
                 convert_to_openai_function(fn)["name"]: fn for fn in functions
@@ -516,7 +517,7 @@ def _create_openai_json_runnable(
     output_parser: Optional[Union[BaseOutputParser, BaseGenerationOutputParser]] = None,
 ) -> Runnable:
     """"""
-    if isinstance(output_schema, type) and
+    if isinstance(output_schema, type) and is_basemodel_subclass(output_schema):
         output_parser = output_parser or PydanticOutputParser(
             pydantic_object=output_schema,  # type: ignore
         )
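Both files now gate their pydantic-schema branches on is_basemodel_subclass from langchain_core.utils.pydantic, which accepts models defined against either pydantic v1 or v2. A minimal sketch (the Answer classes are illustrative):

    from pydantic import BaseModel  # pydantic v2 namespace when v2 is installed

    from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
    from langchain_core.utils.pydantic import is_basemodel_subclass


    class AnswerV2(BaseModel):
        answer: str


    class AnswerV1(BaseModelV1):
        answer: str


    # Both styles of model count as "a pydantic class" for schema purposes.
    assert is_basemodel_subclass(AnswerV2)
    assert is_basemodel_subclass(AnswerV1)
    assert not is_basemodel_subclass(dict)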
langchain/chat_models/base.py
CHANGED
@@ -102,6 +102,13 @@ def init_chat_model(
 
     .. versionchanged:: 0.2.8
 
+        Support for ``configurable_fields`` and ``config_prefix`` added.
+
+    .. versionchanged:: 0.2.12
+
+        Support for Ollama via langchain-ollama package added. Previously
+        langchain-community version of Ollama (now deprecated) was installed by default.
+
     Args:
         model: The name of the model, e.g. "gpt-4o", "claude-3-opus-20240229".
         model_provider: The model provider. Supported model_provider values and the
@@ -118,7 +125,7 @@ def init_chat_model(
             - mistralai (langchain-mistralai)
             - huggingface (langchain-huggingface)
             - groq (langchain-groq)
-            - ollama (langchain-
+            - ollama (langchain-ollama) [support added in langchain==0.2.12]
 
         Will attempt to infer model_provider from model if not specified. The
         following providers will be inferred based on these model prefixes:
@@ -336,8 +343,20 @@ def _init_chat_model_helper(
 
         return ChatFireworks(model=model, **kwargs)
     elif model_provider == "ollama":
-
-
+        try:
+            _check_pkg("langchain_ollama")
+            from langchain_ollama import ChatOllama
+        except ImportError:
+            pass
+
+        # For backwards compatibility
+        try:
+            _check_pkg("langchain_community")
+            from langchain_community.chat_models import ChatOllama
+        except ImportError:
+            # If both langchain-ollama and langchain-community aren't available, raise
+            # an error related to langchain-ollama
+            _check_pkg("langchain_ollama")
 
         return ChatOllama(model=model, **kwargs)
     elif model_provider == "together":
@@ -366,6 +385,11 @@ def _init_chat_model_helper(
 
         # TODO: update to use model= once ChatBedrock supports
         return ChatBedrock(model_id=model, **kwargs)
+    elif model_provider == "bedrock_converse":
+        _check_pkg("langchain_aws")
+        from langchain_aws import ChatBedrockConverse
+
+        return ChatBedrockConverse(model=model, **kwargs)
     else:
         supported = ", ".join(_SUPPORTED_PROVIDERS)
         raise ValueError(
@@ -388,6 +412,7 @@ _SUPPORTED_PROVIDERS = {
     "huggingface",
     "groq",
     "bedrock",
+    "bedrock_converse",
 }
 
 
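Taken together, init_chat_model now prefers the dedicated langchain-ollama package (falling back to the deprecated community implementation) and gains a "bedrock_converse" provider backed by ChatBedrockConverse from langchain-aws. A usage sketch, assuming the relevant provider packages are installed and configured; the model names are illustrative:

    from langchain.chat_models import init_chat_model

    # Resolves to langchain_ollama.ChatOllama when langchain-ollama is installed,
    # otherwise falls back to the deprecated langchain_community implementation.
    llama = init_chat_model("llama3", model_provider="ollama", temperature=0)

    # New provider key: routes to langchain_aws.ChatBedrockConverse.
    claude = init_chat_model(
        "anthropic.claude-3-sonnet-20240229-v1:0",
        model_provider="bedrock_converse",
    )

    print(llama.invoke("Say hello in one word.").content)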
langchain/evaluation/agents/trajectory_eval_chain.py
CHANGED
@@ -283,8 +283,7 @@ The following is the expected answer. Use this to measure correctness:
 
     def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
         """Validate and prep inputs."""
-
-        inputs["reference"] = self._format_reference(inputs.get("reference"))
+        inputs["reference"] = self._format_reference(inputs.get("reference"))
         return super().prep_inputs(inputs)
 
     def _call(
langchain/evaluation/embedding_distance/base.py
CHANGED
@@ -10,7 +10,8 @@ from langchain_core.callbacks.manager import (
     Callbacks,
 )
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import Field
+from langchain_core.pydantic_v1 import Field
+from langchain_core.utils import pre_init
 
 from langchain.chains.base import Chain
 from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
@@ -68,7 +69,7 @@ class _EmbeddingDistanceChainMixin(Chain):
     embeddings: Embeddings = Field(default_factory=_embedding_factory)
     distance_metric: EmbeddingDistance = Field(default=EmbeddingDistance.COSINE)
 
-    @
+    @pre_init
     def _validate_tiktoken_installed(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Validate that the TikTok library is installed.
 
langchain/evaluation/string_distance/base.py
CHANGED
@@ -8,7 +8,8 @@ from langchain_core.callbacks.manager import (
     CallbackManagerForChainRun,
     Callbacks,
 )
-from langchain_core.pydantic_v1 import Field
+from langchain_core.pydantic_v1 import Field
+from langchain_core.utils import pre_init
 
 from langchain.chains.base import Chain
 from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
@@ -63,7 +64,7 @@ class _RapidFuzzChainMixin(Chain):
     """Whether to normalize the score to a value between 0 and 1.
     Applies only to the Levenshtein and Damerau-Levenshtein distances."""
 
-    @
+    @pre_init
     def validate_dependencies(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """
         Validate that the rapidfuzz library is installed.
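These evaluation chains, and several classes below, move their construction-time validators onto pre_init from langchain_core.utils, which plays the role of a pre root validator while staying compatible with langchain's pydantic bridging. A minimal sketch of the pattern (the class and field names are illustrative):

    from typing import Any, Dict

    from langchain_core.pydantic_v1 import BaseModel
    from langchain_core.utils import pre_init


    class Scorer(BaseModel):
        metric: str = "levenshtein"

        @pre_init
        def validate_dependencies(cls, values: Dict[str, Any]) -> Dict[str, Any]:
            # Runs before field validation, so it can check optional imports or
            # fill in values the way a root_validator(pre=True) would.
            values.setdefault("metric", "levenshtein")
            return values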
langchain/memory/buffer.py
CHANGED
@@ -1,7 +1,7 @@
 from typing import Any, Dict, List, Optional
 
 from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_core.
+from langchain_core.utils import pre_init
 
 from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
 from langchain.memory.utils import get_prompt_input_key
@@ -82,7 +82,7 @@ class ConversationStringBufferMemory(BaseMemory):
     input_key: Optional[str] = None
     memory_key: str = "history"  #: :meta private:
 
-    @
+    @pre_init
     def validate_chains(cls, values: Dict) -> Dict:
         """Validate that return messages is not True."""
         if values.get("return_messages", False):
langchain/memory/summary.py
CHANGED
@@ -6,7 +6,8 @@ from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.utils import pre_init
 
 from langchain.chains.llm import LLMChain
 from langchain.memory.chat_memory import BaseChatMemory
@@ -34,6 +35,18 @@ class SummarizerMixin(BaseModel):
         chain = LLMChain(llm=self.llm, prompt=self.prompt)
         return chain.predict(summary=existing_summary, new_lines=new_lines)
 
+    async def apredict_new_summary(
+        self, messages: List[BaseMessage], existing_summary: str
+    ) -> str:
+        new_lines = get_buffer_string(
+            messages,
+            human_prefix=self.human_prefix,
+            ai_prefix=self.ai_prefix,
+        )
+
+        chain = LLMChain(llm=self.llm, prompt=self.prompt)
+        return await chain.apredict(summary=existing_summary, new_lines=new_lines)
+
 
 class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
     """Conversation summarizer to chat memory."""
@@ -73,7 +86,7 @@ class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
         buffer = self.buffer
         return {self.memory_key: buffer}
 
-    @
+    @pre_init
     def validate_prompt_input_variables(cls, values: Dict) -> Dict:
         """Validate that prompt input variables are consistent."""
         prompt_variables = values["prompt"].input_variables
langchain/memory/summary_buffer.py
CHANGED
@@ -1,7 +1,7 @@
 from typing import Any, Dict, List, Union
 
 from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_core.
+from langchain_core.utils import pre_init
 
 from langchain.memory.chat_memory import BaseChatMemory
 from langchain.memory.summary import SummarizerMixin
@@ -19,6 +19,11 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         """String buffer of memory."""
         return self.load_memory_variables({})[self.memory_key]
 
+    async def abuffer(self) -> Union[str, List[BaseMessage]]:
+        """Async memory buffer."""
+        memory_variables = await self.aload_memory_variables({})
+        return memory_variables[self.memory_key]
+
     @property
     def memory_variables(self) -> List[str]:
         """Will always return list of memory variables.
@@ -43,7 +48,23 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
             )
         return {self.memory_key: final_buffer}
 
-
+    async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        """Asynchronously return key-value pairs given the text input to the chain."""
+        buffer = await self.chat_memory.aget_messages()
+        if self.moving_summary_buffer != "":
+            first_messages: List[BaseMessage] = [
+                self.summary_message_cls(content=self.moving_summary_buffer)
+            ]
+            buffer = first_messages + buffer
+        if self.return_messages:
+            final_buffer: Any = buffer
+        else:
+            final_buffer = get_buffer_string(
+                buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
+            )
+        return {self.memory_key: final_buffer}
+
+    @pre_init
     def validate_prompt_input_variables(cls, values: Dict) -> Dict:
         """Validate that prompt input variables are consistent."""
         prompt_variables = values["prompt"].input_variables
@@ -60,6 +81,13 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         super().save_context(inputs, outputs)
         self.prune()
 
+    async def asave_context(
+        self, inputs: Dict[str, Any], outputs: Dict[str, str]
+    ) -> None:
+        """Asynchronously save context from this conversation to buffer."""
+        await super().asave_context(inputs, outputs)
+        await self.aprune()
+
     def prune(self) -> None:
         """Prune buffer if it exceeds max token limit"""
         buffer = self.chat_memory.messages
@@ -73,7 +101,25 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
                 pruned_memory, self.moving_summary_buffer
             )
 
+    async def aprune(self) -> None:
+        """Asynchronously prune buffer if it exceeds max token limit"""
+        buffer = self.chat_memory.messages
+        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+        if curr_buffer_length > self.max_token_limit:
+            pruned_memory = []
+            while curr_buffer_length > self.max_token_limit:
+                pruned_memory.append(buffer.pop(0))
+                curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
+            self.moving_summary_buffer = await self.apredict_new_summary(
+                pruned_memory, self.moving_summary_buffer
+            )
+
     def clear(self) -> None:
         """Clear memory contents."""
         super().clear()
         self.moving_summary_buffer = ""
+
+    async def aclear(self) -> None:
+        """Asynchronously clear memory contents."""
+        await super().aclear()
+        self.moving_summary_buffer = ""
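With apredict_new_summary on the mixin and the async buffer/load/save/prune/clear methods above, the summary-buffer memory can now be driven end to end without blocking the event loop. A usage sketch, assuming an installed chat-model integration (ChatOpenAI here is illustrative):

    import asyncio

    from langchain_openai import ChatOpenAI

    from langchain.memory import ConversationSummaryBufferMemory


    async def main() -> None:
        memory = ConversationSummaryBufferMemory(llm=ChatOpenAI(), max_token_limit=120)
        # asave_context stores the turn, then aprune() summarizes evicted messages
        # via apredict_new_summary once the token limit is exceeded.
        await memory.asave_context({"input": "Hi, I'm Bob."}, {"output": "Hello Bob!"})
        print(await memory.aload_memory_variables({}))
        await memory.aclear()


    asyncio.run(main())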
langchain/memory/vectorstore_token_buffer_memory.py
CHANGED
@@ -67,7 +67,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         from langchain.memory.token_buffer_vectorstore_memory import (
             ConversationVectorStoreTokenBufferMemory
         )
-        from
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import HuggingFaceInstructEmbeddings
         from langchain_openai import OpenAI
 
langchain/output_parsers/combining.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 from typing import Any, Dict, List
 
 from langchain_core.output_parsers import BaseOutputParser
-from langchain_core.
+from langchain_core.utils import pre_init
 
 
 class CombiningOutputParser(BaseOutputParser[Dict[str, Any]]):
@@ -15,7 +15,7 @@ class CombiningOutputParser(BaseOutputParser[Dict[str, Any]]):
     def is_lc_serializable(cls) -> bool:
         return True
 
-    @
+    @pre_init
     def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Validate the parsers."""
         parsers = values["parsers"]
langchain/output_parsers/enum.py
CHANGED
@@ -3,7 +3,7 @@ from typing import Dict, List, Type
 
 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import BaseOutputParser
-from langchain_core.
+from langchain_core.utils import pre_init
 
 
 class EnumOutputParser(BaseOutputParser[Enum]):
@@ -12,7 +12,7 @@ class EnumOutputParser(BaseOutputParser[Enum]):
     enum: Type[Enum]
     """The enum to parse. Its values must be strings."""
 
-    @
+    @pre_init
     def raise_deprecation(cls, values: Dict) -> Dict:
         enum = values["enum"]
         if not all(isinstance(e.value, str) for e in enum):
langchain/output_parsers/fix.py
CHANGED
@@ -3,10 +3,9 @@ from __future__ import annotations
 from typing import Any, TypeVar, Union
 
 from langchain_core.exceptions import OutputParserException
-from langchain_core.
-from langchain_core.output_parsers import BaseOutputParser
+from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompts import BasePromptTemplate
-from langchain_core.runnables import RunnableSerializable
+from langchain_core.runnables import Runnable, RunnableSerializable
 from typing_extensions import TypedDict
 
 from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT
@@ -42,7 +41,7 @@ class OutputFixingParser(BaseOutputParser[T]):
     @classmethod
     def from_llm(
         cls,
-        llm:
+        llm: Runnable,
         parser: BaseOutputParser[T],
         prompt: BasePromptTemplate = NAIVE_FIX_PROMPT,
         max_retries: int = 1,
@@ -58,7 +57,7 @@ class OutputFixingParser(BaseOutputParser[T]):
         Returns:
             OutputFixingParser
         """
-        chain = prompt | llm
+        chain = prompt | llm | StrOutputParser()
         return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
 
     def parse(self, completion: str) -> T:
langchain/output_parsers/retry.py
CHANGED
@@ -4,7 +4,7 @@ from typing import Any, TypeVar, Union
 
 from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
-from langchain_core.output_parsers import BaseOutputParser
+from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompt_values import PromptValue
 from langchain_core.prompts import BasePromptTemplate, PromptTemplate
 from langchain_core.runnables import RunnableSerializable
@@ -82,7 +82,7 @@ class RetryOutputParser(BaseOutputParser[T]):
         Returns:
             RetryOutputParser
         """
-        chain = prompt | llm
+        chain = prompt | llm | StrOutputParser()
         return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
 
     def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
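In both the fixing and retry parsers, the repair chain is now prompt | llm | StrOutputParser(), so chat models that return message objects can be passed directly without extracting .content by hand. A minimal sketch (ChatOpenAI and the Joke schema are illustrative):

    from langchain_core.output_parsers import PydanticOutputParser
    from langchain_core.pydantic_v1 import BaseModel
    from langchain_openai import ChatOpenAI

    from langchain.output_parsers import OutputFixingParser


    class Joke(BaseModel):
        setup: str
        punchline: str


    base_parser = PydanticOutputParser(pydantic_object=Joke)
    fixing_parser = OutputFixingParser.from_llm(llm=ChatOpenAI(), parser=base_parser)

    # Malformed output is sent back through the fix prompt; the model's reply is
    # coerced to a string by StrOutputParser before re-parsing.
    joke = fixing_parser.parse('{"setup": "Why did the chicken cross the road?"')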
langchain/retrievers/contextual_compression.py
CHANGED
@@ -5,7 +5,7 @@ from langchain_core.callbacks import (
     CallbackManagerForRetrieverRun,
 )
 from langchain_core.documents import Document
-from langchain_core.retrievers import BaseRetriever
+from langchain_core.retrievers import BaseRetriever, RetrieverLike
 
 from langchain.retrievers.document_compressors.base import (
     BaseDocumentCompressor,
@@ -18,7 +18,7 @@ class ContextualCompressionRetriever(BaseRetriever):
     base_compressor: BaseDocumentCompressor
     """Compressor for compressing retrieved documents."""
 
-    base_retriever:
+    base_retriever: RetrieverLike
     """Base Retriever to use for getting relevant documents."""
 
     class Config:
langchain/retrievers/document_compressors/embeddings_filter.py
CHANGED
@@ -4,7 +4,8 @@ import numpy as np
 from langchain_core.callbacks.manager import Callbacks
 from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import Field
+from langchain_core.pydantic_v1 import Field
+from langchain_core.utils import pre_init
 
 from langchain.retrievers.document_compressors.base import (
     BaseDocumentCompressor,
@@ -45,7 +46,7 @@ class EmbeddingsFilter(BaseDocumentCompressor):
 
         arbitrary_types_allowed = True
 
-    @
+    @pre_init
     def validate_params(cls, values: Dict) -> Dict:
         """Validate similarity parameters."""
         if values["k"] is None and values["similarity_threshold"] is None:
langchain/retrievers/parent_document_retriever.py
CHANGED
@@ -31,8 +31,8 @@ class ParentDocumentRetriever(MultiVectorRetriever):
 
     .. code-block:: python
 
+        from langchain_chroma import Chroma
         from langchain_community.embeddings import OpenAIEmbeddings
-        from langchain_community.vectorstores import Chroma
         from langchain_text_splitters import RecursiveCharacterTextSplitter
         from langchain.storage import InMemoryStore
 
langchain/retrievers/self_query/base.py
CHANGED
@@ -177,6 +177,16 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
     if isinstance(vectorstore, PGVector):
         return NewPGVectorTranslator()
 
+    try:
+        # Added in langchain-community==0.2.11
+        from langchain_community.query_constructors.hanavector import HanaTranslator
+        from langchain_community.vectorstores import HanaDB
+    except ImportError:
+        pass
+    else:
+        if isinstance(vectorstore, HanaDB):
+            return HanaTranslator()
+
     raise ValueError(
         f"Self query retriever with Vector Store type {vectorstore.__class__}"
         f" not supported."
{langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.2.10
+Version: 0.2.12
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -15,7 +15,7 @@ Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.2.
+Requires-Dist: langchain-core (>=0.2.27,<0.3.0)
 Requires-Dist: langchain-text-splitters (>=0.2.0,<0.3.0)
 Requires-Dist: langsmith (>=0.1.17,<0.2.0)
 Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
@@ -24,6 +24,8 @@ Requires-Dist: pydantic (>=1,<3)
 Requires-Dist: requests (>=2,<3)
 Requires-Dist: tenacity (>=8.1.0,<9.0.0,!=8.4.0)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain%3D%3D0%22&expanded=true
+Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/langchain
 Description-Content-Type: text/markdown
 
 # 🦜️🔗 LangChain
{langchain-0.2.10.dist-info → langchain-0.2.12.dist-info}/RECORD
CHANGED
@@ -7,7 +7,7 @@ langchain/_api/path.py,sha256=ovJP6Pcf7L_KaKvMMet9G9OzfLTb-sZV2pEw3Tp7o3I,122
 langchain/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/adapters/openai.py,sha256=kWvS_DdRtpcc49vDY8zLUo3BrtXA3a89bLJu3Sksvaw,1996
 langchain/agents/__init__.py,sha256=dm8fJzo_wRX_Fz6XF-o8Uuduy5G5vE2B52RWBdzUIz8,6242
-langchain/agents/agent.py,sha256=
+langchain/agents/agent.py,sha256=JDpb8_nKV_0Yt0rIsvFTl6zdr38REBggMGfftuBk09k,61641
 langchain/agents/agent_iterator.py,sha256=rmyKOHFHStwjqsQDDyNnFaDr43G32fpMCks7rOz4wq0,16438
 langchain/agents/agent_toolkits/__init__.py,sha256=xgjLQ6eP0npPAJJr0O1lM-dZwtP0XR2fJOovJFIgNuo,7365
 langchain/agents/agent_toolkits/ainetwork/__init__.py,sha256=henfKntuAEjG1KoN-Hk1IHy3fFGCYPWLEuZtF2bIdZI,25
@@ -83,7 +83,7 @@ langchain/agents/agent_toolkits/zapier/toolkit.py,sha256=BcFOzvckA9ZBz8HTeWUPFc_
 langchain/agents/agent_types.py,sha256=VZlfRL0hO9XDBdoZV_axS0DatQn0ZSEB67lhINPSimY,1949
 langchain/agents/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/chat/base.py,sha256=Rkexaz_ZfptPBidCoHs_26QTwVGlGaWyPtoZi5LV9Po,6529
-langchain/agents/chat/output_parser.py,sha256=
+langchain/agents/chat/output_parser.py,sha256=0GRXvbNl18xqfSVHzA614pxVBuentIn--vC_QjFctoA,2367
 langchain/agents/chat/prompt.py,sha256=4Ub4oZyIKmJRpWwxOyGcYwlyoK8jJ0kR60jW0lPspC8,1158
 langchain/agents/conversational/__init__.py,sha256=TnMfDzoRzR-xCiR6ph3tn3H7OPbBPpuTsFuqkLMzjiA,75
 langchain/agents/conversational/base.py,sha256=tBJF5FLkEhaTOEBJ-SoxE3NAaqdXhy6gd5Pufh9OSpU,6257
@@ -123,13 +123,13 @@ langchain/agents/output_parsers/__init__.py,sha256=Zzsf8moY-juhKCrnBDUhwgKQtW12c
 langchain/agents/output_parsers/json.py,sha256=sW9e8fG4VlPnMn53dWIwSgnyRBUYs4ULFymrhW92sWQ,1846
 langchain/agents/output_parsers/openai_functions.py,sha256=MjNEFVCxYgS6Efr3HX4rR1zoks2vJxoV8FCUa240jPQ,3467
 langchain/agents/output_parsers/openai_tools.py,sha256=A_GpcYqy3xnkKrlBtrmHIUWwwLMyaKwWc8R-gEvRV3s,2317
-langchain/agents/output_parsers/react_json_single_input.py,sha256=
+langchain/agents/output_parsers/react_json_single_input.py,sha256=SUkOGmdGGzxB4e1CNJD1eo4dJneiMYsgfGVHpxZ5bfI,2473
 langchain/agents/output_parsers/react_single_input.py,sha256=lIHosxNep1YFCgW9h71gEDWs59dmGeWlWedl9gWf11k,3218
 langchain/agents/output_parsers/self_ask.py,sha256=-4_-hQbKB1ichR5odEyeYUV-wIdLmP5eGDxzw77Cop4,1545
 langchain/agents/output_parsers/tools.py,sha256=9hRlUsJVmS0VmFzEKVYfg5AeusynB2lw4Xi4uYns5JM,3753
 langchain/agents/output_parsers/xml.py,sha256=2MjxW4nAM4sZN-in3K40_K5DBx6cI2Erb0TZbpSoZIY,1658
 langchain/agents/react/__init__.py,sha256=9RIjjaUDfWnoMEMpV57JQ0CwZZC5Soh357QdKpVIM-4,76
-langchain/agents/react/agent.py,sha256=
+langchain/agents/react/agent.py,sha256=pKKK3x4lKfMjiOZDF8T3_Q-PTRyCwKUBGrBhAAazmgw,5096
 langchain/agents/react/base.py,sha256=eCUikQQ688hp1s3h0Nqlz4ueaIVrpfRQe50TNai3I4Y,5799
 langchain/agents/react/output_parser.py,sha256=bEL3U3mxYGK7_7Lm4GlOq8JKQTgyHFQQIEVUUZjV1qs,1231
 langchain/agents/react/textworld_prompt.py,sha256=b9WDM8pFmqrfAWJ8n6zkxlPlxQI5oHljZ1R9g5y6cRE,1906
@@ -141,7 +141,7 @@ langchain/agents/self_ask_with_search/output_parser.py,sha256=hLDqfU7xV_5G6c68of
 langchain/agents/self_ask_with_search/prompt.py,sha256=J3mgTaq-KwT-yTorpDkCi8ruTPTPE8s4OTcL7o8GJgA,1926
 langchain/agents/structured_chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/structured_chat/base.py,sha256=AL8e0NTYYgVZ3thd64wQWpaLf7pkY7sEk24AqAsTWaQ,10812
-langchain/agents/structured_chat/output_parser.py,sha256=
+langchain/agents/structured_chat/output_parser.py,sha256=XfqIyGZUGAoO8ctjzBTZ37bK82bpl2MJL_sGzgRFuNQ,3819
 langchain/agents/structured_chat/prompt.py,sha256=OiBTRUOhvhSyO2jO2ByUUiaCrkK_tIUH9pMWWKs-aF4,992
 langchain/agents/tool_calling_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/tool_calling_agent/base.py,sha256=pEL8y4YN9pQICFYpkPi76jnMVD5-PZWIrgUIPat87F8,3870
@@ -232,7 +232,7 @@ langchain/chains/conversation/base.py,sha256=057I9fvxKk8QxIqFPgFGuLfUH--7Pj37Y6t
 langchain/chains/conversation/memory.py,sha256=KoKmk5FjPEkioolvmFxcJgRr2wRdWIe1LNBHCtGgUKo,1396
 langchain/chains/conversation/prompt.py,sha256=84xC4dy8yNiCSICT4b6UvZdQXpPifMVw1hf7WnFAVkw,913
 langchain/chains/conversational_retrieval/__init__.py,sha256=hq7jx-kmg3s8qLYnV7gPmzVIPcGqW69H6cXIjklvGjY,49
-langchain/chains/conversational_retrieval/base.py,sha256=
+langchain/chains/conversational_retrieval/base.py,sha256=pdFiox5XV1NLZ6hlgEtR9TwCqTjKIom-jBk-eJ_YRcE,21097
 langchain/chains/conversational_retrieval/prompts.py,sha256=kJITwauXq7dYKnSBoL2EcDTqAnJZlWF_GzJ9C55ZEv8,720
 langchain/chains/elasticsearch_database/__init__.py,sha256=B3Zxy8mxTb4bfMGHC__26BFkvT_6bPisS4rPIFiFWdU,126
 langchain/chains/elasticsearch_database/base.py,sha256=25eDmw6PUfyGWtPueMDWwBz3jwYNl9TAWVaHBB8ZlwY,8319
@@ -290,7 +290,7 @@ langchain/chains/openai_functions/base.py,sha256=UpCGcUYxz93G8MMdqq1LcHZ74Y_MnS9
 langchain/chains/openai_functions/citation_fuzzy_match.py,sha256=bHkYOhTgEG1wIhdC06WL6Wmnc_s-KctLs1cC5PLxwgQ,3558
 langchain/chains/openai_functions/extraction.py,sha256=NTP9lKKeHcqQC_PQtAJLKTsvzAmAdoqPYbyEPlVFUxw,7319
 langchain/chains/openai_functions/openapi.py,sha256=512z96_-iv73_LAd-doxWyjbh2tSrlmLKs0YJY2mfV8,11869
-langchain/chains/openai_functions/qa_with_structure.py,sha256=
+langchain/chains/openai_functions/qa_with_structure.py,sha256=SIhc7FeSlpPr5BQ9-jUmru_uMCl66LF47Ldx_eeNJ3I,4053
 langchain/chains/openai_functions/tagging.py,sha256=nbvW29Cb4tHTz1kQciQa8Upti01brRbhGgC2Mqou2V0,2663
 langchain/chains/openai_functions/utils.py,sha256=GDhYjszQGut1UcJ-dyPvkwiT8gHOV0IejRuIfN7_fhw,1255
 langchain/chains/openai_tools/__init__.py,sha256=xX0If1Nx_ocEOI56EGxCI0v0RZ1_VUegzyODAj0RLVU,134
@@ -336,7 +336,7 @@ langchain/chains/sql_database/__init__.py,sha256=jQotWN4EWMD98Jk-f7rqh5YtbXbP9XX
 langchain/chains/sql_database/prompt.py,sha256=W0xFqVZ18PzxmutnIBJrocXus8_QBByrKtxg8CjGaYw,15458
 langchain/chains/sql_database/query.py,sha256=h-QP5ESatTFj8t7sGsHppXSchy3ZGL1U1afza-Lo8fc,5421
 langchain/chains/structured_output/__init__.py,sha256=-6nFe-gznavFc3XCMv8XkEzuXoto2rI8Q-bcruVPOR8,204
-langchain/chains/structured_output/base.py,sha256=
+langchain/chains/structured_output/base.py,sha256=dOZMme8WBJDgFEXe-TJ34SKi2zR25ZeYUnNqnCAqXZw,25611
 langchain/chains/summarize/__init__.py,sha256=mg1lKtH_x-oJ5qvKY6OD7g9kkqbjMVbL3l3OhfozSQM,151
 langchain/chains/summarize/chain.py,sha256=QA3EgTnT067OLm5waUv_3oiI1mS3KD_uvFkHlns-Jxo,6193
 langchain/chains/summarize/map_reduce_prompt.py,sha256=HZSitW2_WhJINN-_YJCzU6zJXbPuMr5zFek31AzutuQ,238
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
 langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
 langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
 langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
-langchain/chat_models/base.py,sha256=
+langchain/chat_models/base.py,sha256=IdyyLmNNXuBO0JjBL8OyEsB2EgSoEWehHJi0czK25Ho,31046
 langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
 langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
 langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
@@ -626,7 +626,7 @@ langchain/embeddings/xinference.py,sha256=nehpiy79abQ78Bm-Y9DA8FDvpACXROSIats0S6
 langchain/env.py,sha256=fucAbfcmwiN1CjKSg5l2lzquRVoE7wqfuMMlaByuyEk,476
 langchain/evaluation/__init__.py,sha256=1iX4-CeK-YkKtQh8npkJ5fhtbRPM668pPCz6SZ6WdJs,5803
 langchain/evaluation/agents/__init__.py,sha256=Z3RFhkBgSauIRNp5dEUgkzY1Tr3kSeUwuotd0nrQViQ,166
-langchain/evaluation/agents/trajectory_eval_chain.py,sha256=
+langchain/evaluation/agents/trajectory_eval_chain.py,sha256=QA3fb4eKcZzFZMFwR8fy_rFF5D9KcZK3UrCli9UZ1FQ,13982
 langchain/evaluation/agents/trajectory_eval_prompt.py,sha256=NY-kAJqoXfPP9zI9WsvEHEDp00ImG1Po9vBZm3U684M,5939
 langchain/evaluation/comparison/__init__.py,sha256=1nxR3mXQ8eimpDjfarJgDRe30YjL2yeOYkFaNj09fRY,1401
 langchain/evaluation/comparison/eval_chain.py,sha256=SD-0Iw5UKXBJY1rduef7BF4441b7ROiCxaYNyw7rJsQ,15935
@@ -635,7 +635,7 @@ langchain/evaluation/criteria/__init__.py,sha256=FE5qrrz5JwWXJWXCzdyNRevEPfmmfBf
 langchain/evaluation/criteria/eval_chain.py,sha256=wvBBnB1bpZYtPwQnIpLHa3Rp-nrLSeo_XVGYixFrkIA,21276
 langchain/evaluation/criteria/prompt.py,sha256=6OgXmdvlYVzRMeAxa1fYGIxqeNAz1NkFCZ6ezLgUnZM,1756
 langchain/evaluation/embedding_distance/__init__.py,sha256=YLtGUI4ZMxjsn2Q0dGZ-R9YMFgZsarfJv9qzNEnrLQs,324
-langchain/evaluation/embedding_distance/base.py,sha256=
+langchain/evaluation/embedding_distance/base.py,sha256=9h5beSVF90Tr2jEA96PSh-O46RrzjhFup9cB4gnHkNY,17105
 langchain/evaluation/exact_match/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/evaluation/exact_match/base.py,sha256=BykyjgKQ94391eDODzn3m1RXao9ZSXtc9wiww_fysXI,2751
 langchain/evaluation/loading.py,sha256=1zUtEao_F9292O0fNHl8i93bw1V94RDsFwXZTWe4-pA,7296
@@ -655,10 +655,10 @@ langchain/evaluation/scoring/__init__.py,sha256=D5zPsGRGCpg3KJkfAu2SN096jZi9FRlD
 langchain/evaluation/scoring/eval_chain.py,sha256=HVYmGGXL5EesOIPGG5Es1x4jD7zgaZEkJoeX-Bj6KFc,15463
 langchain/evaluation/scoring/prompt.py,sha256=WqNq8bktJUjU8tcHWVuPJFWgsOIc-G7fYMDiejHhWIY,2130
 langchain/evaluation/string_distance/__init__.py,sha256=qAz9Z709ocAi_Yd9nbkKnFt16nc9d_gTT55N7okXWmE,286
-langchain/evaluation/string_distance/base.py,sha256=
+langchain/evaluation/string_distance/base.py,sha256=EwxQBuzlj5vWiVeoPTu61ZK_1-oPaDr5Lkzo79EZ-SU,14039
 langchain/example_generator.py,sha256=q_JvQKn2pgJOHcBeFc851GpaR4seOZXDe9TISAJheEY,142
 langchain/formatting.py,sha256=4s5AwApo_6t2pVfoFXOgFU9sNNdpVDD44B4ryOwJMJo,168
-langchain/globals
+langchain/globals.py,sha256=SUMrEo_KlpODNBDj4JZDILhbxTK_GGDEYmUQVQ-Hzus,7436
 langchain/graphs/__init__.py,sha256=l12tO5owB32RcKbu5O8rtOK0qLVjGee9JjX3RUVT54Q,1528
 langchain/graphs/arangodb_graph.py,sha256=3Gu4bnS0q27AUEuUnoK2asz67iU8KpJktQ2uJvJ-iy0,796
 langchain/graphs/falkordb_graph.py,sha256=PdrxQC9Tl0txQtDTFNk2qR9m5L0apWPwq-SWq3lxGMc,618
@@ -773,7 +773,7 @@ langchain/load/dump.py,sha256=st-Wju0x5jrMVfMzjeKF1jo3Jvn8b1cCCfLrAaIYvhM,100
 langchain/load/load.py,sha256=sxSF6ySrMY4ouq77JPiuZKRx2lyVbqLoMi5ni5bHzAI,98
 langchain/load/serializable.py,sha256=6iZp1sg_ozIDqXTDEk60IP89UEwZEJ4j0oMaHascLKI,412
 langchain/memory/__init__.py,sha256=kQFlaG2Yuz1Y7U8e3Ngbv-13I3BPGKAI06Lz9sL-Lbc,5574
-langchain/memory/buffer.py,sha256=
+langchain/memory/buffer.py,sha256=1k_6Q1akqL65M9tGCQGDIRcK6Z9CqhbDzAm8sFE88Hw,4841
 langchain/memory/buffer_window.py,sha256=hRFiodXZC1Xu7DFTmbWp5wtosuBkAEHQsPEXmMd-XIk,1616
 langchain/memory/chat_memory.py,sha256=brb54tGKLBgLIhhvbpIH9eMxppl06AM54UGsU5GY5RM,2795
 langchain/memory/chat_message_histories/__init__.py,sha256=AdCCNl_rxX4OVVLK6ZwwpMTo8VXzAS4v9bH1v2QjHec,3506
@@ -804,21 +804,21 @@ langchain/memory/motorhead_memory.py,sha256=OXjtlAQi1ioRXdM3GVcYmReynkKn8Vm1e5Tr
 langchain/memory/prompt.py,sha256=r8vxZSRydSOWJzRszStN0Wky4n3fyM_QJ2XoKMsP3JA,8181
 langchain/memory/readonly.py,sha256=IbZFbyuPo_bHEzyACQcLIcOPpczoX5CLfM_n0YllYjw,792
 langchain/memory/simple.py,sha256=7El81OHJA0HBqwJ-AZDTQFPfB7B5NEsmY_fEOrwD0XA,761
-langchain/memory/summary.py,sha256=
-langchain/memory/summary_buffer.py,sha256=
+langchain/memory/summary.py,sha256=fDqtwmYJlyrhG_j1fIuYg4aAfFZuwm9PTw61hqW2CeI,3819
+langchain/memory/summary_buffer.py,sha256=5aM6ocE6jPXC9HqElDTyTwRNR6QydGDUf12BWtu5kTA,5048
 langchain/memory/token_buffer.py,sha256=E1N7bWSkAmi-7V7F-7iRl-BADStnplp-zwtUndjXBMM,2144
 langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
 langchain/memory/vectorstore.py,sha256=SMt1iqtqTm3rcecWqwEmCcX5l-r_JVggKpuf4faUIGI,3875
-langchain/memory/vectorstore_token_buffer_memory.py,sha256=
+langchain/memory/vectorstore_token_buffer_memory.py,sha256=uB7N-3KHSpbzeS2TTnxIzMqRIfsgki4w8uyNKkE-cWw,7620
 langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
 langchain/model_laboratory.py,sha256=IaJzVG_SbFX7W6ODriqqme-Q5x0MB18j4Bhg1Y-fWLo,3278
 langchain/output_parsers/__init__.py,sha256=A9fDuB-lYuOIN8QbDx-fULqSwugB7saLRKD23gdaIl4,2720
 langchain/output_parsers/boolean.py,sha256=1-_Xtqhq-9ll4GxfPXW_5sAjAbODCWKF6yTPdVhY8mQ,1689
-langchain/output_parsers/combining.py,sha256=
+langchain/output_parsers/combining.py,sha256=tBQx3lVAz4YL52unRsRGofAgQPFbIgDU8MnwONGw5WQ,1795
 langchain/output_parsers/datetime.py,sha256=zxhwax0YxVahE3CCHMXTqjpyzQcffgZ9J0NA0qLL0_8,1974
-langchain/output_parsers/enum.py,sha256=
+langchain/output_parsers/enum.py,sha256=VrkErkDrW6JEiIOjw18J0D4p_BU0p59pUcb7W1sRLbk,1267
 langchain/output_parsers/ernie_functions.py,sha256=86DsYlAGncjRalnmw5ZGwhH80lP2ms6zaw8PJGC3m3Q,1427
-langchain/output_parsers/fix.py,sha256=
+langchain/output_parsers/fix.py,sha256=R7g7FUXOWeefYnXD0uFigWJYC1hPZwCkoFewp05jsBc,5492
 langchain/output_parsers/format_instructions.py,sha256=y5oSpjwzgmvYRNhfe0JmKHHdFZZP65L2snJI6xcMXEY,3958
 langchain/output_parsers/json.py,sha256=2FJL7uLd7pHgvpQm-r5XDyt9S1ZZ9mlJUW8ilQAQ0k4,340
 langchain/output_parsers/list.py,sha256=D35r0U51Xy5wHn-VcWxr97Ftul4UqszmyLetDi4syYQ,310
@@ -831,7 +831,7 @@ langchain/output_parsers/pydantic.py,sha256=uxbrfdyPnZxfdDvmuDr3QOmBFMwML3SfMDEm
 langchain/output_parsers/rail_parser.py,sha256=iHmX3ux2jE2k0MsLqe5XCrJ1eQOBBfZtRbRzQoYPTfU,691
 langchain/output_parsers/regex.py,sha256=TAkxKzxRQQ810LuXbxYatwLZgsYhoVwez3j5e2P55bA,1230
 langchain/output_parsers/regex_dict.py,sha256=UK6iL4Hx-q6UlPNEGLAnbh7_8-IwtXY2V1-_KicG1Z8,1725
-langchain/output_parsers/retry.py,sha256=
+langchain/output_parsers/retry.py,sha256=Tzm69N1UPX7u9tLhk8REkl7_wor-ZZ3udFrUkeBUVqo,10332
 langchain/output_parsers/structured.py,sha256=YdoqEl1FXanSNVtXZapYPKgiz7VfudzXvBXYQvwr4vo,3165
 langchain/output_parsers/xml.py,sha256=WDHazWjxO-nDAzxkBJrd1tGINVrzo4mH2-Qgqtz9Y2w,93
 langchain/output_parsers/yaml.py,sha256=4JLARJgFf-B2eikneVk3hDtCo9WQdlmPCHOMIpOgcAw,2269
@@ -863,7 +863,7 @@ langchain/retrievers/bm25.py,sha256=L3Pq77NNfV0YDlMkU-ODvJN8ksi1SROQ-vYpPqN5gHs,
 langchain/retrievers/chaindesk.py,sha256=e3oHctHNecz14jz70sMw0_YrFjeWXv7Q04r--DnxWq4,641
 langchain/retrievers/chatgpt_plugin_retriever.py,sha256=Pds7FgWv-e6u43noFsO3v2YV8Y6FUjdkmYs5zjl79Nk,653
 langchain/retrievers/cohere_rag_retriever.py,sha256=YMhx_AmBHUDw6-_cQtnESl0WKjtRmjvbDNQvZs3iYm4,641
-langchain/retrievers/contextual_compression.py,sha256=
+langchain/retrievers/contextual_compression.py,sha256=I8VHaS2DHoFvAHotix9GjOm5933PpaVbU--qVP7UXDQ,2305
 langchain/retrievers/databerry.py,sha256=uMTLwG-QWCaORSPeFshi105VvXCizjF6551XHXXjzcE,661
 langchain/retrievers/docarray.py,sha256=5BHkTy7uI5HUFi-k9qS6ZYxMyGdKbAwxhKqpz3cNCTM,791
 langchain/retrievers/document_compressors/__init__.py,sha256=H0xp8dSYIEYZWdAEQN_zY4DX6gx3kepw9jTC_gUSZyk,1263
@@ -875,7 +875,7 @@ langchain/retrievers/document_compressors/chain_filter_prompt.py,sha256=FTQRPiEs
 langchain/retrievers/document_compressors/cohere_rerank.py,sha256=HKZN5A_A-XoRzdLm5jSvuHA2vRraxGhQWr7B5WoUr3o,4542
 langchain/retrievers/document_compressors/cross_encoder.py,sha256=_Z7SoPSfOUSk-rNIHX2lQgYV0TgVMKf3F9AnTH7EFiM,393
 langchain/retrievers/document_compressors/cross_encoder_rerank.py,sha256=Rbs1y8Tw-vtKVyS93pLHMyjLgQ3w52SktjjCC2pPWuA,1597
-langchain/retrievers/document_compressors/embeddings_filter.py,sha256=
+langchain/retrievers/document_compressors/embeddings_filter.py,sha256=uWY_j5dddlZfyxu_vFYvk8Yj_tybNva-yAIWr2xruXs,5229
 langchain/retrievers/document_compressors/flashrank_rerank.py,sha256=Eo86fJ_T2IbEEeCkI_5rb3Ao4gsdenv-_Ukt33MuMko,709
 langchain/retrievers/document_compressors/listwise_rerank.py,sha256=yo7kptthbmhsW5d4AozHxLqv9_-_E5WCO1WajH025-0,5117
 langchain/retrievers/elastic_search_bm25.py,sha256=eRboOkRQj-_E53gUQIZzxQ1bX0-uEMv7LAQSD7K7Qf8,665
@@ -893,7 +893,7 @@ langchain/retrievers/milvus.py,sha256=f_vi-uodWcS5PyYq-8QD8S7Bx1t_uVswQtqG2D35Xn
 langchain/retrievers/multi_query.py,sha256=q57Ju4GCJh0lKxGOd_Y5WhEWZEc2gz4sY5qqo19t1xc,7091
 langchain/retrievers/multi_vector.py,sha256=rb5gDEAzhzHURJ-VfKGnvq7erZ-xWklnk8RQCBTNsds,4731
 langchain/retrievers/outline.py,sha256=uNuqhoHkfDx73ZEYbHbFjVmJfW-eAdLUzyC9EuoV608,635
-langchain/retrievers/parent_document_retriever.py,sha256=
+langchain/retrievers/parent_document_retriever.py,sha256=oKVp_s5ROske6O0E25yZPOjGA0xmvTGLobmWw_AHgGE,5990
 langchain/retrievers/pinecone_hybrid_search.py,sha256=oEbmHdKIZ86H1O8GhzNC1KVfKb_xAJdRJXpODMY6X3Y,674
 langchain/retrievers/pubmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
 langchain/retrievers/pupmed.py,sha256=kbgj7U6x5YiXcVWobxIJDPnx3eiBAMK5HyRlELcIxsY,632
@@ -901,7 +901,7 @@ langchain/retrievers/re_phraser.py,sha256=tujIOQrkc5r_bQKVt7CxzI797wFb1TBwpngJLm
 langchain/retrievers/remote_retriever.py,sha256=f1jPII31IkNrhkH1LvlUlNLRQNMKNvgE_7qHa3o3P04,659
 langchain/retrievers/self_query/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/retrievers/self_query/astradb.py,sha256=lxlkYOr8xicH7MNyQKIg3Wc-XwhVpKGBn7maqYyR3Hk,670
-langchain/retrievers/self_query/base.py,sha256=
+langchain/retrievers/self_query/base.py,sha256=vyG8ZVRrUF9UqpE3O9buRmlaqwJa032ecsUB28-6Q-4,13491
 langchain/retrievers/self_query/chroma.py,sha256=F0u_3Id1J1hIYM2D8_oNL2JJVetTFDyqW6fuGhjZ0ew,665
 langchain/retrievers/self_query/dashvector.py,sha256=CJAJQuJYNmw_GUIwwlPx3Scu1uDESTnFF-CzZEwFRRg,685
 langchain/retrievers/self_query/databricks_vector_search.py,sha256=S9V-XRfG6taeW3yRx_NZs4h-R4TiyHLnuJTIZa5rsqM,782
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
-langchain-0.2.
+langchain-0.2.12.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.2.12.dist-info/METADATA,sha256=W_JatvIo2DI7FqiGRGYRiZHE7MajUfzdsitNsvftKD0,7074
+langchain-0.2.12.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain-0.2.12.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.2.12.dist-info/RECORD,,
The remaining entries (langchain/globals/__init__.py → langchain/globals.py, LICENSE, WHEEL, entry_points.txt) have no content changes.
|