langchain 0.3.15__py3-none-any.whl → 0.3.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/agents/initialize.py +1 -2
- langchain/agents/loading.py +1 -2
- langchain/callbacks/tracers/logging.py +2 -2
- langchain/chains/api/base.py +1 -0
- langchain/chains/openai_functions/openapi.py +1 -1
- langchain/chains/openai_tools/extraction.py +4 -1
- langchain/chains/question_answering/refine_prompts.py +9 -2
- langchain/chains/sequential.py +4 -2
- langchain/chat_models/base.py +14 -5
- langchain/embeddings/base.py +6 -2
- langchain/evaluation/embedding_distance/base.py +12 -3
- langchain/indexes/vectorstore.py +79 -8
- langchain/memory/chat_memory.py +8 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +4 -1
- langchain/retrievers/ensemble.py +3 -2
- langchain/smith/evaluation/runner_utils.py +1 -2
- {langchain-0.3.15.dist-info → langchain-0.3.16.dist-info}/METADATA +2 -2
- {langchain-0.3.15.dist-info → langchain-0.3.16.dist-info}/RECORD +21 -21
- {langchain-0.3.15.dist-info → langchain-0.3.16.dist-info}/LICENSE +0 -0
- {langchain-0.3.15.dist-info → langchain-0.3.16.dist-info}/WHEEL +0 -0
- {langchain-0.3.15.dist-info → langchain-0.3.16.dist-info}/entry_points.txt +0 -0
langchain/agents/initialize.py
CHANGED
|
@@ -84,8 +84,7 @@ def initialize_agent(
|
|
|
84
84
|
pass
|
|
85
85
|
else:
|
|
86
86
|
raise ValueError(
|
|
87
|
-
"Somehow both `agent` and `agent_path` are None, "
|
|
88
|
-
"this should never happen."
|
|
87
|
+
"Somehow both `agent` and `agent_path` are None, this should never happen."
|
|
89
88
|
)
|
|
90
89
|
return AgentExecutor.from_agent_and_tools(
|
|
91
90
|
agent=agent_obj,
|
langchain/agents/loading.py
CHANGED
|
@@ -58,8 +58,7 @@ def load_agent_from_config(
|
|
|
58
58
|
if load_from_tools:
|
|
59
59
|
if llm is None:
|
|
60
60
|
raise ValueError(
|
|
61
|
-
"If `load_from_llm_and_tools` is set to True, "
|
|
62
|
-
"then LLM must be provided"
|
|
61
|
+
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
|
|
63
62
|
)
|
|
64
63
|
if tools is None:
|
|
65
64
|
raise ValueError(
|
|
@@ -41,6 +41,6 @@ class LoggingCallbackHandler(FunctionCallbackHandler):
|
|
|
41
41
|
except TracerException:
|
|
42
42
|
crumbs_str = ""
|
|
43
43
|
self.function_callback(
|
|
44
|
-
f
|
|
45
|
-
f
|
|
44
|
+
f"{get_colored_text('[text]', color='blue')}"
|
|
45
|
+
f" {get_bolded_text(f'{crumbs_str}New text:')}\n{text}"
|
|
46
46
|
)
|
langchain/chains/api/base.py
CHANGED
|
@@ -233,7 +233,7 @@ class SimpleRequestChain(Chain):
|
|
|
233
233
|
response = (
|
|
234
234
|
f"{api_response.status_code}: {api_response.reason}"
|
|
235
235
|
+ f"\nFor {name} "
|
|
236
|
-
+ f"Called with args: {args.get('params','')}"
|
|
236
|
+
+ f"Called with args: {args.get('params', '')}"
|
|
237
237
|
)
|
|
238
238
|
else:
|
|
239
239
|
try:
|
|
@@ -68,7 +68,10 @@ def create_extraction_chain_pydantic(
|
|
|
68
68
|
if not isinstance(pydantic_schemas, list):
|
|
69
69
|
pydantic_schemas = [pydantic_schemas]
|
|
70
70
|
prompt = ChatPromptTemplate.from_messages(
|
|
71
|
-
[
|
|
71
|
+
[
|
|
72
|
+
("system", system_message),
|
|
73
|
+
("user", "{input}"),
|
|
74
|
+
]
|
|
72
75
|
)
|
|
73
76
|
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
|
|
74
77
|
tools = [{"type": "function", "function": d} for d in functions]
|
|
@@ -33,7 +33,11 @@ refine_template = (
|
|
|
33
33
|
"If the context isn't useful, return the original answer."
|
|
34
34
|
)
|
|
35
35
|
CHAT_REFINE_PROMPT = ChatPromptTemplate.from_messages(
|
|
36
|
-
[
|
|
36
|
+
[
|
|
37
|
+
("human", "{question}"),
|
|
38
|
+
("ai", "{existing_answer}"),
|
|
39
|
+
("human", refine_template),
|
|
40
|
+
]
|
|
37
41
|
)
|
|
38
42
|
REFINE_PROMPT_SELECTOR = ConditionalPromptSelector(
|
|
39
43
|
default_prompt=DEFAULT_REFINE_PROMPT,
|
|
@@ -60,7 +64,10 @@ chat_qa_prompt_template = (
|
|
|
60
64
|
"answer any questions"
|
|
61
65
|
)
|
|
62
66
|
CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(
|
|
63
|
-
[
|
|
67
|
+
[
|
|
68
|
+
("system", chat_qa_prompt_template),
|
|
69
|
+
("human", "{question}"),
|
|
70
|
+
]
|
|
64
71
|
)
|
|
65
72
|
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
|
|
66
73
|
default_prompt=DEFAULT_TEXT_QA_PROMPT,
|
langchain/chains/sequential.py
CHANGED
|
@@ -178,7 +178,9 @@ class SimpleSequentialChain(Chain):
|
|
|
178
178
|
_input = inputs[self.input_key]
|
|
179
179
|
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
|
|
180
180
|
for i, chain in enumerate(self.chains):
|
|
181
|
-
_input = chain.run(
|
|
181
|
+
_input = chain.run(
|
|
182
|
+
_input, callbacks=_run_manager.get_child(f"step_{i + 1}")
|
|
183
|
+
)
|
|
182
184
|
if self.strip_outputs:
|
|
183
185
|
_input = _input.strip()
|
|
184
186
|
_run_manager.on_text(
|
|
@@ -196,7 +198,7 @@ class SimpleSequentialChain(Chain):
|
|
|
196
198
|
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
|
|
197
199
|
for i, chain in enumerate(self.chains):
|
|
198
200
|
_input = await chain.arun(
|
|
199
|
-
_input, callbacks=_run_manager.get_child(f"step_{i+1}")
|
|
201
|
+
_input, callbacks=_run_manager.get_child(f"step_{i + 1}")
|
|
200
202
|
)
|
|
201
203
|
if self.strip_outputs:
|
|
202
204
|
_input = _input.strip()
|
langchain/chat_models/base.py
CHANGED
|
@@ -416,6 +416,11 @@ def _init_chat_model_helper(
|
|
|
416
416
|
from langchain_google_vertexai.model_garden import ChatAnthropicVertex
|
|
417
417
|
|
|
418
418
|
return ChatAnthropicVertex(model=model, **kwargs)
|
|
419
|
+
elif model_provider == "deepseek":
|
|
420
|
+
_check_pkg("langchain_deepseek", pkg_kebab="langchain-deepseek-official")
|
|
421
|
+
from langchain_deepseek import ChatDeepSeek
|
|
422
|
+
|
|
423
|
+
return ChatDeepSeek(model=model, **kwargs)
|
|
419
424
|
else:
|
|
420
425
|
supported = ", ".join(_SUPPORTED_PROVIDERS)
|
|
421
426
|
raise ValueError(
|
|
@@ -440,6 +445,7 @@ _SUPPORTED_PROVIDERS = {
|
|
|
440
445
|
"bedrock",
|
|
441
446
|
"bedrock_converse",
|
|
442
447
|
"google_anthropic_vertex",
|
|
448
|
+
"deepseek",
|
|
443
449
|
}
|
|
444
450
|
|
|
445
451
|
|
|
@@ -480,12 +486,11 @@ def _parse_model(model: str, model_provider: Optional[str]) -> Tuple[str, str]:
|
|
|
480
486
|
return model, model_provider
|
|
481
487
|
|
|
482
488
|
|
|
483
|
-
def _check_pkg(pkg: str) -> None:
|
|
489
|
+
def _check_pkg(pkg: str, *, pkg_kebab: Optional[str] = None) -> None:
|
|
484
490
|
if not util.find_spec(pkg):
|
|
485
|
-
pkg_kebab = pkg.replace("_", "-")
|
|
491
|
+
pkg_kebab = pkg_kebab if pkg_kebab is not None else pkg.replace("_", "-")
|
|
486
492
|
raise ImportError(
|
|
487
|
-
f"Unable to import {
|
|
488
|
-
f"`pip install -U {pkg_kebab}`"
|
|
493
|
+
f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
|
|
489
494
|
)
|
|
490
495
|
|
|
491
496
|
|
|
@@ -590,7 +595,11 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
|
|
|
590
595
|
queued_declarative_operations = list(self._queued_declarative_operations)
|
|
591
596
|
if remaining_config:
|
|
592
597
|
queued_declarative_operations.append(
|
|
593
|
-
(
|
|
598
|
+
(
|
|
599
|
+
"with_config",
|
|
600
|
+
(),
|
|
601
|
+
{"config": remaining_config},
|
|
602
|
+
)
|
|
594
603
|
)
|
|
595
604
|
return _ConfigurableModel(
|
|
596
605
|
default_config={**self._default_config, **model_params},
|
langchain/embeddings/base.py
CHANGED
|
@@ -13,6 +13,7 @@ _SUPPORTED_PROVIDERS = {
|
|
|
13
13
|
"google_vertexai": "langchain_google_vertexai",
|
|
14
14
|
"huggingface": "langchain_huggingface",
|
|
15
15
|
"mistralai": "langchain_mistralai",
|
|
16
|
+
"ollama": "langchain_ollama",
|
|
16
17
|
"openai": "langchain_openai",
|
|
17
18
|
}
|
|
18
19
|
|
|
@@ -174,8 +175,7 @@ def init_embeddings(
|
|
|
174
175
|
if not model:
|
|
175
176
|
providers = _SUPPORTED_PROVIDERS.keys()
|
|
176
177
|
raise ValueError(
|
|
177
|
-
"Must specify model name. "
|
|
178
|
-
f"Supported providers are: {', '.join(providers)}"
|
|
178
|
+
f"Must specify model name. Supported providers are: {', '.join(providers)}"
|
|
179
179
|
)
|
|
180
180
|
|
|
181
181
|
provider, model_name = _infer_model_and_provider(model, provider=provider)
|
|
@@ -210,6 +210,10 @@ def init_embeddings(
|
|
|
210
210
|
from langchain_huggingface import HuggingFaceEmbeddings
|
|
211
211
|
|
|
212
212
|
return HuggingFaceEmbeddings(model_name=model_name, **kwargs)
|
|
213
|
+
elif provider == "ollama":
|
|
214
|
+
from langchain_ollama import OllamaEmbeddings
|
|
215
|
+
|
|
216
|
+
return OllamaEmbeddings(model=model_name, **kwargs)
|
|
213
217
|
else:
|
|
214
218
|
raise ValueError(
|
|
215
219
|
f"Provider '{provider}' is not supported.\n"
|
|
@@ -310,7 +310,10 @@ class EmbeddingDistanceEvalChain(_EmbeddingDistanceChainMixin, StringEvaluator):
|
|
|
310
310
|
Dict[str, Any]: The computed score.
|
|
311
311
|
"""
|
|
312
312
|
embedded = await self.embeddings.aembed_documents(
|
|
313
|
-
[
|
|
313
|
+
[
|
|
314
|
+
inputs["prediction"],
|
|
315
|
+
inputs["reference"],
|
|
316
|
+
]
|
|
314
317
|
)
|
|
315
318
|
vectors = np.array(embedded)
|
|
316
319
|
score = self._compute_score(vectors)
|
|
@@ -427,7 +430,10 @@ class PairwiseEmbeddingDistanceEvalChain(
|
|
|
427
430
|
"""
|
|
428
431
|
vectors = np.array(
|
|
429
432
|
self.embeddings.embed_documents(
|
|
430
|
-
[
|
|
433
|
+
[
|
|
434
|
+
inputs["prediction"],
|
|
435
|
+
inputs["prediction_b"],
|
|
436
|
+
]
|
|
431
437
|
)
|
|
432
438
|
)
|
|
433
439
|
score = self._compute_score(vectors)
|
|
@@ -449,7 +455,10 @@ class PairwiseEmbeddingDistanceEvalChain(
|
|
|
449
455
|
Dict[str, Any]: The computed score.
|
|
450
456
|
"""
|
|
451
457
|
embedded = await self.embeddings.aembed_documents(
|
|
452
|
-
[
|
|
458
|
+
[
|
|
459
|
+
inputs["prediction"],
|
|
460
|
+
inputs["prediction_b"],
|
|
461
|
+
]
|
|
453
462
|
)
|
|
454
463
|
vectors = np.array(embedded)
|
|
455
464
|
score = self._compute_score(vectors)
|
langchain/indexes/vectorstore.py
CHANGED
|
@@ -1,3 +1,5 @@
|
|
|
1
|
+
"""Vectorstore stubs for the indexing api."""
|
|
2
|
+
|
|
1
3
|
from typing import Any, Dict, List, Optional, Type
|
|
2
4
|
|
|
3
5
|
from langchain_core.document_loaders import BaseLoader
|
|
@@ -13,6 +15,7 @@ from langchain.chains.retrieval_qa.base import RetrievalQA
|
|
|
13
15
|
|
|
14
16
|
|
|
15
17
|
def _get_default_text_splitter() -> TextSplitter:
|
|
18
|
+
"""Return the default text splitter used for chunking documents."""
|
|
16
19
|
return RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
|
|
17
20
|
|
|
18
21
|
|
|
@@ -33,7 +36,17 @@ class VectorStoreIndexWrapper(BaseModel):
|
|
|
33
36
|
retriever_kwargs: Optional[Dict[str, Any]] = None,
|
|
34
37
|
**kwargs: Any,
|
|
35
38
|
) -> str:
|
|
36
|
-
"""Query the vectorstore.
|
|
39
|
+
"""Query the vectorstore using the provided LLM.
|
|
40
|
+
|
|
41
|
+
Args:
|
|
42
|
+
question: The question or prompt to query.
|
|
43
|
+
llm: The language model to use. Must not be None.
|
|
44
|
+
retriever_kwargs: Optional keyword arguments for the retriever.
|
|
45
|
+
**kwargs: Additional keyword arguments forwarded to the chain.
|
|
46
|
+
|
|
47
|
+
Returns:
|
|
48
|
+
The result string from the RetrievalQA chain.
|
|
49
|
+
"""
|
|
37
50
|
if llm is None:
|
|
38
51
|
raise NotImplementedError(
|
|
39
52
|
"This API has been changed to require an LLM. "
|
|
@@ -55,7 +68,17 @@ class VectorStoreIndexWrapper(BaseModel):
|
|
|
55
68
|
retriever_kwargs: Optional[Dict[str, Any]] = None,
|
|
56
69
|
**kwargs: Any,
|
|
57
70
|
) -> str:
|
|
58
|
-
"""
|
|
71
|
+
"""Asynchronously query the vectorstore using the provided LLM.
|
|
72
|
+
|
|
73
|
+
Args:
|
|
74
|
+
question: The question or prompt to query.
|
|
75
|
+
llm: The language model to use. Must not be None.
|
|
76
|
+
retriever_kwargs: Optional keyword arguments for the retriever.
|
|
77
|
+
**kwargs: Additional keyword arguments forwarded to the chain.
|
|
78
|
+
|
|
79
|
+
Returns:
|
|
80
|
+
The asynchronous result string from the RetrievalQA chain.
|
|
81
|
+
"""
|
|
59
82
|
if llm is None:
|
|
60
83
|
raise NotImplementedError(
|
|
61
84
|
"This API has been changed to require an LLM. "
|
|
@@ -77,7 +100,17 @@ class VectorStoreIndexWrapper(BaseModel):
|
|
|
77
100
|
retriever_kwargs: Optional[Dict[str, Any]] = None,
|
|
78
101
|
**kwargs: Any,
|
|
79
102
|
) -> dict:
|
|
80
|
-
"""Query the vectorstore and
|
|
103
|
+
"""Query the vectorstore and retrieve the answer along with sources.
|
|
104
|
+
|
|
105
|
+
Args:
|
|
106
|
+
question: The question or prompt to query.
|
|
107
|
+
llm: The language model to use. Must not be None.
|
|
108
|
+
retriever_kwargs: Optional keyword arguments for the retriever.
|
|
109
|
+
**kwargs: Additional keyword arguments forwarded to the chain.
|
|
110
|
+
|
|
111
|
+
Returns:
|
|
112
|
+
A dictionary containing the answer and source documents.
|
|
113
|
+
"""
|
|
81
114
|
if llm is None:
|
|
82
115
|
raise NotImplementedError(
|
|
83
116
|
"This API has been changed to require an LLM. "
|
|
@@ -99,7 +132,17 @@ class VectorStoreIndexWrapper(BaseModel):
|
|
|
99
132
|
retriever_kwargs: Optional[Dict[str, Any]] = None,
|
|
100
133
|
**kwargs: Any,
|
|
101
134
|
) -> dict:
|
|
102
|
-
"""
|
|
135
|
+
"""Asynchronously query the vectorstore and retrieve the answer and sources.
|
|
136
|
+
|
|
137
|
+
Args:
|
|
138
|
+
question: The question or prompt to query.
|
|
139
|
+
llm: The language model to use. Must not be None.
|
|
140
|
+
retriever_kwargs: Optional keyword arguments for the retriever.
|
|
141
|
+
**kwargs: Additional keyword arguments forwarded to the chain.
|
|
142
|
+
|
|
143
|
+
Returns:
|
|
144
|
+
A dictionary containing the answer and source documents.
|
|
145
|
+
"""
|
|
103
146
|
if llm is None:
|
|
104
147
|
raise NotImplementedError(
|
|
105
148
|
"This API has been changed to require an LLM. "
|
|
@@ -149,14 +192,28 @@ class VectorstoreIndexCreator(BaseModel):
|
|
|
149
192
|
)
|
|
150
193
|
|
|
151
194
|
def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
|
|
152
|
-
"""Create a vectorstore index from loaders.
|
|
195
|
+
"""Create a vectorstore index from a list of loaders.
|
|
196
|
+
|
|
197
|
+
Args:
|
|
198
|
+
loaders: A list of `BaseLoader` instances to load documents.
|
|
199
|
+
|
|
200
|
+
Returns:
|
|
201
|
+
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
|
|
202
|
+
"""
|
|
153
203
|
docs = []
|
|
154
204
|
for loader in loaders:
|
|
155
205
|
docs.extend(loader.load())
|
|
156
206
|
return self.from_documents(docs)
|
|
157
207
|
|
|
158
208
|
async def afrom_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper:
|
|
159
|
-
"""
|
|
209
|
+
"""Asynchronously create a vectorstore index from a list of loaders.
|
|
210
|
+
|
|
211
|
+
Args:
|
|
212
|
+
loaders: A list of `BaseLoader` instances to load documents.
|
|
213
|
+
|
|
214
|
+
Returns:
|
|
215
|
+
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
|
|
216
|
+
"""
|
|
160
217
|
docs = []
|
|
161
218
|
for loader in loaders:
|
|
162
219
|
async for doc in loader.alazy_load():
|
|
@@ -164,7 +221,14 @@ class VectorstoreIndexCreator(BaseModel):
|
|
|
164
221
|
return await self.afrom_documents(docs)
|
|
165
222
|
|
|
166
223
|
def from_documents(self, documents: List[Document]) -> VectorStoreIndexWrapper:
|
|
167
|
-
"""Create a vectorstore index from documents.
|
|
224
|
+
"""Create a vectorstore index from a list of documents.
|
|
225
|
+
|
|
226
|
+
Args:
|
|
227
|
+
documents: A list of `Document` objects.
|
|
228
|
+
|
|
229
|
+
Returns:
|
|
230
|
+
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
|
|
231
|
+
"""
|
|
168
232
|
sub_docs = self.text_splitter.split_documents(documents)
|
|
169
233
|
vectorstore = self.vectorstore_cls.from_documents(
|
|
170
234
|
sub_docs, self.embedding, **self.vectorstore_kwargs
|
|
@@ -174,7 +238,14 @@ class VectorstoreIndexCreator(BaseModel):
|
|
|
174
238
|
async def afrom_documents(
|
|
175
239
|
self, documents: List[Document]
|
|
176
240
|
) -> VectorStoreIndexWrapper:
|
|
177
|
-
"""
|
|
241
|
+
"""Asynchronously create a vectorstore index from a list of documents.
|
|
242
|
+
|
|
243
|
+
Args:
|
|
244
|
+
documents: A list of `Document` objects.
|
|
245
|
+
|
|
246
|
+
Returns:
|
|
247
|
+
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
|
|
248
|
+
"""
|
|
178
249
|
sub_docs = self.text_splitter.split_documents(documents)
|
|
179
250
|
vectorstore = await self.vectorstore_cls.afrom_documents(
|
|
180
251
|
sub_docs, self.embedding, **self.vectorstore_kwargs
|
langchain/memory/chat_memory.py
CHANGED
|
@@ -71,7 +71,10 @@ class BaseChatMemory(BaseMemory, ABC):
|
|
|
71
71
|
"""Save context from this conversation to buffer."""
|
|
72
72
|
input_str, output_str = self._get_input_output(inputs, outputs)
|
|
73
73
|
self.chat_memory.add_messages(
|
|
74
|
-
[
|
|
74
|
+
[
|
|
75
|
+
HumanMessage(content=input_str),
|
|
76
|
+
AIMessage(content=output_str),
|
|
77
|
+
]
|
|
75
78
|
)
|
|
76
79
|
|
|
77
80
|
async def asave_context(
|
|
@@ -80,7 +83,10 @@ class BaseChatMemory(BaseMemory, ABC):
|
|
|
80
83
|
"""Save context from this conversation to buffer."""
|
|
81
84
|
input_str, output_str = self._get_input_output(inputs, outputs)
|
|
82
85
|
await self.chat_memory.aadd_messages(
|
|
83
|
-
[
|
|
86
|
+
[
|
|
87
|
+
HumanMessage(content=input_str),
|
|
88
|
+
AIMessage(content=output_str),
|
|
89
|
+
]
|
|
84
90
|
)
|
|
85
91
|
|
|
86
92
|
def clear(self) -> None:
|
|
@@ -92,7 +92,10 @@ class CohereRerank(BaseDocumentCompressor):
|
|
|
92
92
|
result_dicts = []
|
|
93
93
|
for res in results:
|
|
94
94
|
result_dicts.append(
|
|
95
|
-
{
|
|
95
|
+
{
|
|
96
|
+
"index": res.index,
|
|
97
|
+
"relevance_score": res.relevance_score,
|
|
98
|
+
}
|
|
96
99
|
)
|
|
97
100
|
return result_dicts
|
|
98
101
|
|
langchain/retrievers/ensemble.py
CHANGED
|
@@ -223,7 +223,7 @@ class EnsembleRetriever(BaseRetriever):
|
|
|
223
223
|
retriever.invoke(
|
|
224
224
|
query,
|
|
225
225
|
patch_config(
|
|
226
|
-
config, callbacks=run_manager.get_child(tag=f"retriever_{i+1}")
|
|
226
|
+
config, callbacks=run_manager.get_child(tag=f"retriever_{i + 1}")
|
|
227
227
|
),
|
|
228
228
|
)
|
|
229
229
|
for i, retriever in enumerate(self.retrievers)
|
|
@@ -265,7 +265,8 @@ class EnsembleRetriever(BaseRetriever):
|
|
|
265
265
|
retriever.ainvoke(
|
|
266
266
|
query,
|
|
267
267
|
patch_config(
|
|
268
|
-
config,
|
|
268
|
+
config,
|
|
269
|
+
callbacks=run_manager.get_child(tag=f"retriever_{i + 1}"),
|
|
269
270
|
),
|
|
270
271
|
)
|
|
271
272
|
for i, retriever in enumerate(self.retrievers)
|
|
@@ -247,8 +247,7 @@ def _get_prompt(inputs: Dict[str, Any]) -> str:
|
|
|
247
247
|
if "prompt" in inputs:
|
|
248
248
|
if not isinstance(inputs["prompt"], str):
|
|
249
249
|
raise InputFormatError(
|
|
250
|
-
"Expected string for 'prompt', got"
|
|
251
|
-
f" {type(inputs['prompt']).__name__}"
|
|
250
|
+
f"Expected string for 'prompt', got {type(inputs['prompt']).__name__}"
|
|
252
251
|
)
|
|
253
252
|
prompts = [inputs["prompt"]]
|
|
254
253
|
elif "prompts" in inputs:
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: langchain
|
|
3
|
-
Version: 0.3.15
|
|
3
|
+
Version: 0.3.16
|
|
4
4
|
Summary: Building applications with LLMs through composability
|
|
5
5
|
Home-page: https://github.com/langchain-ai/langchain
|
|
6
6
|
License: MIT
|
|
@@ -16,7 +16,7 @@ Requires-Dist: PyYAML (>=5.3)
|
|
|
16
16
|
Requires-Dist: SQLAlchemy (>=1.4,<3)
|
|
17
17
|
Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
|
|
18
18
|
Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
|
|
19
|
-
Requires-Dist: langchain-core (>=0.3.
|
|
19
|
+
Requires-Dist: langchain-core (>=0.3.32,<0.4.0)
|
|
20
20
|
Requires-Dist: langchain-text-splitters (>=0.3.3,<0.4.0)
|
|
21
21
|
Requires-Dist: langsmith (>=0.1.17,<0.4)
|
|
22
22
|
Requires-Dist: numpy (>=1.22.4,<2) ; python_version < "3.12"
|
|
@@ -100,12 +100,12 @@ langchain/agents/format_scratchpad/openai_functions.py,sha256=LtIroeeK_SQaxx3yAt
|
|
|
100
100
|
langchain/agents/format_scratchpad/openai_tools.py,sha256=vyBEqvIZ5HCradWWg0weg4bj9R3nr-CpGZqvSua9HnE,166
|
|
101
101
|
langchain/agents/format_scratchpad/tools.py,sha256=nyp_Z9sTnS6FLXSUfAEeZUxhpXcBLck52kdSz0Kas7I,1932
|
|
102
102
|
langchain/agents/format_scratchpad/xml.py,sha256=DtMBd2-Rgi2LdfxXNImYYNcCEy5lxk8ix7-SSCOpWQY,578
|
|
103
|
-
langchain/agents/initialize.py,sha256=
|
|
103
|
+
langchain/agents/initialize.py,sha256=pomYFftGjJf906OCfvOwpfpu4DJmkKiVWEWHOtc8jCY,3559
|
|
104
104
|
langchain/agents/json_chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
105
105
|
langchain/agents/json_chat/base.py,sha256=V4jl4Vt_WxTotmSw8Py4rQU9GMnhHpXYC5x-wtRinxg,7994
|
|
106
106
|
langchain/agents/json_chat/prompt.py,sha256=gZukOH50C1llQ-AB2QvtL-PSrczv-a-gJLIPYP8z6vA,551
|
|
107
107
|
langchain/agents/load_tools.py,sha256=uMi1EZtkv2sgyUw6iXMNlCSZlIaju0Rw2svwMtkeW3E,286
|
|
108
|
-
langchain/agents/loading.py,sha256=
|
|
108
|
+
langchain/agents/loading.py,sha256=Obxmd9_F6jEF1eg4F0bB35Fr9JTAWGaxCRZuANnKWr4,4795
|
|
109
109
|
langchain/agents/mrkl/__init__.py,sha256=Gpz8w88wAF4GSXoGnuYOwZY5rhjFL5WGZvTVQa-YJas,86
|
|
110
110
|
langchain/agents/mrkl/base.py,sha256=yonYGfgMkTixmrknWROMjwjddiUCgmWEkfIaWVlJdAU,7177
|
|
111
111
|
langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
|
|
@@ -189,7 +189,7 @@ langchain/callbacks/tracers/evaluation.py,sha256=ryLN36OsLjXiJmb_helQqxULOYt6BcJ
|
|
|
189
189
|
langchain/callbacks/tracers/langchain.py,sha256=KS1qe0UMdmQzoESWw696yWtQyg4_ZSXj4kNOtLfWFlU,218
|
|
190
190
|
langchain/callbacks/tracers/langchain_v1.py,sha256=gdFt_Orrv9W0P_ytMz0UkBTOiYFz8fOwrjKCFk96Bc8,99
|
|
191
191
|
langchain/callbacks/tracers/log_stream.py,sha256=Fghp01LH6Ucvj6q-NtvhYZzW3Ow1n-IXVlrdnh-rrLs,226
|
|
192
|
-
langchain/callbacks/tracers/logging.py,sha256=
|
|
192
|
+
langchain/callbacks/tracers/logging.py,sha256=HX9qbGC8UrErAZv4RC0DVEHzvtoLXSyFyohGcfy6z58,1352
|
|
193
193
|
langchain/callbacks/tracers/root_listeners.py,sha256=z4sMzTA35qnAd5S5K19Fu-8rySYOIDnEgYf0SjoQhk0,105
|
|
194
194
|
langchain/callbacks/tracers/run_collector.py,sha256=xDu5e45bJW8PyGaFul9tenkbjZ__MtfR1FoqpqM-BsA,120
|
|
195
195
|
langchain/callbacks/tracers/schemas.py,sha256=LzW3N2S6a0nozOY9lSLHDUAfn8aYrXIkd97iok6GdHw,470
|
|
@@ -201,7 +201,7 @@ langchain/callbacks/wandb_callback.py,sha256=mWcDRVTlUnzQGhN2BMiGhPsKw5uyB2qDQ_L
|
|
|
201
201
|
langchain/callbacks/whylabs_callback.py,sha256=N36XACtHYNgFSSYrNbfXiZ4nxSdwSrIE5e6xwxukrPc,688
|
|
202
202
|
langchain/chains/__init__.py,sha256=xsRWTwsP3mTejfnKTzsTKRwpYT5xthXZAde30M_118U,5092
|
|
203
203
|
langchain/chains/api/__init__.py,sha256=d8xBEQqFVNOMTm4qXNz5YiYkvA827Ayyd4XCG1KP-z4,84
|
|
204
|
-
langchain/chains/api/base.py,sha256=
|
|
204
|
+
langchain/chains/api/base.py,sha256=KzVOswgnZK4KjdCnfieI02iDsvWCEGzOZCCSlxhLTqc,15260
|
|
205
205
|
langchain/chains/api/news_docs.py,sha256=9vzx5nSPwe_cjFV8cemlfMp4EX8wiZe2eXBuRik2Vdg,2452
|
|
206
206
|
langchain/chains/api/open_meteo_docs.py,sha256=8pLSX24K37lcgq3jmgfThcuiz7WY3zkub_V6dtsqc18,3399
|
|
207
207
|
langchain/chains/api/openapi/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -289,12 +289,12 @@ langchain/chains/openai_functions/__init__.py,sha256=o8B_I98nFTlFPkF6FPpLyt8pU3E
|
|
|
289
289
|
langchain/chains/openai_functions/base.py,sha256=jfgnAuire9OLOL0kLqKScpjdBEXKYzyNx-Xz0xKArMA,10115
|
|
290
290
|
langchain/chains/openai_functions/citation_fuzzy_match.py,sha256=cd9kh6DKMKS-eCskWFcJmDQLOemne1SMe4pKHbJ-Mvc,5344
|
|
291
291
|
langchain/chains/openai_functions/extraction.py,sha256=2P99EoAb8iipW8TNJwNG2gUzgpWYSCZAvPU-kgUNfqU,7390
|
|
292
|
-
langchain/chains/openai_functions/openapi.py,sha256=
|
|
292
|
+
langchain/chains/openai_functions/openapi.py,sha256=Hi6zo0Wj2Q_sWbpsa5mtzm5Ark4DJzaJ7gJHoFUR2HE,14955
|
|
293
293
|
langchain/chains/openai_functions/qa_with_structure.py,sha256=hS_b7PZjsgD7OR8QXOboq1LGClbRc6TlKcdqMCATojA,4841
|
|
294
294
|
langchain/chains/openai_functions/tagging.py,sha256=5i4dAe019rCKN_zWYugHkW5U66yO9Gse8AxjxJLdnr0,6504
|
|
295
295
|
langchain/chains/openai_functions/utils.py,sha256=GDhYjszQGut1UcJ-dyPvkwiT8gHOV0IejRuIfN7_fhw,1255
|
|
296
296
|
langchain/chains/openai_tools/__init__.py,sha256=xX0If1Nx_ocEOI56EGxCI0v0RZ1_VUegzyODAj0RLVU,134
|
|
297
|
-
langchain/chains/openai_tools/extraction.py,sha256
|
|
297
|
+
langchain/chains/openai_tools/extraction.py,sha256=-_DQQRJhtq5Suj-PxIHDUdx61xEEaE7kRK5YTU315wU,3434
|
|
298
298
|
langchain/chains/prompt_selector.py,sha256=Ual6G-PFeZ5jZkeOXnLCYwffE1CFaOmAIHYu0tim6ps,1997
|
|
299
299
|
langchain/chains/qa_generation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
300
300
|
langchain/chains/qa_generation/base.py,sha256=obAA1qtCavCO3zkskRtB62is-geDOJG526T1N6tJbts,4187
|
|
@@ -317,7 +317,7 @@ langchain/chains/question_answering/__init__.py,sha256=wohLdJqGfpWOwy68EEleW73SC
|
|
|
317
317
|
langchain/chains/question_answering/chain.py,sha256=8Hcg2pA87R1ruofPPpSzQwOdxuCdTfbXPVQg9lwnvkg,9641
|
|
318
318
|
langchain/chains/question_answering/map_reduce_prompt.py,sha256=CrerC8PqW1-V8SsQQsFsMd7dfjTb04Urf2naQYVGxl0,8013
|
|
319
319
|
langchain/chains/question_answering/map_rerank_prompt.py,sha256=l2Ha1Xqr5Q6Y-Xh9af8JTni9gLAyhKJhmSErRFGw9s4,1622
|
|
320
|
-
langchain/chains/question_answering/refine_prompts.py,sha256=
|
|
320
|
+
langchain/chains/question_answering/refine_prompts.py,sha256=eYz3MTGVdXlikjMEOE8LWxYE0A-R3jWLJQ6LtEqFibc,2432
|
|
321
321
|
langchain/chains/question_answering/stuff_prompt.py,sha256=tXecxj10u9x0taPz4I1Kn-J0SOYcjIfr_8RINw9P7ys,1146
|
|
322
322
|
langchain/chains/retrieval.py,sha256=-ZHLdDQUkIQLjF9DMvpH_YgZKxShTm0GaSIhw1ab_EM,2742
|
|
323
323
|
langchain/chains/retrieval_qa/__init__.py,sha256=MGGNuZ-HVZDyk551hUjGexK3U9q-2Yi_VJkpi7MV2DE,62
|
|
@@ -331,7 +331,7 @@ langchain/chains/router/multi_prompt.py,sha256=lLpJsYShzRBnvwtV3AaBbUcB8x6sK1PSx
|
|
|
331
331
|
langchain/chains/router/multi_prompt_prompt.py,sha256=T8UbIuxblnI6Byhw-BMAzwQcbB5ww3N6BiMqMJxS6Jc,1156
|
|
332
332
|
langchain/chains/router/multi_retrieval_prompt.py,sha256=VUYGLWbwGiv03aSMW5sjdGNwsEa9FKgq0RcK5o3lkH4,1079
|
|
333
333
|
langchain/chains/router/multi_retrieval_qa.py,sha256=tjIhHEbOwtF3CLq0qQ8Kd78ao5BXRKZLsm9UlmHrdtQ,4254
|
|
334
|
-
langchain/chains/sequential.py,sha256=
|
|
334
|
+
langchain/chains/sequential.py,sha256=a9i0IGsjji57oJg-1QHJqSVcbMpdyqasYPGaeG3OU5I,7499
|
|
335
335
|
langchain/chains/sql_database/__init__.py,sha256=jQotWN4EWMD98Jk-f7rqh5YtbXbP9XXA0ypLGq8NgrM,47
|
|
336
336
|
langchain/chains/sql_database/prompt.py,sha256=W0xFqVZ18PzxmutnIBJrocXus8_QBByrKtxg8CjGaYw,15458
|
|
337
337
|
langchain/chains/sql_database/query.py,sha256=h-QP5ESatTFj8t7sGsHppXSchy3ZGL1U1afza-Lo8fc,5421
|
|
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
|
|
|
360
360
|
langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
|
|
361
361
|
langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
|
|
362
362
|
langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
|
|
363
|
-
langchain/chat_models/base.py,sha256=
|
|
363
|
+
langchain/chat_models/base.py,sha256=js7jRvA-3DxoctDf3zshWI7m7rrI5ugXzSo45O1VJms,33333
|
|
364
364
|
langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
|
|
365
365
|
langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
|
|
366
366
|
langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
|
|
@@ -577,7 +577,7 @@ langchain/embeddings/aleph_alpha.py,sha256=_yTqGDHsHbh83Zp0MjJ497ilIxkEJm5ccmxOW
|
|
|
577
577
|
langchain/embeddings/awa.py,sha256=1cnMiwKKU3ml3Zz5s5WIpcZSlYNVFFGCaeJilrxN8HE,626
|
|
578
578
|
langchain/embeddings/azure_openai.py,sha256=tmICp-NOrxoVFENBy4F_0-c0l3znf8bOtBBo-UZhajg,650
|
|
579
579
|
langchain/embeddings/baidu_qianfan_endpoint.py,sha256=w7BeE53d7o9Y8Xf0cZntmmziih7oBJcmF-jBW70KJlc,662
|
|
580
|
-
langchain/embeddings/base.py,sha256=
|
|
580
|
+
langchain/embeddings/base.py,sha256=s2gGb98gt12NsY8YnYuKtmB9irf0i2EsoWQ-4XpeXeg,7550
|
|
581
581
|
langchain/embeddings/bedrock.py,sha256=tCBm3vcN0B21Ga6KvNwhgJpgjobC2VEcmPApUmwXO4E,638
|
|
582
582
|
langchain/embeddings/bookend.py,sha256=qWaQXZw9Gq11kEdfIO71h1H0NaXqVKm45TiStxd2xaM,638
|
|
583
583
|
langchain/embeddings/cache.py,sha256=69qxrvD4S5gtQvzv72a4sP9cES-KE3fH908C1XRDIDI,10187
|
|
@@ -635,7 +635,7 @@ langchain/evaluation/criteria/__init__.py,sha256=FE5qrrz5JwWXJWXCzdyNRevEPfmmfBf
|
|
|
635
635
|
langchain/evaluation/criteria/eval_chain.py,sha256=JkBEsgNPymOT3OqTSveRAsIr2Sk1O1oWjJZ664t0BuM,21279
|
|
636
636
|
langchain/evaluation/criteria/prompt.py,sha256=6OgXmdvlYVzRMeAxa1fYGIxqeNAz1NkFCZ6ezLgUnZM,1756
|
|
637
637
|
langchain/evaluation/embedding_distance/__init__.py,sha256=YLtGUI4ZMxjsn2Q0dGZ-R9YMFgZsarfJv9qzNEnrLQs,324
|
|
638
|
-
langchain/evaluation/embedding_distance/base.py,sha256=
|
|
638
|
+
langchain/evaluation/embedding_distance/base.py,sha256=h1loaVznNh0KMTjppKcMj8yhkcp2DuRKeJIYai8jRQY,17212
|
|
639
639
|
langchain/evaluation/exact_match/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
640
640
|
langchain/evaluation/exact_match/base.py,sha256=BykyjgKQ94391eDODzn3m1RXao9ZSXtc9wiww_fysXI,2751
|
|
641
641
|
langchain/evaluation/loading.py,sha256=1zUtEao_F9292O0fNHl8i93bw1V94RDsFwXZTWe4-pA,7296
|
|
@@ -681,7 +681,7 @@ langchain/indexes/prompts/__init__.py,sha256=5ohFoTxhpsRyltYRwAlmdaShczCPPkyvxbc
|
|
|
681
681
|
langchain/indexes/prompts/entity_extraction.py,sha256=gTKrAXGbbR3OKdtkgaq8UgigvNp8Q4oICcBHbaeVhOg,1952
|
|
682
682
|
langchain/indexes/prompts/entity_summarization.py,sha256=fqL7-zIdas0H1SXXUS94otZSxeGpg-7o-Ppc_rxKeSk,1157
|
|
683
683
|
langchain/indexes/prompts/knowledge_triplet_extraction.py,sha256=ZbFrUM14ZcbhiXFfbF9k8Ef7nEw1n1IB1GYD3sPTtps,1554
|
|
684
|
-
langchain/indexes/vectorstore.py,sha256=
|
|
684
|
+
langchain/indexes/vectorstore.py,sha256=OXgTUJ64d4FyxpWLbIxr2usF2rw515Cgp6kKyU6pnDA,9519
|
|
685
685
|
langchain/input.py,sha256=9OczJo7x4KQPqxSxihmP8hDsl7j14xosDrid-6hrjRY,283
|
|
686
686
|
langchain/llms/__init__.py,sha256=OPno9fx7ODpOZkpfyO4Q5TSseRhsnFfBy6HP7W6BdBw,17101
|
|
687
687
|
langchain/llms/ai21.py,sha256=73VaSKgn0M_QPmg9fgLNoJLcsCaIz8y1p25bmYpzvFw,735
|
|
@@ -775,7 +775,7 @@ langchain/load/serializable.py,sha256=6iZp1sg_ozIDqXTDEk60IP89UEwZEJ4j0oMaHascLK
|
|
|
775
775
|
langchain/memory/__init__.py,sha256=kQFlaG2Yuz1Y7U8e3Ngbv-13I3BPGKAI06Lz9sL-Lbc,5574
|
|
776
776
|
langchain/memory/buffer.py,sha256=4QRrzB1Xl57jbHv8Buu1nA_xNiwt-qb8xqCFHzLxrZg,6005
|
|
777
777
|
langchain/memory/buffer_window.py,sha256=ETXU6PWlOaCPlge01fb-FjLgaRio2Cps197Z0krCVII,1986
|
|
778
|
-
langchain/memory/chat_memory.py,sha256=
|
|
778
|
+
langchain/memory/chat_memory.py,sha256=zsOiuI0kttGPu3lso_33LA3w_NNTU1amBG1llryoPVY,3447
|
|
779
779
|
langchain/memory/chat_message_histories/__init__.py,sha256=AdCCNl_rxX4OVVLK6ZwwpMTo8VXzAS4v9bH1v2QjHec,3506
|
|
780
780
|
langchain/memory/chat_message_histories/astradb.py,sha256=KeIpJKN4LWHdjdpoeStBn8xazqoP0mVHCqZB1lw_AS4,692
|
|
781
781
|
langchain/memory/chat_message_histories/cassandra.py,sha256=OTSR2lgFyBQWZpw1Gw-aE9Kmtxth8JQGzhN_Qd5mKwM,698
|
|
@@ -872,7 +872,7 @@ langchain/retrievers/document_compressors/chain_extract.py,sha256=v0F2uIV5KS0BSc
|
|
|
872
872
|
langchain/retrievers/document_compressors/chain_extract_prompt.py,sha256=FezN4Fk0tRcRFcD1Nf1r2SUyUt49yQKzdcV_iCQj6rE,366
|
|
873
873
|
langchain/retrievers/document_compressors/chain_filter.py,sha256=hWDsvyeVvcqGZkEtN-7FIE-iZq7HA-oD-JwBeXzwKW0,4719
|
|
874
874
|
langchain/retrievers/document_compressors/chain_filter_prompt.py,sha256=FTQRPiEsZ0Q9MQXXkpBwxtcqJ9D6Zq0GbuTmMpXHobA,231
|
|
875
|
-
langchain/retrievers/document_compressors/cohere_rerank.py,sha256=
|
|
875
|
+
langchain/retrievers/document_compressors/cohere_rerank.py,sha256=7U35vqEdslr43q8H74CUzcDvbXuZqLnK8-MH8VrlKWo,4567
|
|
876
876
|
langchain/retrievers/document_compressors/cross_encoder.py,sha256=_Z7SoPSfOUSk-rNIHX2lQgYV0TgVMKf3F9AnTH7EFiM,393
|
|
877
877
|
langchain/retrievers/document_compressors/cross_encoder_rerank.py,sha256=ThgVrX8NeXFzE4eoftBoa1yz-sBJiDb-JISQa9Hep2k,1542
|
|
878
878
|
langchain/retrievers/document_compressors/embeddings_filter.py,sha256=_04uA8wOw5Eb5rzlu-6rLqxi9u7kqeD8t4xd9VsB_PA,5217
|
|
@@ -880,7 +880,7 @@ langchain/retrievers/document_compressors/flashrank_rerank.py,sha256=Eo86fJ_T2Ib
|
|
|
880
880
|
langchain/retrievers/document_compressors/listwise_rerank.py,sha256=i3dCqXBF27_sHPGxWOlCkVjt4s85QM0ikHZtPp2LpDs,5127
|
|
881
881
|
langchain/retrievers/elastic_search_bm25.py,sha256=eRboOkRQj-_E53gUQIZzxQ1bX0-uEMv7LAQSD7K7Qf8,665
|
|
882
882
|
langchain/retrievers/embedchain.py,sha256=IUnhr3QK7IJ4IMHZDrTBpZuVQ1kyxhG-bAjmOMXb5eA,644
|
|
883
|
-
langchain/retrievers/ensemble.py,sha256=
|
|
883
|
+
langchain/retrievers/ensemble.py,sha256=FaGOosQHiU_eqhC9-mw10mek4v0l74qivX57Ttp-1Mo,10622
|
|
884
884
|
langchain/retrievers/google_cloud_documentai_warehouse.py,sha256=wJZu2kOHjrBOpTeaPBxyKMIA9OlMuiZ4kul2FG1lJ0k,695
|
|
885
885
|
langchain/retrievers/google_vertex_ai_search.py,sha256=MlYVMne4jYU7lif0y5A-cQNC89DPnsCRljrQPm80GKQ,1040
|
|
886
886
|
langchain/retrievers/kay.py,sha256=rvIPgoA7IrNsYeJ2B4J-gaviS84inzmlifKoNWKEgc8,629
|
|
@@ -985,7 +985,7 @@ langchain/smith/evaluation/__init__.py,sha256=z9uREFLECT3nu7WKmGV4aSEXUTTeaCOLx8
|
|
|
985
985
|
langchain/smith/evaluation/config.py,sha256=_bJ0gHUjHudKKW53zvOQFvZMEmFPqhHz-qXjDAdcMyI,13449
|
|
986
986
|
langchain/smith/evaluation/name_generation.py,sha256=IWocrWNjWnV8GhHJ7BrbGcWK1v9TUikzubpSBNz4Px4,9936
|
|
987
987
|
langchain/smith/evaluation/progress.py,sha256=yFa-v03LPwk4UbZl3PcoO31hAJgORZ5luJ429isZDIA,3310
|
|
988
|
-
langchain/smith/evaluation/runner_utils.py,sha256=
|
|
988
|
+
langchain/smith/evaluation/runner_utils.py,sha256=zh-fImKzLQFyg13hhFScfWamLO1bZm3WUo-PxfxsYOs,54203
|
|
989
989
|
langchain/smith/evaluation/string_run_evaluator.py,sha256=PVy7CIlADMrGjmsDT3SYJ7utA28T7V8TLpJnDFqDqDI,17149
|
|
990
990
|
langchain/smith/evaluation/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
991
991
|
langchain/sql_database.py,sha256=PbNTfJjIUemMO9ZkLiMIpKF-9GJ7Kto3ShcQrLPoOqk,664
|
|
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
|
|
|
1335
1335
|
langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
|
|
1336
1336
|
langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
|
|
1337
1337
|
langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
|
|
1338
|
-
langchain-0.3.
|
|
1339
|
-
langchain-0.3.
|
|
1340
|
-
langchain-0.3.
|
|
1341
|
-
langchain-0.3.
|
|
1342
|
-
langchain-0.3.
|
|
1338
|
+
langchain-0.3.16.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
|
|
1339
|
+
langchain-0.3.16.dist-info/METADATA,sha256=PbAglzKagFXWtp3-u92vKMBd5yKI2ZKvzqcPrWxe58I,7127
|
|
1340
|
+
langchain-0.3.16.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
|
1341
|
+
langchain-0.3.16.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
|
|
1342
|
+
langchain-0.3.16.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|