langchain 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain might be problematic.
- langchain/agents/openai_assistant/base.py +2 -1
- langchain/chains/combine_documents/stuff.py +8 -4
- langchain/chat_models/base.py +605 -7
- langchain/retrievers/multi_vector.py +16 -0
- {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/METADATA +2 -2
- {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/RECORD +9 -9
- {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/LICENSE +0 -0
- {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/WHEEL +0 -0
- {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/entry_points.txt +0 -0
langchain/agents/openai_assistant/base.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations

+import asyncio
 import json
 from json import JSONDecodeError
 from time import sleep
@@ -742,5 +743,5 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
         )
         in_progress = run.status in ("in_progress", "queued")
         if in_progress:
-            sleep(self.check_every_ms / 1000)
+            await asyncio.sleep(self.check_every_ms / 1000)
         return run
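The substantive change here is in the async polling path of `OpenAIAssistantRunnable`: a blocking `time.sleep` stalls the entire event loop, while `await asyncio.sleep` suspends only the waiting task. A minimal sketch of the pattern (the `check_run` callable is a hypothetical stand-in for the Assistants API polling call, not langchain code):

```python
import asyncio

async def wait_until_done(check_run, check_every_ms: float = 1000.0):
    """Poll until the run leaves its in-progress states.

    check_run: hypothetical async callable returning an object with .status.
    """
    while True:
        run = await check_run()
        if run.status not in ("in_progress", "queued"):
            return run
        # time.sleep() here would freeze every other coroutine on the loop;
        # asyncio.sleep() yields control back to the event loop instead.
        await asyncio.sleep(check_every_ms / 1000)
```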
langchain/chains/combine_documents/stuff.py
CHANGED
@@ -27,13 +27,14 @@ def create_stuff_documents_chain(
     output_parser: Optional[BaseOutputParser] = None,
     document_prompt: Optional[BasePromptTemplate] = None,
     document_separator: str = DEFAULT_DOCUMENT_SEPARATOR,
+    document_variable_name: str = DOCUMENTS_KEY,
 ) -> Runnable[Dict[str, Any], Any]:
     """Create a chain for passing a list of Documents to a model.

     Args:
         llm: Language model.
-        prompt: Prompt template. Must contain input variable "context"
-            used for passing in the formatted documents.
+        prompt: Prompt template. Must contain input variable "context" (override by
+            setting document_variable), which will be used for passing in the formatted documents.
         output_parser: Output parser. Defaults to StrOutputParser.
         document_prompt: Prompt used for formatting each document into a string. Input
             variables can be "page_content" or any metadata keys that are in all
@@ -42,6 +43,8 @@ def create_stuff_documents_chain(
             automatically retrieved from the `Document.metadata` dictionary. Default to
             a prompt that only contains `Document.page_content`.
         document_separator: String separator to use between formatted document strings.
+        document_variable_name: Variable name to use for the formatted documents in the prompt.
+            Defaults to "context".

     Returns:
         An LCEL Runnable. The input is a dictionary that must have a "context" key that
@@ -78,11 +81,12 @@ def create_stuff_documents_chain(

     def format_docs(inputs: dict) -> str:
         return document_separator.join(
-            format_document(doc, _document_prompt)
+            format_document(doc, _document_prompt)
+            for doc in inputs[document_variable_name]
         )

     return (
-        RunnablePassthrough.assign(**{
+        RunnablePassthrough.assign(**{document_variable_name: format_docs}).with_config(
             run_name="format_inputs"
         )
         | prompt
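The new `document_variable_name` parameter lets callers rename the prompt variable that receives the formatted documents, which was previously fixed to "context". A hedged usage sketch; it assumes `langchain-openai` is installed, and the model name is illustrative:

```python
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer using only these sources:\n\n{source_docs}"),
        ("human", "{question}"),
    ]
)
# Formatted documents are injected under {source_docs} instead of {context}.
chain = create_stuff_documents_chain(
    ChatOpenAI(model="gpt-4o"),
    prompt,
    document_variable_name="source_docs",
)
chain.invoke(
    {
        "question": "What color is the sky?",
        "source_docs": [Document(page_content="The sky is blue.")],
    }
)
```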
langchain/chat_models/base.py
CHANGED
@@ -1,34 +1,107 @@
+from __future__ import annotations
+
+import warnings
 from importlib import util
-from typing import
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Dict,
+    Iterator,
+    List,
+    Literal,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+    overload,
+)

 from langchain_core._api import beta
-from langchain_core.language_models
+from langchain_core.language_models import (
     BaseChatModel,
+    LanguageModelInput,
     SimpleChatModel,
+)
+from langchain_core.language_models.chat_models import (
     agenerate_from_stream,
     generate_from_stream,
 )
+from langchain_core.messages import AnyMessage, BaseMessage
+from langchain_core.pydantic_v1 import BaseModel
+from langchain_core.runnables import Runnable, RunnableConfig
+from langchain_core.runnables.schema import StreamEvent
+from langchain_core.tools import BaseTool
+from langchain_core.tracers import RunLog, RunLogPatch
+from typing_extensions import TypeAlias

 __all__ = [
+    "init_chat_model",
+    # For backwards compatibility
     "BaseChatModel",
     "SimpleChatModel",
     "generate_from_stream",
     "agenerate_from_stream",
-    "init_chat_model",
 ]


+@overload
+def init_chat_model(  # type: ignore[overload-overlap]
+    model: str,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Literal[None] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> BaseChatModel: ...
+
+
+@overload
+def init_chat_model(
+    model: Literal[None] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Literal[None] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> _ConfigurableModel: ...
+
+
+@overload
+def init_chat_model(
+    model: Optional[str] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = ...,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> _ConfigurableModel: ...
+
+
 # FOR CONTRIBUTORS: If adding support for a new provider, please append the provider
 # name to the supported list in the docstring below. Do *not* change the order of the
 # existing providers.
 @beta()
 def init_chat_model(
-    model:
-
+    model: Optional[str] = None,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Optional[
+        Union[Literal["any"], List[str], Tuple[str, ...]]
+    ] = None,
+    config_prefix: Optional[str] = None,
+    **kwargs: Any,
+) -> Union[BaseChatModel, _ConfigurableModel]:
     """Initialize a ChatModel from the model name and provider.

     Must have the integration package corresponding to the model provider installed.

+    .. versionadded:: 0.2.7
+
+    .. versionchanged:: 0.2.8
+
     Args:
         model: The name of the model, e.g. "gpt-4o", "claude-3-opus-20240229".
         model_provider: The model provider. Supported model_provider values and the
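Taken together, the overloads encode the new dispatch rule: naming a `model` with no configurable fields yields a concrete `BaseChatModel`, while omitting the model (or requesting configurable fields) yields the `_ConfigurableModel` stand-in defined later in this file. For example, mirroring the docstring examples below:

```python
from langchain.chat_models import init_chat_model

# Concrete model, nothing configurable -> BaseChatModel.
fixed = init_chat_model("gpt-4o", model_provider="openai")

# No model named -> configurable_fields defaults to ("model", "model_provider"),
# so a _ConfigurableModel is returned and the real model is chosen at runtime.
flexible = init_chat_model()
```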
@@ -55,19 +128,43 @@ def init_chat_model(
             - gemini... -> google_vertexai
             - command... -> cohere
             - accounts/fireworks... -> fireworks
+        configurable_fields: Which model parameters are
+            configurable:
+            - None: No configurable fields.
+            - "any": All fields are configurable. *See Security Note below.*
+            - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
+
+            Fields are assumed to have config_prefix stripped if there is a
+            config_prefix. If model is specified, then defaults to None. If model is
+            not specified, then defaults to ``("model", "model_provider")``.
+
+            ***Security Note***: Setting ``configurable_fields="any"`` means fields like
+            api_key, base_url, etc. can be altered at runtime, potentially redirecting
+            model requests to a different service/user. Make sure that if you're
+            accepting untrusted configurations that you enumerate the
+            ``configurable_fields=(...)`` explicitly.
+
+        config_prefix: If config_prefix is a non-empty string then model will be
+            configurable at runtime via the
+            ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
+            config_prefix is an empty string then model will be configurable via
+            ``config["configurable"]["{param}"]``.
         kwargs: Additional keyword args to pass to
             ``<<selected ChatModel>>.__init__(model=model_name, **kwargs)``.

     Returns:
-
+        A BaseChatModel corresponding to the model_name and model_provider specified if
+        configurability is inferred to be False. If configurable, a chat model emulator
+        that initializes the underlying model at runtime once a config is passed in.

     Raises:
         ValueError: If model_provider cannot be inferred or isn't supported.
         ImportError: If the model provider integration package is not installed.

-
+    Initialize non-configurable models:
     .. code-block:: python

+        # pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
         from langchain.chat_models import init_chat_model

         gpt_4o = init_chat_model("gpt-4o", model_provider="openai", temperature=0)
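Following the Security Note, code that accepts untrusted configuration should enumerate the runtime-tunable fields rather than pass "any". A minimal sketch:

```python
from langchain.chat_models import init_chat_model

# Only "model" and "temperature" may be overridden through the config;
# credentials, base URLs, and other fields stay fixed at their init values.
model = init_chat_model(
    "gpt-4o",
    model_provider="openai",
    configurable_fields=("model", "temperature"),
)
model.invoke("what's your name", config={"configurable": {"temperature": 0.2}})
```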
@@ -77,7 +174,125 @@ def init_chat_model(
         gpt_4o.invoke("what's your name")
         claude_opus.invoke("what's your name")
         gemini_15.invoke("what's your name")
+
+
+    Create a partially configurable model with no default model:
+    .. code-block:: python
+
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model
+
+        # We don't need to specify configurable=True if a model isn't specified.
+        configurable_model = init_chat_model(temperature=0)
+
+        configurable_model.invoke(
+            "what's your name",
+            config={"configurable": {"model": "gpt-4o"}}
+        )
+        # GPT-4o response
+
+        configurable_model.invoke(
+            "what's your name",
+            config={"configurable": {"model": "claude-3-5-sonnet-20240620"}}
+        )
+        # claude-3.5 sonnet response
+
+    Create a fully configurable model with a default model and a config prefix:
+    .. code-block:: python
+
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model
+
+        configurable_model_with_default = init_chat_model(
+            "gpt-4o",
+            model_provider="openai",
+            configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
+            config_prefix="foo",
+            temperature=0
+        )
+
+        configurable_model_with_default.invoke("what's your name")
+        # GPT-4o response with temperature 0
+
+        configurable_model_with_default.invoke(
+            "what's your name",
+            config={
+                "configurable": {
+                    "foo_model": "claude-3-5-sonnet-20240620",
+                    "foo_model_provider": "anthropic",
+                    "foo_temperature": 0.6
+                }
+            }
+        )
+        # Claude-3.5 sonnet response with temperature 0.6
+
+    Bind tools to a configurable model:
+        You can call any ChatModel declarative methods on a configurable model in the
+        same way that you would with a normal model.
+
+    .. code-block:: python
+
+        # pip install langchain langchain-openai langchain-anthropic
+        from langchain.chat_models import init_chat_model
+        from langchain_core.pydantic_v1 import BaseModel, Field
+
+        class GetWeather(BaseModel):
+            '''Get the current weather in a given location'''
+
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+        class GetPopulation(BaseModel):
+            '''Get the current population in a given location'''
+
+            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
+
+        configurable_model = init_chat_model(
+            "gpt-4o",
+            configurable_fields=("model", "model_provider"),
+            temperature=0
+        )
+
+        configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?"
+        )
+        # GPT-4o response with tool calls
+
+        configurable_model_with_tools.invoke(
+            "Which city is hotter today and which is bigger: LA or NY?",
+            config={"configurable": {"model": "claude-3-5-sonnet-20240620"}}
+        )
+        # Claude-3.5 sonnet response with tools
     """  # noqa: E501
+    if not model and not configurable_fields:
+        configurable_fields = ("model", "model_provider")
+    config_prefix = config_prefix or ""
+    if config_prefix and not configurable_fields:
+        warnings.warn(
+            f"{config_prefix=} has been set but no fields are configurable. Set "
+            f"`configurable_fields=(...)` to specify the model params that are "
+            f"configurable."
+        )
+
+    if not configurable_fields:
+        return _init_chat_model_helper(
+            cast(str, model), model_provider=model_provider, **kwargs
+        )
+    else:
+        if model:
+            kwargs["model"] = model
+        if model_provider:
+            kwargs["model_provider"] = model_provider
+        return _ConfigurableModel(
+            default_config=kwargs,
+            config_prefix=config_prefix,
+            configurable_fields=configurable_fields,
+        )
+
+
+def _init_chat_model_helper(
+    model: str, *, model_provider: Optional[str] = None, **kwargs: Any
+) -> BaseChatModel:
     model_provider = model_provider or _attempt_infer_model_provider(model)
     if not model_provider:
         raise ValueError(
@@ -200,3 +415,386 @@ def _check_pkg(pkg: str) -> None:
             f"Unable to import {pkg_kebab}. Please install with "
             f"`pip install -U {pkg_kebab}`"
         )
+
+
+def _remove_prefix(s: str, prefix: str) -> str:
+    if s.startswith(prefix):
+        s = s[len(prefix) :]
+    return s
+
+
+_DECLARATIVE_METHODS = ("bind_tools", "with_structured_output")
+
+
+class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
+    def __init__(
+        self,
+        *,
+        default_config: Optional[dict] = None,
+        configurable_fields: Union[Literal["any"], List[str], Tuple[str, ...]] = "any",
+        config_prefix: str = "",
+        queued_declarative_operations: Sequence[Tuple[str, Tuple, Dict]] = (),
+    ) -> None:
+        self._default_config: dict = default_config or {}
+        self._configurable_fields: Union[Literal["any"], List[str]] = (
+            configurable_fields
+            if configurable_fields == "any"
+            else list(configurable_fields)
+        )
+        self._config_prefix = (
+            config_prefix + "_"
+            if config_prefix and not config_prefix.endswith("_")
+            else config_prefix
+        )
+        self._queued_declarative_operations: List[Tuple[str, Tuple, Dict]] = list(
+            queued_declarative_operations
+        )
+
+    def __getattr__(self, name: str) -> Any:
+        if name in _DECLARATIVE_METHODS:
+            # Declarative operations that cannot be applied until after an actual model
+            # object is instantiated. So instead of returning the actual operation,
+            # we record the operation and its arguments in a queue. This queue is
+            # then applied in order whenever we actually instantiate the model (in
+            # self._model()).
+            def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
+                queued_declarative_operations = list(
+                    self._queued_declarative_operations
+                )
+                queued_declarative_operations.append((name, args, kwargs))
+                return _ConfigurableModel(
+                    default_config=dict(self._default_config),
+                    configurable_fields=list(self._configurable_fields)
+                    if isinstance(self._configurable_fields, list)
+                    else self._configurable_fields,
+                    config_prefix=self._config_prefix,
+                    queued_declarative_operations=queued_declarative_operations,
+                )
+
+            return queue
+        elif self._default_config and (model := self._model()) and hasattr(model, name):
+            return getattr(model, name)
+        else:
+            msg = f"{name} is not a BaseChatModel attribute"
+            if self._default_config:
+                msg += " and is not implemented on the default model"
+            msg += "."
+            raise AttributeError(msg)
+
+    def _model(self, config: Optional[RunnableConfig] = None) -> Runnable:
+        params = {**self._default_config, **self._model_params(config)}
+        model = _init_chat_model_helper(**params)
+        for name, args, kwargs in self._queued_declarative_operations:
+            model = getattr(model, name)(*args, **kwargs)
+        return model
+
+    def _model_params(self, config: Optional[RunnableConfig]) -> dict:
+        config = config or {}
+        model_params = {
+            _remove_prefix(k, self._config_prefix): v
+            for k, v in config.get("configurable", {}).items()
+            if k.startswith(self._config_prefix)
+        }
+        if self._configurable_fields != "any":
+            model_params = {
+                k: v for k, v in model_params.items() if k in self._configurable_fields
+            }
+        return model_params
+
+    def with_config(
+        self,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> _ConfigurableModel:
+        """Bind config to a Runnable, returning a new Runnable."""
+        config = RunnableConfig(**(config or {}), **cast(RunnableConfig, kwargs))
+        model_params = self._model_params(config)
+        remaining_config = {k: v for k, v in config.items() if k != "configurable"}
+        remaining_config["configurable"] = {
+            k: v
+            for k, v in config.get("configurable", {}).items()
+            if _remove_prefix(k, self._config_prefix) not in model_params
+        }
+        queued_declarative_operations = list(self._queued_declarative_operations)
+        if remaining_config:
+            queued_declarative_operations.append(
+                ("with_config", (), {"config": remaining_config})
+            )
+        return _ConfigurableModel(
+            default_config={**self._default_config, **model_params},
+            configurable_fields=list(self._configurable_fields)
+            if isinstance(self._configurable_fields, list)
+            else self._configurable_fields,
+            config_prefix=self._config_prefix,
+            queued_declarative_operations=queued_declarative_operations,
+        )
+
+    @property
+    def InputType(self) -> TypeAlias:
+        """Get the input type for this runnable."""
+        from langchain_core.prompt_values import (
+            ChatPromptValueConcrete,
+            StringPromptValue,
+        )
+
+        # This is a version of LanguageModelInput which replaces the abstract
+        # base class BaseMessage with a union of its subclasses, which makes
+        # for a much better schema.
+        return Union[
+            str,
+            Union[StringPromptValue, ChatPromptValueConcrete],
+            List[AnyMessage],
+        ]
+
+    def invoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> Any:
+        return self._model(config).invoke(input, config=config, **kwargs)
+
+    async def ainvoke(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Any,
+    ) -> Any:
+        return await self._model(config).ainvoke(input, config=config, **kwargs)
+
+    def stream(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Iterator[Any]:
+        yield from self._model(config).stream(input, config=config, **kwargs)
+
+    async def astream(
+        self,
+        input: LanguageModelInput,
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> AsyncIterator[Any]:
+        async for x in self._model(config).astream(input, config=config, **kwargs):
+            yield x
+
+    def batch(
+        self,
+        inputs: List[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Optional[Any],
+    ) -> List[Any]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            return self._model(config).batch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            return super().batch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    async def abatch(
+        self,
+        inputs: List[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Optional[Any],
+    ) -> List[Any]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            return await self._model(config).abatch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            return await super().abatch(
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    def batch_as_completed(
+        self,
+        inputs: Sequence[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Any,
+    ) -> Iterator[Tuple[int, Union[Any, Exception]]]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            yield from self._model(cast(RunnableConfig, config)).batch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            yield from super().batch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            )
+
+    async def abatch_as_completed(
+        self,
+        inputs: Sequence[LanguageModelInput],
+        config: Optional[Union[RunnableConfig, Sequence[RunnableConfig]]] = None,
+        *,
+        return_exceptions: bool = False,
+        **kwargs: Any,
+    ) -> AsyncIterator[Tuple[int, Any]]:
+        config = config or None
+        # If <= 1 config use the underlying models batch implementation.
+        if config is None or isinstance(config, dict) or len(config) <= 1:
+            if isinstance(config, list):
+                config = config[0]
+            async for x in self._model(
+                cast(RunnableConfig, config)
+            ).abatch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            ):
+                yield x
+        # If multiple configs default to Runnable.batch which uses executor to invoke
+        # in parallel.
+        else:
+            async for x in super().abatch_as_completed(  # type: ignore[call-overload]
+                inputs, config=config, return_exceptions=return_exceptions, **kwargs
+            ):
+                yield x
+
+    def transform(
+        self,
+        input: Iterator[LanguageModelInput],
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> Iterator[Any]:
+        for x in self._model(config).transform(input, config=config, **kwargs):
+            yield x
+
+    async def atransform(
+        self,
+        input: AsyncIterator[LanguageModelInput],
+        config: Optional[RunnableConfig] = None,
+        **kwargs: Optional[Any],
+    ) -> AsyncIterator[Any]:
+        async for x in self._model(config).atransform(input, config=config, **kwargs):
+            yield x
+
+    @overload
+    def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: Literal[True] = True,
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunLogPatch]: ...
+
+    @overload
+    def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: Literal[False],
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[RunLog]: ...
+
+    async def astream_log(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        diff: bool = True,
+        with_streamed_output_list: bool = True,
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]:
+        async for x in self._model(config).astream_log(  # type: ignore[call-overload, misc]
+            input,
+            config=config,
+            diff=diff,
+            with_streamed_output_list=with_streamed_output_list,
+            include_names=include_names,
+            include_types=include_types,
+            include_tags=include_tags,
+            exclude_tags=exclude_tags,
+            exclude_types=exclude_types,
+            exclude_names=exclude_names,
+            **kwargs,
+        ):
+            yield x
+
+    async def astream_events(
+        self,
+        input: Any,
+        config: Optional[RunnableConfig] = None,
+        *,
+        version: Literal["v1", "v2"],
+        include_names: Optional[Sequence[str]] = None,
+        include_types: Optional[Sequence[str]] = None,
+        include_tags: Optional[Sequence[str]] = None,
+        exclude_names: Optional[Sequence[str]] = None,
+        exclude_types: Optional[Sequence[str]] = None,
+        exclude_tags: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[StreamEvent]:
+        async for x in self._model(config).astream_events(
+            input,
+            config=config,
+            version=version,
+            include_names=include_names,
+            include_types=include_types,
+            include_tags=include_tags,
+            exclude_tags=exclude_tags,
+            exclude_types=exclude_types,
+            exclude_names=exclude_names,
+            **kwargs,
+        ):
+            yield x
+
+    # Explicitly added to satisfy downstream linters.
+    def bind_tools(
+        self,
+        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        **kwargs: Any,
+    ) -> Runnable[LanguageModelInput, BaseMessage]:
+        return self.__getattr__("bind_tools")(tools, **kwargs)
+
+    # Explicitly added to satisfy downstream linters.
+    def with_structured_output(
+        self, schema: Union[Dict, Type[BaseModel]], **kwargs: Any
+    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+        return self.__getattr__("with_structured_output")(schema, **kwargs)
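`_ConfigurableModel` defers declarative calls such as `bind_tools` by queuing them, then replays the queue once `_model()` builds the real chat model from the merged config. A standalone sketch of that queue-and-replay pattern (an illustration of the idea, not the langchain code):

```python
from typing import Any, Callable, Dict, List, Tuple

class DeferredProxy:
    """Records method calls, then replays them on a freshly built object."""

    def __init__(self, factory: Callable[..., Any]) -> None:
        self._factory = factory
        self._queued: List[Tuple[str, Tuple, Dict]] = []

    def defer(self, name: str, *args: Any, **kwargs: Any) -> "DeferredProxy":
        proxy = DeferredProxy(self._factory)
        # Copy-on-write, as in _ConfigurableModel: the original proxy is unchanged.
        proxy._queued = [*self._queued, (name, args, kwargs)]
        return proxy

    def build(self, **params: Any) -> Any:
        obj = self._factory(**params)
        for name, args, kwargs in self._queued:
            obj = getattr(obj, name)(*args, **kwargs)  # replay in recorded order
        return obj
```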
langchain/retrievers/multi_vector.py
CHANGED
@@ -19,6 +19,8 @@ class SearchType(str, Enum):

     similarity = "similarity"
     """Similarity search."""
+    similarity_score_threshold = "similarity_score_threshold"
+    """Similarity search with a score threshold."""
     mmr = "mmr"
     """Maximal Marginal Relevance reranking of similarity search."""

@@ -64,6 +66,13 @@ class MultiVectorRetriever(BaseRetriever):
             sub_docs = self.vectorstore.max_marginal_relevance_search(
                 query, **self.search_kwargs
             )
+        elif self.search_type == SearchType.similarity_score_threshold:
+            sub_docs_and_similarities = (
+                self.vectorstore.similarity_search_with_relevance_scores(
+                    query, **self.search_kwargs
+                )
+            )
+            sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
         else:
             sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)

@@ -89,6 +98,13 @@ class MultiVectorRetriever(BaseRetriever):
             sub_docs = await self.vectorstore.amax_marginal_relevance_search(
                 query, **self.search_kwargs
             )
+        elif self.search_type == SearchType.similarity_score_threshold:
+            sub_docs_and_similarities = (
+                await self.vectorstore.asimilarity_search_with_relevance_scores(
+                    query, **self.search_kwargs
+                )
+            )
+            sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
         else:
             sub_docs = await self.vectorstore.asimilarity_search(
                 query, **self.search_kwargs
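With the new `similarity_score_threshold` search type, `search_kwargs` is forwarded to the vector store's `similarity_search_with_relevance_scores`, so a relevance cutoff can be supplied there. A hedged usage sketch; `vectorstore` and `store` stand for an already initialized vector store and docstore:

```python
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType

retriever = MultiVectorRetriever(
    vectorstore=vectorstore,  # any store with similarity_search_with_relevance_scores
    docstore=store,
    search_type=SearchType.similarity_score_threshold,
    # Forwarded to similarity_search_with_relevance_scores(); sub-documents
    # scoring below the threshold are dropped before parent-document lookup.
    search_kwargs={"k": 5, "score_threshold": 0.5},
)
```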
{langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain
-Version: 0.2.7
+Version: 0.2.9
 Summary: Building applications with LLMs through composability
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -15,7 +15,7 @@ Requires-Dist: PyYAML (>=5.3)
 Requires-Dist: SQLAlchemy (>=1.4,<3)
 Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
 Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
-Requires-Dist: langchain-core (>=0.2.
+Requires-Dist: langchain-core (>=0.2.20,<0.3.0)
 Requires-Dist: langchain-text-splitters (>=0.2.0,<0.3.0)
 Requires-Dist: langsmith (>=0.1.17,<0.2.0)
 Requires-Dist: numpy (>=1,<2) ; python_version < "3.12"
{langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/RECORD
CHANGED
@@ -111,7 +111,7 @@ langchain/agents/mrkl/base.py,sha256=cj73s3Z0JxQotFgqhnxEDNkZT4nlL69g3ZleHkUuma4
 langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
 langchain/agents/mrkl/prompt.py,sha256=2dTMP2lAWiLvCtuEijgQRjbKDlbPEnmx77duMwdJ7e4,641
 langchain/agents/openai_assistant/__init__.py,sha256=Xssaqoxrix3hn1gKSOLmDRQzTxAoJk0ProGXmXQe8Mw,114
-langchain/agents/openai_assistant/base.py,sha256=
+langchain/agents/openai_assistant/base.py,sha256=L_YWEbv9f2MBUae7wpWIka3RroKzU_XXwVpbdP5XfAE,28257
 langchain/agents/openai_functions_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain/agents/openai_functions_agent/agent_token_buffer_memory.py,sha256=t3J3Qku4lvs-EGTbPRzhrxAwTVBoEj4tu5wbl5u2-N0,3764
 langchain/agents/openai_functions_agent/base.py,sha256=sMPCrDXhJDZ_fnkpy0fJ0Ptfm02w0_3_JXm9Nktq3NQ,13498
@@ -221,7 +221,7 @@ langchain/chains/combine_documents/map_reduce.py,sha256=s9Z3r-STCqBs3cnbrsaE6Cxw
 langchain/chains/combine_documents/map_rerank.py,sha256=oZAWR7_4v_L9PZDve3PVA-f3kU2WJiBTP98NqSwB2Is,9039
 langchain/chains/combine_documents/reduce.py,sha256=kpUPpvs2dr1ewqdisaw8-O_m0_uu_uKO6wXfzevZjo8,13894
 langchain/chains/combine_documents/refine.py,sha256=CZJ0-KpIVd2AUucYJPPvW93d9FXfZVZgkOMXUqLrv6Y,9138
-langchain/chains/combine_documents/stuff.py,sha256=
+langchain/chains/combine_documents/stuff.py,sha256=zvoTEVO9ML-mrt7rz0hjqe95A7q-lr9YnkHdGi8Xyis,11209
 langchain/chains/constitutional_ai/__init__.py,sha256=Woq_Efl5d-MSTkhpg7HLts3kXysJVZLiz3tr05NTf5Q,107
 langchain/chains/constitutional_ai/base.py,sha256=ANJ2nkpYt8cJ7_PoBVDm3DDGSTO1-oT3h8DFoH2gQRQ,6342
 langchain/chains/constitutional_ai/models.py,sha256=qa5pmegx6iPmL7hJzhb6ytUkIZMSqbeRzua_M2hx_PE,284
@@ -360,7 +360,7 @@ langchain/chat_models/azure_openai.py,sha256=aRNol2PNC49PmvdZnwjhQeMFRDOOelPNAXz
 langchain/chat_models/azureml_endpoint.py,sha256=6mxXm8UFXataLp0NYRGA88V3DpiNKPo095u_JGj7XGE,863
 langchain/chat_models/baichuan.py,sha256=3-GveFoF5ZNyLdRNK6V4i3EDDjdseOTFWbCMhDbtO9w,643
 langchain/chat_models/baidu_qianfan_endpoint.py,sha256=CZrX2SMpbE9H7wBXNC6rGvw-YqQl9zjuJrClYQxEzuI,715
-langchain/chat_models/base.py,sha256=
+langchain/chat_models/base.py,sha256=5fPcdb1ZaFuYPI0tIKQ1z5Qo8gLD50ICHOkX-x6wwP8,30099
 langchain/chat_models/bedrock.py,sha256=HRV3T_0mEnZ8LvJJqAA_UVpt-_03G715oIgomRJw55M,757
 langchain/chat_models/cohere.py,sha256=EYOECHX-nKRhZVfCfmFGZ2lr51PzaB5OvOEqmBCu1fI,633
 langchain/chat_models/databricks.py,sha256=5_QkC5lG4OldaHC2FS0XylirJouyZx1YT95SKwc12M0,653
@@ -890,7 +890,7 @@ langchain/retrievers/merger_retriever.py,sha256=uzwpkarGfgByXbqCFYNHXL-mczqfTgJI
 langchain/retrievers/metal.py,sha256=E9KmySjhmpq_kZhDhOLS8sH4KpbOnWUodR4-3Kd2E30,629
 langchain/retrievers/milvus.py,sha256=f_vi-uodWcS5PyYq-8QD8S7Bx1t_uVswQtqG2D35XnE,796
 langchain/retrievers/multi_query.py,sha256=2G90v5RxXiqM5JWIz6k8cgSdcrJ4uSGR6cebbCYFhbU,7049
-langchain/retrievers/multi_vector.py,sha256=
+langchain/retrievers/multi_vector.py,sha256=rb5gDEAzhzHURJ-VfKGnvq7erZ-xWklnk8RQCBTNsds,4731
 langchain/retrievers/outline.py,sha256=uNuqhoHkfDx73ZEYbHbFjVmJfW-eAdLUzyC9EuoV608,635
 langchain/retrievers/parent_document_retriever.py,sha256=Xhy2tnAl1dmrajt-iu78BiFFp2SEGoy0Zi2lIlljDFY,5236
 langchain/retrievers/pinecone_hybrid_search.py,sha256=oEbmHdKIZ86H1O8GhzNC1KVfKb_xAJdRJXpODMY6X3Y,674
@@ -1334,8 +1334,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
 langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
 langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
 langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
-langchain-0.2.7.dist-info/LICENSE,sha256=
-langchain-0.2.7.dist-info/METADATA,sha256=
-langchain-0.2.7.dist-info/WHEEL,sha256=
-langchain-0.2.7.dist-info/entry_points.txt,sha256=
-langchain-0.2.7.dist-info/RECORD,,
+langchain-0.2.9.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
+langchain-0.2.9.dist-info/METADATA,sha256=rRRjxzIaypaG1Ho9TlqNp6ruV5ii_yDIhFFC714s8xo,6854
+langchain-0.2.9.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain-0.2.9.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
+langchain-0.2.9.dist-info/RECORD,,
File without changes: {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/LICENSE
File without changes: {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/WHEEL
File without changes: {langchain-0.2.7.dist-info → langchain-0.2.9.dist-info}/entry_points.txt