langchain-core 1.0.0a6__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +23 -26
- langchain_core/_api/deprecation.py +51 -64
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +3 -4
- langchain_core/agents.py +20 -22
- langchain_core/caches.py +65 -66
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +321 -336
- langchain_core/callbacks/file.py +44 -44
- langchain_core/callbacks/manager.py +436 -513
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +60 -57
- langchain_core/chat_history.py +53 -68
- langchain_core/document_loaders/base.py +27 -25
- langchain_core/document_loaders/blob_loaders.py +1 -1
- langchain_core/document_loaders/langsmith.py +44 -48
- langchain_core/documents/__init__.py +23 -3
- langchain_core/documents/base.py +98 -90
- langchain_core/documents/compressor.py +10 -10
- langchain_core/documents/transformers.py +34 -35
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +28 -32
- langchain_core/exceptions.py +21 -20
- langchain_core/globals.py +3 -151
- langchain_core/indexing/__init__.py +1 -1
- langchain_core/indexing/api.py +121 -126
- langchain_core/indexing/base.py +73 -75
- langchain_core/indexing/in_memory.py +4 -6
- langchain_core/language_models/__init__.py +14 -29
- langchain_core/language_models/_utils.py +58 -61
- langchain_core/language_models/base.py +53 -162
- langchain_core/language_models/chat_models.py +298 -387
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +42 -36
- langchain_core/language_models/llms.py +125 -235
- langchain_core/load/dump.py +9 -12
- langchain_core/load/load.py +18 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +42 -40
- langchain_core/messages/__init__.py +10 -16
- langchain_core/messages/ai.py +148 -148
- langchain_core/messages/base.py +53 -51
- langchain_core/messages/block_translators/__init__.py +19 -22
- langchain_core/messages/block_translators/anthropic.py +6 -6
- langchain_core/messages/block_translators/bedrock_converse.py +5 -5
- langchain_core/messages/block_translators/google_genai.py +10 -7
- langchain_core/messages/block_translators/google_vertexai.py +4 -32
- langchain_core/messages/block_translators/groq.py +117 -21
- langchain_core/messages/block_translators/langchain_v0.py +5 -5
- langchain_core/messages/block_translators/openai.py +11 -11
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +337 -328
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +24 -31
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +19 -29
- langchain_core/messages/tool.py +74 -90
- langchain_core/messages/utils.py +474 -504
- langchain_core/output_parsers/__init__.py +13 -10
- langchain_core/output_parsers/base.py +61 -61
- langchain_core/output_parsers/format_instructions.py +9 -4
- langchain_core/output_parsers/json.py +12 -10
- langchain_core/output_parsers/list.py +21 -23
- langchain_core/output_parsers/openai_functions.py +49 -47
- langchain_core/output_parsers/openai_tools.py +16 -21
- langchain_core/output_parsers/pydantic.py +13 -14
- langchain_core/output_parsers/string.py +5 -5
- langchain_core/output_parsers/transform.py +15 -17
- langchain_core/output_parsers/xml.py +35 -34
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +18 -18
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +10 -11
- langchain_core/outputs/llm_result.py +10 -10
- langchain_core/prompt_values.py +11 -17
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +48 -56
- langchain_core/prompts/chat.py +275 -325
- langchain_core/prompts/dict.py +5 -5
- langchain_core/prompts/few_shot.py +81 -88
- langchain_core/prompts/few_shot_with_templates.py +11 -13
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +4 -6
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +24 -39
- langchain_core/prompts/string.py +26 -10
- langchain_core/prompts/structured.py +49 -53
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +61 -198
- langchain_core/runnables/base.py +1476 -1626
- langchain_core/runnables/branch.py +53 -57
- langchain_core/runnables/config.py +72 -89
- langchain_core/runnables/configurable.py +120 -137
- langchain_core/runnables/fallbacks.py +83 -79
- langchain_core/runnables/graph.py +91 -97
- langchain_core/runnables/graph_ascii.py +27 -28
- langchain_core/runnables/graph_mermaid.py +38 -50
- langchain_core/runnables/graph_png.py +15 -16
- langchain_core/runnables/history.py +135 -148
- langchain_core/runnables/passthrough.py +124 -150
- langchain_core/runnables/retry.py +46 -51
- langchain_core/runnables/router.py +25 -30
- langchain_core/runnables/schema.py +75 -80
- langchain_core/runnables/utils.py +60 -67
- langchain_core/stores.py +85 -121
- langchain_core/structured_query.py +8 -8
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +284 -229
- langchain_core/tools/convert.py +160 -155
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +12 -11
- langchain_core/tools/simple.py +19 -24
- langchain_core/tools/structured.py +32 -39
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +97 -99
- langchain_core/tracers/context.py +29 -52
- langchain_core/tracers/core.py +49 -53
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +65 -64
- langchain_core/tracers/langchain.py +21 -21
- langchain_core/tracers/log_stream.py +45 -45
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +16 -16
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +2 -2
- langchain_core/utils/aiter.py +57 -61
- langchain_core/utils/env.py +9 -9
- langchain_core/utils/function_calling.py +89 -186
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +36 -40
- langchain_core/utils/json.py +4 -3
- langchain_core/utils/json_schema.py +9 -9
- langchain_core/utils/mustache.py +8 -10
- langchain_core/utils/pydantic.py +33 -35
- langchain_core/utils/strings.py +6 -9
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +66 -62
- langchain_core/vectorstores/base.py +182 -216
- langchain_core/vectorstores/in_memory.py +101 -176
- langchain_core/vectorstores/utils.py +5 -5
- langchain_core/version.py +1 -1
- langchain_core-1.0.3.dist-info/METADATA +69 -0
- langchain_core-1.0.3.dist-info/RECORD +172 -0
- {langchain_core-1.0.0a6.dist-info → langchain_core-1.0.3.dist-info}/WHEEL +1 -1
- langchain_core/memory.py +0 -120
- langchain_core/messages/block_translators/ollama.py +0 -47
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-1.0.0a6.dist-info/METADATA +0 -67
- langchain_core-1.0.0a6.dist-info/RECORD +0 -181
- langchain_core-1.0.0a6.dist-info/entry_points.txt +0 -4
langchain_core/vectorstores/in_memory.py CHANGED
@@ -4,17 +4,15 @@ from __future__ import annotations

 import json
 import uuid
+from collections.abc import Callable
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
-    Callable,
-    Optional,
 )

 from typing_extensions import override

-from langchain_core._api import deprecated
 from langchain_core.documents import Document
 from langchain_core.load import dumpd, load
 from langchain_core.vectorstores import VectorStore
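The import hunk sets the pattern for the rest of the file: `typing.Optional` and `typing.Callable` are dropped in favor of PEP 604 `X | None` unions and `collections.abc.Callable`, and the now-unused `deprecated` helper goes away with the `upsert` APIs removed further down. A before/after sketch of the annotation style (illustrative function, not code from the package):

```python
from collections.abc import Callable

from langchain_core.documents import Document

# Before (1.0.0a6 style):
#     from typing import Callable, Optional
#     def search(filter: Optional[Callable[[Document], bool]] = None) -> None: ...


# After (1.0.3 style, Python 3.10+):
def search(filter: Callable[[Document], bool] | None = None) -> None:  # noqa: A002
    """Accept an optional predicate over `Document` objects."""
```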
@@ -25,7 +23,6 @@ if TYPE_CHECKING:
     from collections.abc import Iterator, Sequence

     from langchain_core.embeddings import Embeddings
-    from langchain_core.indexing import UpsertResponse

 try:
     import numpy as np
@@ -41,126 +38,124 @@ class InMemoryVectorStore(VectorStore):
     Uses a dictionary, and computes cosine similarity for search using numpy.

     Setup:
-        Install ``langchain-core``.
+        Install `langchain-core`.

-        .. code-block:: bash
-
-            pip install -U langchain-core
+        ```bash
+        pip install -U langchain-core
+        ```

     Key init args — indexing params:
         embedding_function: Embeddings
             Embedding function to use.

     Instantiate:
-        .. code-block:: python
-
-            from langchain_core.vectorstores import InMemoryVectorStore
-            from langchain_openai import OpenAIEmbeddings
+        ```python
+        from langchain_core.vectorstores import InMemoryVectorStore
+        from langchain_openai import OpenAIEmbeddings

-            vector_store = InMemoryVectorStore(OpenAIEmbeddings())
+        vector_store = InMemoryVectorStore(OpenAIEmbeddings())
+        ```

     Add Documents:
-        .. code-block:: python
-
-            from langchain_core.documents import Document
+        ```python
+        from langchain_core.documents import Document

-            document_1 = Document(id="1", page_content="foo", metadata={"baz": "bar"})
-            document_2 = Document(id="2", page_content="thud", metadata={"bar": "baz"})
-            document_3 = Document(id="3", page_content="i will be deleted :(")
+        document_1 = Document(id="1", page_content="foo", metadata={"baz": "bar"})
+        document_2 = Document(id="2", page_content="thud", metadata={"bar": "baz"})
+        document_3 = Document(id="3", page_content="i will be deleted :(")

-            documents = [document_1, document_2, document_3]
-            vector_store.add_documents(documents=documents)
+        documents = [document_1, document_2, document_3]
+        vector_store.add_documents(documents=documents)
+        ```

     Inspect documents:
-        .. code-block:: python
-
-            top_n = 10
-            for index, (id, doc) in enumerate(vector_store.store.items()):
-                if index < top_n:
-                    # docs have keys 'id', 'vector', 'text', 'metadata'
-                    print(f"{id}: {doc['text']}")
-                else:
-                    break
+        ```python
+        top_n = 10
+        for index, (id, doc) in enumerate(vector_store.store.items()):
+            if index < top_n:
+                # docs have keys 'id', 'vector', 'text', 'metadata'
+                print(f"{id}: {doc['text']}")
+            else:
+                break
+        ```

     Delete Documents:
-        .. code-block:: python
-
-            vector_store.delete(ids=["3"])
+        ```python
+        vector_store.delete(ids=["3"])
+        ```

     Search:
-        .. code-block:: python
+        ```python
+        results = vector_store.similarity_search(query="thud", k=1)
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```

-            results = vector_store.similarity_search(query="thud", k=1)
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block::
-
-            * thud [{'bar': 'baz'}]
+        ```txt
+        * thud [{'bar': 'baz'}]
+        ```

     Search with filter:
-        .. code-block:: python
-
-            def _filter_function(doc: Document) -> bool:
-                return doc.metadata.get("bar") == "baz"
+        ```python
+        def _filter_function(doc: Document) -> bool:
+            return doc.metadata.get("bar") == "baz"


-            results = vector_store.similarity_search(
-                query="thud", k=1, filter=_filter_function
-            )
-            for doc in results:
-                print(f"* {doc.page_content} [{doc.metadata}]")
-
-        .. code-block::
-
-            * thud [{'bar': 'baz'}]
+        results = vector_store.similarity_search(
+            query="thud", k=1, filter=_filter_function
+        )
+        for doc in results:
+            print(f"* {doc.page_content} [{doc.metadata}]")
+        ```

+        ```txt
+        * thud [{'bar': 'baz'}]
+        ```

     Search with score:
-        .. code-block:: python
-
-            results = vector_store.similarity_search_with_score(query="qux", k=1)
-            for doc, score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```python
+        results = vector_store.similarity_search_with_score(query="qux", k=1)
+        for doc, score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```

-        .. code-block::
-
-            * [SIM=0.832268] foo [{'baz': 'bar'}]
+        ```txt
+        * [SIM=0.832268] foo [{'baz': 'bar'}]
+        ```

     Async:
-        .. code-block:: python
-
-            # add documents
-            # await vector_store.aadd_documents(documents=documents)
+        ```python
+        # add documents
+        # await vector_store.aadd_documents(documents=documents)

-            # delete documents
-            # await vector_store.adelete(ids=["3"])
+        # delete documents
+        # await vector_store.adelete(ids=["3"])

-            # search
-            # results = vector_store.asimilarity_search(query="thud", k=1)
+        # search
+        # results = vector_store.asimilarity_search(query="thud", k=1)

-            # search with score
-            results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
-            for doc, score in results:
-                print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        # search with score
+        results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
+        for doc, score in results:
+            print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
+        ```

-        .. code-block::
-
-            * [SIM=0.832268] foo [{'baz': 'bar'}]
+        ```txt
+        * [SIM=0.832268] foo [{'baz': 'bar'}]
+        ```

     Use as Retriever:
-        .. code-block:: python
-
-            retriever = vector_store.as_retriever(
-                search_type="mmr",
-                search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
-            )
-            retriever.invoke("thud")
-
-        .. code-block::
-
-            [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
+        ```python
+        retriever = vector_store.as_retriever(
+            search_type="mmr",
+            search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5},
+        )
+        retriever.invoke("thud")
+        ```

+        ```txt
+        [Document(id='2', metadata={'bar': 'baz'}, page_content='thud')]
+        ```
     """

     def __init__(self, embedding: Embeddings) -> None:
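The docstring rewrite converts the class walkthrough from RST `.. code-block::` directives to Markdown fences without changing the examples themselves. The snippets assume `langchain-openai` and an API key; a self-contained variant can swap in the deterministic fake embedding that `langchain-core` ships for testing (a sketch, not part of the docstring):

```python
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

# Same text always maps to the same vector, and no network calls are made.
vector_store = InMemoryVectorStore(DeterministicFakeEmbedding(size=16))

vector_store.add_documents(
    [
        Document(id="1", page_content="foo", metadata={"baz": "bar"}),
        Document(id="2", page_content="thud", metadata={"bar": "baz"}),
    ]
)

for doc, score in vector_store.similarity_search_with_score(query="thud", k=1):
    print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
```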
@@ -180,20 +175,20 @@ class InMemoryVectorStore(VectorStore):
         return self.embedding

     @override
-    def delete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
+    def delete(self, ids: Sequence[str] | None = None, **kwargs: Any) -> None:
         if ids:
             for _id in ids:
                 self.store.pop(_id, None)

     @override
-    async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
+    async def adelete(self, ids: Sequence[str] | None = None, **kwargs: Any) -> None:
         self.delete(ids)

     @override
     def add_documents(
         self,
         documents: list[Document],
-        ids: Optional[list[str]] = None,
+        ids: list[str] | None = None,
         **kwargs: Any,
     ) -> list[str]:
         texts = [doc.page_content for doc in documents]
@@ -206,13 +201,13 @@ class InMemoryVectorStore(VectorStore):
             )
             raise ValueError(msg)

-        id_iterator: Iterator[Optional[str]] = (
+        id_iterator: Iterator[str | None] = (
             iter(ids) if ids else iter(doc.id for doc in documents)
         )

         ids_ = []

-        for doc, vector in zip(documents, vectors):
+        for doc, vector in zip(documents, vectors, strict=False):
             doc_id = next(id_iterator)
             doc_id_ = doc_id or str(uuid.uuid4())
             ids_.append(doc_id_)
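The `strict=False` addition makes the existing truncating behavior of `zip()` explicit rather than changing it: Python 3.10 added the `strict` flag (PEP 618), and linters such as ruff's B905 rule ask for it to be spelled out. The difference in one snippet:

```python
docs = ["a", "b", "c"]
vectors = [[0.1], [0.2]]  # one vector short

# strict=False: silently stops at the shorter input (the behavior chosen above).
print(list(zip(docs, vectors, strict=False)))  # [('a', [0.1]), ('b', [0.2])]

# strict=True would raise instead of truncating.
try:
    list(zip(docs, vectors, strict=True))
except ValueError as exc:
    print(exc)  # zip() argument 2 is shorter than argument 1
```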
@@ -227,7 +222,7 @@ class InMemoryVectorStore(VectorStore):

     @override
     async def aadd_documents(
-        self, documents: list[Document], ids: Optional[list[str]] = None, **kwargs: Any
+        self, documents: list[Document], ids: list[str] | None = None, **kwargs: Any
     ) -> list[str]:
         texts = [doc.page_content for doc in documents]
         vectors = await self.embedding.aembed_documents(texts)
@@ -239,12 +234,12 @@ class InMemoryVectorStore(VectorStore):
             )
             raise ValueError(msg)

-        id_iterator: Iterator[Optional[str]] = (
+        id_iterator: Iterator[str | None] = (
             iter(ids) if ids else iter(doc.id for doc in documents)
         )
         ids_: list[str] = []

-        for doc, vector in zip(documents, vectors):
+        for doc, vector in zip(documents, vectors, strict=False):
             doc_id = next(id_iterator)
             doc_id_ = doc_id or str(uuid.uuid4())
             ids_.append(doc_id_)
@@ -262,10 +257,10 @@ class InMemoryVectorStore(VectorStore):
         """Get documents by their ids.

         Args:
-            ids: The ids of the documents to get.
+            ids: The IDs of the documents to get.

         Returns:
-            A list of Document objects.
+            A list of `Document` objects.
         """
         documents = []

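Only the docstring changes here ("ids" becomes "IDs" and `Document` gains inline-code formatting); the lookup itself is untouched. Continuing the store from the earlier fake-embedding sketch:

```python
for doc in vector_store.get_by_ids(["1", "2"]):
    print(doc.id, doc.page_content)
```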
@@ -281,85 +276,15 @@ class InMemoryVectorStore(VectorStore):
             )
         return documents

-    @deprecated(
-        alternative="VectorStore.add_documents",
-        message=(
-            "This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
-        ),
-        since="0.2.29",
-        removal="1.0",
-    )
-    def upsert(self, items: Sequence[Document], /, **_kwargs: Any) -> UpsertResponse:
-        """[DEPRECATED] Upsert documents into the store.
-
-        Args:
-            items: The documents to upsert.
-
-        Returns:
-            The upsert response.
-        """
-        vectors = self.embedding.embed_documents([item.page_content for item in items])
-        ids = []
-        for item, vector in zip(items, vectors):
-            doc_id = item.id or str(uuid.uuid4())
-            ids.append(doc_id)
-            self.store[doc_id] = {
-                "id": doc_id,
-                "vector": vector,
-                "text": item.page_content,
-                "metadata": item.metadata,
-            }
-        return {
-            "succeeded": ids,
-            "failed": [],
-        }
-
-    @deprecated(
-        alternative="VectorStore.aadd_documents",
-        message=(
-            "This was a beta API that was added in 0.2.11. It'll be removed in 0.3.0."
-        ),
-        since="0.2.29",
-        removal="1.0",
-    )
-    async def aupsert(
-        self, items: Sequence[Document], /, **_kwargs: Any
-    ) -> UpsertResponse:
-        """[DEPRECATED] Upsert documents into the store.
-
-        Args:
-            items: The documents to upsert.
-
-        Returns:
-            The upsert response.
-        """
-        vectors = await self.embedding.aembed_documents(
-            [item.page_content for item in items]
-        )
-        ids = []
-        for item, vector in zip(items, vectors):
-            doc_id = item.id or str(uuid.uuid4())
-            ids.append(doc_id)
-            self.store[doc_id] = {
-                "id": doc_id,
-                "vector": vector,
-                "text": item.page_content,
-                "metadata": item.metadata,
-            }
-        return {
-            "succeeded": ids,
-            "failed": [],
-        }
-
     @override
     async def aget_by_ids(self, ids: Sequence[str], /) -> list[Document]:
         """Async get documents by their ids.

         Args:
-            ids: The ids of the documents to get.
+            ids: The IDs of the documents to get.

         Returns:
-            A list of Document objects.
+            A list of `Document` objects.
         """
         return self.get_by_ids(ids)

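This hunk deletes the deprecated beta `upsert`/`aupsert` APIs, along with their `UpsertResponse` return type whose import was removed above. The removed bodies show they wrote the same `{'id', 'vector', 'text', 'metadata'}` records as `add_documents`, so migration is mechanical (hypothetical call site):

```python
from langchain_core.documents import Document

items = [Document(id="42", page_content="hello", metadata={})]

# Before (removed in 1.0):
#     response = vector_store.upsert(items)
#     # -> {"succeeded": ["42"], "failed": []}

# After: add_documents returns the list of written IDs instead of a response dict.
ids = vector_store.add_documents(items)
print(ids)  # ['42']
```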
@@ -367,7 +292,7 @@ class InMemoryVectorStore(VectorStore):
         self,
         embedding: list[float],
         k: int = 4,
-        filter: Optional[Callable[[Document], bool]] = None,
+        filter: Callable[[Document], bool] | None = None,  # noqa: A002
     ) -> list[tuple[Document, float, list[float]]]:
         # get all docs with fixed order in list
         docs = list(self.store.values())
@@ -410,7 +335,7 @@ class InMemoryVectorStore(VectorStore):
         self,
         embedding: list[float],
         k: int = 4,
-        filter: Optional[Callable[[Document], bool]] = None,
+        filter: Callable[[Document], bool] | None = None,  # noqa: A002
         **_kwargs: Any,
     ) -> list[tuple[Document, float]]:
         """Search for the most similar documents to the given embedding.
@@ -498,7 +423,7 @@ class InMemoryVectorStore(VectorStore):
         fetch_k: int = 20,
         lambda_mult: float = 0.5,
         *,
-        filter: Optional[Callable[[Document], bool]] = None,
+        filter: Callable[[Document], bool] | None = None,
         **kwargs: Any,
     ) -> list[Document]:
         prefetch_hits = self._similarity_search_with_score_by_vector(
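The keyword-only `filter` predicate on the MMR path gets the same union-style annotation. Assuming the predicate is forwarded from the query-level MMR method as in the other search entry points (that forwarding is not shown in this hunk), usage would look like:

```python
results = vector_store.max_marginal_relevance_search(
    "thud",
    k=1,
    fetch_k=2,
    lambda_mult=0.5,
    filter=lambda doc: doc.metadata.get("bar") == "baz",
)
```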
@@ -564,7 +489,7 @@ class InMemoryVectorStore(VectorStore):
         cls,
         texts: list[str],
         embedding: Embeddings,
-        metadatas: Optional[list[dict]] = None,
+        metadatas: list[dict] | None = None,
         **kwargs: Any,
     ) -> InMemoryVectorStore:
         store = cls(
@@ -579,7 +504,7 @@ class InMemoryVectorStore(VectorStore):
         cls,
         texts: list[str],
         embedding: Embeddings,
-        metadatas: Optional[list[dict]] = None,
+        metadatas: list[dict] | None = None,
         **kwargs: Any,
     ) -> InMemoryVectorStore:
         store = cls(
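`from_texts` and `afrom_texts` keep their shapes; only the `metadatas` annotation is modernized. For reference, construction straight from strings (again using the fake embedding to stay self-contained):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore.from_texts(
    ["foo", "thud"],
    DeterministicFakeEmbedding(size=16),
    metadatas=[{"baz": "bar"}, {"bar": "baz"}],
)
```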
@@ -597,7 +522,7 @@ class InMemoryVectorStore(VectorStore):
         Args:
             path: The path to load the vector store from.
             embedding: The embedding to use.
-            kwargs: Additional arguments to pass to the constructor.
+            **kwargs: Additional arguments to pass to the constructor.

         Returns:
             A VectorStore object.
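The docstring fix marks `kwargs` as variadic (`**kwargs`) to match the signature. This `load` classmethod pairs with the store's `dump` method; a round-trip sketch, continuing the previous snippet:

```python
store.dump("./vectors.json")
restored = InMemoryVectorStore.load(
    "./vectors.json", DeterministicFakeEmbedding(size=16)
)
```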
langchain_core/vectorstores/utils.py CHANGED
@@ -1,4 +1,4 @@
-"""Internal utilities for the in memory implementation of VectorStore
+"""Internal utilities for the in memory implementation of `VectorStore`.

 These are part of a private API, and users should not use them directly
 as they can change without notice.
@@ -8,7 +8,7 @@ from __future__ import annotations

 import logging
 import warnings
-from typing import TYPE_CHECKING, Union
+from typing import TYPE_CHECKING

 try:
     import numpy as np
@@ -25,7 +25,7 @@ except ImportError:
     _HAS_SIMSIMD = False

 if TYPE_CHECKING:
-    Matrix = Union[list[list[float]], list[np.ndarray], np.ndarray]
+    Matrix = list[list[float]] | list[np.ndarray] | np.ndarray

 logger = logging.getLogger(__name__)

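`Matrix` is the input type the module's cosine-similarity helpers accept on both the numpy and simsimd paths; only the union spelling changes. A rough sketch of what the numpy fallback computes (not the module's exact code):

```python
import numpy as np


def cosine_similarity(
    x: list[list[float]] | np.ndarray,
    y: list[list[float]] | np.ndarray,
) -> np.ndarray:
    """Pairwise cosine similarity between the rows of two matrices."""
    x_arr = np.asarray(x, dtype=float)
    y_arr = np.asarray(y, dtype=float)
    # Denominator: outer product of row norms; guard against zero vectors.
    norms = np.outer(np.linalg.norm(x_arr, axis=1), np.linalg.norm(y_arr, axis=1))
    return (x_arr @ y_arr.T) / np.where(norms == 0.0, 1.0, norms)
```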
@@ -112,8 +112,8 @@ def maximal_marginal_relevance(
     Args:
         query_embedding: The query embedding.
         embedding_list: A list of embeddings.
-        lambda_mult: The lambda parameter for MMR. Default is 0.5.
-        k: The number of embeddings to return. Default is 4.
+        lambda_mult: The lambda parameter for MMR.
+        k: The number of embeddings to return.

     Returns:
         A list of indices of the embeddings to return.
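For context on what `maximal_marginal_relevance` computes: it greedily selects `k` indices, each time maximizing `lambda_mult * sim(query, candidate) - (1 - lambda_mult) * max_sim(candidate, selected)`. A compact re-derivation of that loop (a sketch, not the module's implementation):

```python
import numpy as np


def mmr_indices(
    query: list[float],
    embeddings: list[list[float]],
    lambda_mult: float = 0.5,
    k: int = 4,
) -> list[int]:
    """Greedy maximal marginal relevance over cosine similarities."""
    emb = np.asarray(embeddings, dtype=float)
    emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
    q = np.asarray(query, dtype=float)
    q = q / np.linalg.norm(q)
    to_query = emb @ q  # relevance of every candidate to the query

    selected: list[int] = []
    while len(selected) < min(k, len(emb)):
        if selected:
            # Redundancy: best similarity to anything already picked.
            to_selected = (emb @ emb[selected].T).max(axis=1)
        else:
            to_selected = np.zeros(len(emb))
        scores = lambda_mult * to_query - (1 - lambda_mult) * to_selected
        scores[selected] = -np.inf  # never re-pick an index
        selected.append(int(scores.argmax()))
    return selected


print(mmr_indices([1.0, 0.0], [[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]], k=2))
```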
langchain_core/version.py CHANGED

langchain_core-1.0.3.dist-info/METADATA ADDED
@@ -0,0 +1,69 @@
+Metadata-Version: 2.4
+Name: langchain-core
+Version: 1.0.3
+Summary: Building applications with LLMs through composability
+Project-URL: Homepage, https://docs.langchain.com/
+Project-URL: Documentation, https://reference.langchain.com/python/langchain_core/
+Project-URL: Source, https://github.com/langchain-ai/langchain/tree/master/libs/core
+Project-URL: Changelog, https://github.com/langchain-ai/langchain/releases?q=%22langchain-core%3D%3D1%22
+Project-URL: Twitter, https://x.com/LangChainAI
+Project-URL: Slack, https://www.langchain.com/join-community
+Project-URL: Reddit, https://www.reddit.com/r/LangChain/
+License: MIT
+Requires-Python: <4.0.0,>=3.10.0
+Requires-Dist: jsonpatch<2.0.0,>=1.33.0
+Requires-Dist: langsmith<1.0.0,>=0.3.45
+Requires-Dist: packaging<26.0.0,>=23.2.0
+Requires-Dist: pydantic<3.0.0,>=2.7.4
+Requires-Dist: pyyaml<7.0.0,>=5.3.0
+Requires-Dist: tenacity!=8.4.0,<10.0.0,>=8.1.0
+Requires-Dist: typing-extensions<5.0.0,>=4.7.0
+Description-Content-Type: text/markdown
+
+# 🦜🍎️ LangChain Core
+
+[](https://pypi.org/project/langchain-core/#history)
+[](https://opensource.org/licenses/MIT)
+[](https://pypistats.org/packages/langchain-core)
+[](https://twitter.com/langchainai)
+
+Looking for the JS/TS version? Check out [LangChain.js](https://github.com/langchain-ai/langchainjs).
+
+To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+[LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+
+## Quick Install
+
+```bash
+pip install langchain-core
+```
+
+## 🤔 What is this?
+
+LangChain Core contains the base abstractions that power the LangChain ecosystem.
+
+These abstractions are designed to be as modular and simple as possible.
+
+The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
+
+## ⛰️ Why build on top of LangChain Core?
+
+The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
+
+- **Modularity**: We've designed Core around abstractions that are independent of each other, and not tied to any specific model provider.
+- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
+- **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.
+
+## 📖 Documentation
+
+For full documentation, see the [API reference](https://reference.langchain.com/python/langchain_core/).
+
+## 📕 Releases & Versioning
+
+See our [Releases](https://docs.langchain.com/oss/python/release-policy) and [Versioning](https://docs.langchain.com/oss/python/versioning) policies.
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://docs.langchain.com/oss/python/contributing/overview).