langchain-postgres 0.0.12__py3-none-any.whl → 0.0.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_postgres/__init__.py +6 -0
- langchain_postgres/chat_message_histories.py +7 -1
- langchain_postgres/utils/pgvector_migrator.py +321 -0
- langchain_postgres/v2/__init__.py +0 -0
- langchain_postgres/v2/async_vectorstore.py +1268 -0
- langchain_postgres/v2/engine.py +348 -0
- langchain_postgres/v2/indexes.py +155 -0
- langchain_postgres/v2/vectorstores.py +842 -0
- langchain_postgres/vectorstores.py +4 -4
- langchain_postgres-0.0.14.dist-info/METADATA +170 -0
- langchain_postgres-0.0.14.dist-info/RECORD +16 -0
- langchain_postgres-0.0.12.dist-info/METADATA +0 -109
- langchain_postgres-0.0.12.dist-info/RECORD +0 -10
- {langchain_postgres-0.0.12.dist-info → langchain_postgres-0.0.14.dist-info}/LICENSE +0 -0
- {langchain_postgres-0.0.12.dist-info → langchain_postgres-0.0.14.dist-info}/WHEEL +0 -0
langchain_postgres/__init__.py
CHANGED
@@ -2,6 +2,8 @@ from importlib import metadata
 
 from langchain_postgres.chat_message_histories import PostgresChatMessageHistory
 from langchain_postgres.translator import PGVectorTranslator
+from langchain_postgres.v2.engine import Column, ColumnDict, PGEngine
+from langchain_postgres.v2.vectorstores import PGVectorStore
 from langchain_postgres.vectorstores import PGVector
 
 try:
@@ -12,7 +14,11 @@ except metadata.PackageNotFoundError:
 
 __all__ = [
     "__version__",
+    "Column",
+    "ColumnDict",
+    "PGEngine",
     "PostgresChatMessageHistory",
     "PGVector",
+    "PGVectorStore",
     "PGVectorTranslator",
 ]
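The net effect of this change is that the v2 engine and vector store become importable from the package root. Below is a minimal sketch of wiring the new exports together; the connection URL, table name, and embedding model are hypothetical, and the constructor names (from_connection_string, init_vectorstore_table, create_sync) are assumptions based on common PGEngine/PGVectorStore usage, not anything shown in this diff.

# Hypothetical wiring of the new root-level exports; names below that are
# not in the diff above (from_connection_string, init_vectorstore_table,
# create_sync, the URL) are assumptions.
from langchain_core.embeddings import DeterministicFakeEmbedding

from langchain_postgres import PGEngine, PGVectorStore

# Assumed connection URL; replace with your own database.
engine = PGEngine.from_connection_string(
    url="postgresql+asyncpg://user:pass@localhost:5432/db"
)
# Create the destination table, then bind a store to it.
engine.init_vectorstore_table(table_name="my_docs", vector_size=768)
store = PGVectorStore.create_sync(
    engine=engine,
    table_name="my_docs",
    embedding_service=DeterministicFakeEmbedding(size=768),
)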
langchain_postgres/chat_message_histories.py
CHANGED
@@ -340,11 +340,17 @@ class PostgresChatMessageHistory(BaseChatMessageHistory):
         messages = messages_from_dict(items)
         return messages
 
-    @property
+    @property
     def messages(self) -> List[BaseMessage]:
         """The abstraction required a property."""
         return self.get_messages()
 
+    @messages.setter
+    def messages(self, value: list[BaseMessage]) -> None:
+        """Clear the stored messages and appends a list of messages."""
+        self.clear()
+        self.add_messages(value)
+
     def clear(self) -> None:
         """Clear the chat message history for the GIVEN session."""
         if self._connection is None:
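The new setter turns wholesale replacement of a session's history into a single assignment: per the added lines above, it clears the stored rows and re-adds the given list. A small usage sketch; history is assumed to be an already-initialized PostgresChatMessageHistory for some session.

# Hypothetical usage of the new messages setter; `history` is an assumed
# PostgresChatMessageHistory instance.
from langchain_core.messages import AIMessage, HumanMessage

history.messages = [
    HumanMessage(content="Hello"),
    AIMessage(content="Hi there!"),
]
# Equivalent to history.clear() followed by history.add_messages([...]).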
langchain_postgres/utils/pgvector_migrator.py
ADDED
@@ -0,0 +1,321 @@
+import asyncio
+import json
+import warnings
+from typing import Any, AsyncIterator, Iterator, Optional, Sequence, TypeVar
+
+from sqlalchemy import RowMapping, text
+from sqlalchemy.exc import ProgrammingError, SQLAlchemyError
+
+from ..v2.engine import PGEngine
+from ..v2.vectorstores import PGVectorStore
+
+COLLECTIONS_TABLE = "langchain_pg_collection"
+EMBEDDINGS_TABLE = "langchain_pg_embedding"
+
+T = TypeVar("T")
+
+
+async def __aget_collection_uuid(
+    engine: PGEngine,
+    collection_name: str,
+) -> str:
+    """
+    Get the collection uuid for a collection present in PGVector tables.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The name of the collection to get the uuid for.
+    Returns:
+        The uuid corresponding to the collection.
+    """
+    query = f"SELECT name, uuid FROM {COLLECTIONS_TABLE} WHERE name = :collection_name"
+    async with engine._pool.connect() as conn:
+        result = await conn.execute(
+            text(query), parameters={"collection_name": collection_name}
+        )
+        result_map = result.mappings()
+        result_fetch = result_map.fetchone()
+    if result_fetch is None:
+        raise ValueError(f"Collection, {collection_name} not found.")
+    return result_fetch.uuid
+
+
+async def __aextract_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    batch_size: int = 1000,
+) -> AsyncIterator[Sequence[RowMapping]]:
+    """
+    Extract all data belonging to a PGVector collection.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The name of the collection to get the data for.
+        batch_size (int): The batch size for collection extraction.
+            Default: 1000. Optional.
+
+    Yields:
+        The data present in the collection.
+    """
+    try:
+        uuid_task = asyncio.create_task(__aget_collection_uuid(engine, collection_name))
+        query = f"SELECT * FROM {EMBEDDINGS_TABLE} WHERE collection_id = :id"
+        async with engine._pool.connect() as conn:
+            uuid = await uuid_task
+            result_proxy = await conn.execute(text(query), parameters={"id": uuid})
+            while True:
+                rows = result_proxy.fetchmany(size=batch_size)
+                if not rows:
+                    break
+                yield [row._mapping for row in rows]
+    except ValueError:
+        raise ValueError(f"Collection, {collection_name} does not exist.")
+    except SQLAlchemyError as e:
+        raise ProgrammingError(
+            statement=f"Failed to extract data from collection '{collection_name}': {e}",
+            params={"id": uuid},
+            orig=e,
+        ) from e
+
+
+async def __concurrent_batch_insert(
+    data_batches: AsyncIterator[Sequence[RowMapping]],
+    vector_store: PGVectorStore,
+    max_concurrency: int = 100,
+) -> None:
+    pending: set[Any] = set()
+    async for batch_data in data_batches:
+        pending.add(
+            asyncio.ensure_future(
+                vector_store.aadd_embeddings(
+                    texts=[data.document for data in batch_data],
+                    embeddings=[json.loads(data.embedding) for data in batch_data],
+                    metadatas=[data.cmetadata for data in batch_data],
+                    ids=[data.id for data in batch_data],
+                )
+            )
+        )
+        if len(pending) >= max_concurrency:
+            _, pending = await asyncio.wait(
+                pending, return_when=asyncio.FIRST_COMPLETED
+            )
+    if pending:
+        await asyncio.wait(pending)
+
+
+async def __amigrate_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    vector_store: PGVectorStore,
+    delete_pg_collection: Optional[bool] = False,
+    insert_batch_size: int = 1000,
+) -> None:
+    """
+    Migrate all data present in a PGVector collection to use separate tables for each collection.
+    The new data format is compatible with the PGVectoreStore interface.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The collection to migrate.
+        vector_store (PGVectorStore): The PGVectorStore object corresponding to the new collection table.
+        delete_pg_collection (bool): An option to delete the original data upon migration.
+            Default: False. Optional.
+        insert_batch_size (int): Number of rows to insert at once in the table.
+            Default: 1000.
+    """
+    destination_table = vector_store.get_table_name()
+
+    # Get row count in PGVector collection
+    uuid_task = asyncio.create_task(__aget_collection_uuid(engine, collection_name))
+    query = (
+        f"SELECT COUNT(*) FROM {EMBEDDINGS_TABLE} WHERE collection_id=:collection_id"
+    )
+    async with engine._pool.connect() as conn:
+        uuid = await uuid_task
+        result = await conn.execute(text(query), parameters={"collection_id": uuid})
+        result_map = result.mappings()
+        collection_data_len = result_map.fetchone()
+    if collection_data_len is None:
+        warnings.warn(f"Collection, {collection_name} contains no elements.")
+        return
+
+    # Extract data from the collection and batch insert into the new table
+    data_batches = __aextract_pgvector_collection(
+        engine, collection_name, batch_size=insert_batch_size
+    )
+    await __concurrent_batch_insert(data_batches, vector_store, max_concurrency=100)
+
+    # Validate data migration
+    query = f"SELECT COUNT(*) FROM {destination_table}"
+    async with engine._pool.connect() as conn:
+        result = await conn.execute(text(query))
+        result_map = result.mappings()
+        table_size = result_map.fetchone()
+    if not table_size:
+        raise ValueError(f"Table: {destination_table} does not exist.")
+
+    if collection_data_len["count"] != table_size["count"]:
+        raise ValueError(
+            "All data not yet migrated.\n"
+            f"Original row count: {collection_data_len['count']}\n"
+            f"Collection table, {destination_table} row count: {table_size['count']}"
+        )
+    elif delete_pg_collection:
+        # Delete PGVector data
+        query = f"DELETE FROM {EMBEDDINGS_TABLE} WHERE collection_id=:collection_id"
+        async with engine._pool.connect() as conn:
+            await conn.execute(text(query), parameters={"collection_id": uuid})
+            await conn.commit()
+
+        query = f"DELETE FROM {COLLECTIONS_TABLE} WHERE name=:collection_name"
+        async with engine._pool.connect() as conn:
+            await conn.execute(
+                text(query), parameters={"collection_name": collection_name}
+            )
+            await conn.commit()
+        print(f"Successfully deleted PGVector collection, {collection_name}")
+
+
+async def __alist_pgvector_collection_names(
+    engine: PGEngine,
+) -> list[str]:
+    """Lists all collection names present in PGVector table."""
+    try:
+        query = f"SELECT name from {COLLECTIONS_TABLE}"
+        async with engine._pool.connect() as conn:
+            result = await conn.execute(text(query))
+            result_map = result.mappings()
+            all_rows = result_map.fetchall()
+        return [row["name"] for row in all_rows]
+    except ProgrammingError as e:
+        raise ValueError(
+            "Please provide the correct collection table name: " + str(e)
+        ) from e
+
+
+async def aextract_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    batch_size: int = 1000,
+) -> AsyncIterator[Sequence[RowMapping]]:
+    """
+    Extract all data belonging to a PGVector collection.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The name of the collection to get the data for.
+        batch_size (int): The batch size for collection extraction.
+            Default: 1000. Optional.
+
+    Yields:
+        The data present in the collection.
+    """
+    iterator = __aextract_pgvector_collection(engine, collection_name, batch_size)
+    while True:
+        try:
+            result = await engine._run_as_async(iterator.__anext__())
+            yield result
+        except StopAsyncIteration:
+            break
+
+
+async def alist_pgvector_collection_names(
+    engine: PGEngine,
+) -> list[str]:
+    """Lists all collection names present in PGVector table."""
+    return await engine._run_as_async(__alist_pgvector_collection_names(engine))
+
+
+async def amigrate_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    vector_store: PGVectorStore,
+    delete_pg_collection: Optional[bool] = False,
+    insert_batch_size: int = 1000,
+) -> None:
+    """
+    Migrate all data present in a PGVector collection to use separate tables for each collection.
+    The new data format is compatible with the PGVectorStore interface.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The collection to migrate.
+        vector_store (PGVectorStore): The PGVectorStore object corresponding to the new collection table.
+        use_json_metadata (bool): An option to keep the PGVector metadata as json in the new table.
+            Default: False. Optional.
+        delete_pg_collection (bool): An option to delete the original data upon migration.
+            Default: False. Optional.
+        insert_batch_size (int): Number of rows to insert at once in the table.
+            Default: 1000.
+    """
+    await engine._run_as_async(
+        __amigrate_pgvector_collection(
+            engine,
+            collection_name,
+            vector_store,
+            delete_pg_collection,
+            insert_batch_size,
+        )
+    )
+
+
+def extract_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    batch_size: int = 1000,
+) -> Iterator[Sequence[RowMapping]]:
+    """
+    Extract all data belonging to a PGVector collection.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The name of the collection to get the data for.
+        batch_size (int): The batch size for collection extraction.
+            Default: 1000. Optional.
+
+    Yields:
+        The data present in the collection.
+    """
+    iterator = __aextract_pgvector_collection(engine, collection_name, batch_size)
+    while True:
+        try:
+            result = engine._run_as_sync(iterator.__anext__())
+            yield result
+        except StopAsyncIteration:
+            break
+
+
+def list_pgvector_collection_names(engine: PGEngine) -> list[str]:
+    """Lists all collection names present in PGVector table."""
+    return engine._run_as_sync(__alist_pgvector_collection_names(engine))
+
+
+def migrate_pgvector_collection(
+    engine: PGEngine,
+    collection_name: str,
+    vector_store: PGVectorStore,
+    delete_pg_collection: Optional[bool] = False,
+    insert_batch_size: int = 1000,
+) -> None:
+    """
+    Migrate all data present in a PGVector collection to use separate tables for each collection.
+    The new data format is compatible with the PGVectorStore interface.
+
+    Args:
+        engine (PGEngine): The PG engine corresponding to the Database.
+        collection_name (str): The collection to migrate.
+        vector_store (PGVectorStore): The PGVectorStore object corresponding to the new collection table.
+        delete_pg_collection (bool): An option to delete the original data upon migration.
+            Default: False. Optional.
+        insert_batch_size (int): Number of rows to insert at once in the table.
+            Default: 1000.
+    """
+    engine._run_as_sync(
+        __amigrate_pgvector_collection(
+            engine,
+            collection_name,
+            vector_store,
+            delete_pg_collection,
+            insert_batch_size,
+        )
+    )
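The module pairs each private coroutine with public sync and async wrappers driven through the engine's _run_as_sync / _run_as_async helpers. A hedged end-to-end sketch of the async path; engine and store are assumed to be set up as in the earlier example, and the collection choice is hypothetical.

# Hypothetical migration flow using the new public helpers; `engine` and
# `store` are assumed to exist (see the earlier sketch).
from langchain_postgres.utils.pgvector_migrator import (
    alist_pgvector_collection_names,
    amigrate_pgvector_collection,
)

async def migrate_first_collection(engine, store):
    # Discover the legacy PGVector collections, then copy one into the
    # destination table that `store` is bound to.
    names = await alist_pgvector_collection_names(engine)
    await amigrate_pgvector_collection(
        engine,
        collection_name=names[0],
        vector_store=store,
        delete_pg_collection=False,  # keep originals until the copy is verified
        insert_batch_size=1000,
    )

Note that the migration validates row counts between the old collection and the new table before any deletion, so delete_pg_collection=True only removes the legacy rows once the counts match.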
{langchain_postgres-0.0.12.dist-info → langchain_postgres-0.0.14.dist-info}/LICENSE
RENAMED
File without changes

{langchain_postgres-0.0.12.dist-info → langchain_postgres-0.0.14.dist-info}/WHEEL
RENAMED
File without changes