beaver-db 0.9.2__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

beaver/vectors.py ADDED
@@ -0,0 +1,370 @@
+ import io
+ import sqlite3
+ import threading
+ from typing import Dict, List, Set, Tuple
+ 
+ import faiss
+ import numpy as np
+ 
+ class VectorIndex:
+     """
+     Manages a persistent, high-performance hybrid vector index for a single collection.
+ 
+     This class handles the complexities of a two-tiered index system (a large, on-disk
+     base index and a small, in-memory delta index), crash-safe logging for additions
+     and deletions, and multi-process synchronization. The vector dimension is inferred
+     from the first vector indexed and then enforced. It also transparently maps
+     user-provided string IDs to the internal integer IDs required by Faiss.
+     """
+ 
+     def __init__(self, collection_name: str, conn: sqlite3.Connection):
+         """
+         Initializes the VectorIndex for a specific collection.
+         """
+         self._collection_name = collection_name
+         self._conn = conn
+         # A lock to ensure thread safety for in-memory operations and synchronization checks.
+         self._lock = threading.Lock()
+         # Tracks the overall version of the collection this instance is aware of.
+         self._local_version = -1
+         # Tracks the specific version of the on-disk base index this instance has loaded.
+         self._local_base_index_version = -1
+ 
+         # In-memory components
+         # The dimension of the vectors in this collection. Inferred from the first vector.
+         self._dimension: int | None = None
+         # The large, persistent Faiss index loaded from the database BLOB.
+         self._base_index: faiss.Index | None = None
+         # The small, in-memory Faiss index for newly added vectors ("delta").
+         self._delta_index: faiss.IndexIDMap | None = None
+         # A set of integer IDs for vectors that have been deleted but not yet compacted.
+         self._deleted_int_ids: Set[int] = set()
+ 
+         # In-memory caches for the bidirectional mapping between user-facing string IDs
+         # and Faiss's internal integer IDs.
+         self._str_to_int_id: Dict[str, int] = {}
+         self._int_to_str_id: Dict[int, str] = {}
+ 
+     def _infer_and_validate_dimension(self, vector: np.ndarray):
+         """
+         Infers the vector dimension from the first operation and validates
+         subsequent vectors against it. This ensures data consistency.
+         """
+         # Get the last element of the shape tuple, which is the dimension.
+         dim = vector.shape[-1]
+         with self._lock:
+             if self._dimension is None:
+                 # If this is the first vector we've seen, establish its dimension
+                 # as the official dimension for this entire collection.
+                 self._dimension = dim
+             elif self._dimension != dim:
+                 # If a dimension is already set, all subsequent vectors must match.
+                 raise ValueError(
+                     f"Vector dimension mismatch for collection '{self._collection_name}'. "
+                     f"Expected {self._dimension}, but got {dim}."
+                 )
+ 
+     def _get_or_create_int_id(self, str_id: str, cursor: sqlite3.Cursor) -> int:
+         """
+         Retrieves the integer ID for a string ID, creating it if it doesn't exist.
+         This must be called within a transaction to be atomic.
+         """
+         # First, check our fast in-memory cache.
+         if str_id in self._str_to_int_id:
+             return self._str_to_int_id[str_id]
+ 
+         # If not in cache, get it from the database, creating it if necessary.
+         # INSERT OR IGNORE is an atomic and safe way to create a new mapping only if it's missing.
+         cursor.execute(
+             "INSERT OR IGNORE INTO _beaver_ann_id_mapping (collection_name, str_id) VALUES (?, ?)",
+             (self._collection_name, str_id)
+         )
+         # Retrieve the now-guaranteed-to-exist integer ID.
+         cursor.execute(
+             "SELECT int_id FROM _beaver_ann_id_mapping WHERE collection_name = ? AND str_id = ?",
+             (self._collection_name, str_id)
+         )
+         result = cursor.fetchone()
+         if not result:
+             # This case should be virtually impossible given the logic above.
+             raise RuntimeError(f"Failed to create or retrieve int_id for {str_id}")
+ 
+         int_id = result["int_id"]
+         # Update our in-memory caches for future calls.
+         self._str_to_int_id[str_id] = int_id
+         self._int_to_str_id[int_id] = str_id
+         return int_id
+ 
+     def _get_db_version(self) -> int:
+         """Gets the current overall version of the collection from the database."""
+         cursor = self._conn.cursor()
+         cursor.execute(
+             "SELECT version FROM beaver_collection_versions WHERE collection_name = ?",
+             (self._collection_name,),
+         )
+         result = cursor.fetchone()
+         return result[0] if result else 0
+ 
+     def _get_db_base_index_version(self) -> int:
+         """Gets the version of the persistent on-disk base index from the database."""
+         cursor = self._conn.cursor()
+         cursor.execute(
+             "SELECT base_index_version FROM _beaver_ann_indexes WHERE collection_name = ?",
+             (self._collection_name,),
+         )
+         result = cursor.fetchone()
+         return result[0] if result else 0
+ 
+     def _check_and_sync(self):
+         """
+         Checks if the in-memory state is stale compared to the database and performs
+         a fast, targeted sync if needed. This is the core of multi-process consistency.
+         """
+         db_version = self._get_db_version()
+         if self._local_version < db_version:
+             # Acquire a lock to prevent race conditions from multiple threads in the same process.
+             with self._lock:
+                 # Double-checked locking: re-check the condition inside the lock.
+                 if self._local_version < db_version:
+                     db_base_version = self._get_db_base_index_version()
+                     # Always reload the ID mappings as they can change on any write.
+                     self._load_id_mappings()
+                     # Only perform the expensive reload of the base index if a compaction
+                     # has occurred in another process.
+                     if self._local_base_index_version < db_base_version or self._base_index is None:
+                         self._load_base_index()
+                     # Always sync the lightweight delta and deletion logs.
+                     self._sync_delta_index_and_deletions()
+                     # Update our local version to match the database, marking us as "up-to-date".
+                     self._local_version = db_version
+ 
+     def _load_id_mappings(self):
+         """Loads the complete str <-> int ID mapping from the DB into in-memory caches."""
+         cursor = self._conn.cursor()
+         cursor.execute(
+             "SELECT str_id, int_id FROM _beaver_ann_id_mapping WHERE collection_name = ?",
+             (self._collection_name,)
+         )
+         # Fetch all mappings at once for efficiency.
+         all_mappings = cursor.fetchall()
+         self._str_to_int_id = {row["str_id"]: row["int_id"] for row in all_mappings}
+         self._int_to_str_id = {v: k for k, v in self._str_to_int_id.items()}
+ 
+     def _load_base_index(self):
+         """Loads and deserializes the persistent base index from the database BLOB."""
+         cursor = self._conn.cursor()
+         cursor.execute(
+             "SELECT index_data, base_index_version FROM _beaver_ann_indexes WHERE collection_name = ?",
+             (self._collection_name,),
+         )
+         result = cursor.fetchone()
+         if result and result["index_data"]:
+             # The index is stored as bytes; we use an in-memory buffer to read it.
+             buffer = io.BytesIO(result["index_data"])
+             # Use Faiss's IO reader to deserialize the index from the buffer.
+             reader = faiss.PyCallbackIOReader(buffer.read)
+             self._base_index = faiss.read_index(reader)
+             self._local_base_index_version = result["base_index_version"]
+             # If the dimension is unknown, we can infer it from the loaded index.
+             if self._dimension is None and self._base_index.ntotal > 0:
+                 self._dimension = self._base_index.d
+         else:
+             # If no base index exists in the DB yet.
+             self._base_index = None
+             self._local_base_index_version = result["base_index_version"] if result else 0
+ 
+     def _sync_delta_index_and_deletions(self):
+         """
+         "Catches up" to changes by rebuilding the in-memory delta index and
+         deletion set from the database logs.
+         """
+         cursor = self._conn.cursor()
+         # Sync the set of deleted integer IDs.
+         cursor.execute(
+             "SELECT int_id FROM _beaver_ann_deletions_log WHERE collection_name = ?",
+             (self._collection_name,)
+         )
+         self._deleted_int_ids = {row["int_id"] for row in cursor.fetchall()}
+ 
+         # Get all vectors that are in the pending log.
+         cursor.execute(
+             """
+             SELECT p.str_id, c.item_vector
+             FROM _beaver_ann_pending_log p
+             JOIN beaver_collections c ON p.str_id = c.item_id AND p.collection_name = c.collection
+             WHERE p.collection_name = ?
+             """,
+             (self._collection_name,)
+         )
+         pending_items = cursor.fetchall()
+ 
+         if pending_items:
+             # Convert fetched data into numpy arrays.
+             vectors = np.array([np.frombuffer(row["item_vector"], dtype=np.float32) for row in pending_items])
+             if self._dimension is None:
+                 self._dimension = vectors[0].shape[-1]
+ 
+             item_int_ids = np.array([self._str_to_int_id[row["str_id"]] for row in pending_items], dtype=np.int64)
+ 
+             # Reshape and validate dimensions for consistency.
+             if vectors.ndim == 1:
+                 vectors = vectors.reshape(-1, self._dimension)
+             if vectors.shape[1] != self._dimension:
+                 raise ValueError(f"Inconsistent vector dimensions in pending log for '{self._collection_name}'.")
+ 
+             # Rebuild the delta index from scratch with all current pending items.
+             self._delta_index = faiss.IndexIDMap(faiss.IndexFlatL2(self._dimension))
+             self._delta_index.add_with_ids(vectors, item_int_ids)
+         else:
+             # If there are no pending items, there's no delta index.
+             self._delta_index = None
+ 
+     def index(self, item_id: str, vector: np.ndarray, cursor: sqlite3.Cursor):
+         """
+         Logs a vector for future persistence and adds it to the in-memory delta index.
+         This method must be called within a transaction managed by CollectionManager.
+         """
+         # Enforce dimension consistency for the incoming vector.
+         self._infer_and_validate_dimension(vector)
+         # Get or create the persistent integer ID for this string ID.
+         int_id = self._get_or_create_int_id(item_id, cursor)
+ 
+         # Add the string ID to the log for other processes to sync.
+         cursor.execute(
+             "INSERT OR IGNORE INTO _beaver_ann_pending_log (collection_name, str_id) VALUES (?, ?)",
+             (self._collection_name, item_id),
+         )
+         # Create the delta index if this is the first item added.
+         if self._delta_index is None:
+             self._delta_index = faiss.IndexIDMap(faiss.IndexFlatL2(self._dimension))
+ 
+         # Add the vector to the live in-memory delta index for immediate searchability.
+         vector_2d = vector.reshape(1, -1).astype(np.float32)
+         item_id_arr = np.array([int_id], dtype=np.int64)
+         self._delta_index.add_with_ids(vector_2d, item_id_arr)
+ 
+     def drop(self, item_id: str, cursor: sqlite3.Cursor):
+         """
+         Logs a document ID for deletion ("tombstone"). This must be called
+         within a transaction managed by CollectionManager.
+         """
+         # Get the corresponding integer ID from our in-memory cache.
+         int_id = self._str_to_int_id.get(item_id)
+         if int_id is not None:
+             # Add the integer ID to the deletion log.
+             cursor.execute(
+                 "INSERT INTO _beaver_ann_deletions_log (collection_name, int_id) VALUES (?, ?)",
+                 (self._collection_name, int_id),
+             )
+             # Also add to the live in-memory deletion set.
+             self._deleted_int_ids.add(int_id)
+ 
+     def search(self, vector: np.ndarray, top_k: int) -> List[Tuple[str, float]]:
+         """
+         Performs a hybrid search and returns results with original string IDs.
+         """
+         # Validate the query vector and ensure our in-memory state is up-to-date.
+         self._infer_and_validate_dimension(vector)
+         self._check_and_sync()
+ 
+         query_vector = vector.reshape(1, -1).astype(np.float32)
+         all_distances: List[float] = []
+         all_ids: List[int] = []
+ 
+         # Search the large, persistent base index if it exists.
+         if self._base_index and self._base_index.ntotal > 0:
+             distances, int_ids = self._base_index.search(query_vector, top_k)
+             all_distances.extend(distances[0])
+             all_ids.extend(int_ids[0])
+ 
+         # Search the small, in-memory delta index if it exists.
+         if self._delta_index and self._delta_index.ntotal > 0:
+             distances, int_ids = self._delta_index.search(query_vector, top_k)
+             all_distances.extend(distances[0])
+             all_ids.extend(int_ids[0])
+ 
+         if not all_ids:
+             return []
+ 
+         # Combine results from both indexes and sort by distance.
+         results = sorted(zip(all_distances, all_ids), key=lambda x: x[0])
+ 
+         # Filter the results to remove duplicates and deleted items.
+         final_results: List[Tuple[str, float]] = []
+         seen_ids = set()
+         for dist, int_id in results:
+             # Faiss uses -1 for invalid IDs.
+             if int_id != -1 and int_id not in self._deleted_int_ids and int_id not in seen_ids:
+                 # Map the internal integer ID back to the user's string ID.
+                 str_id = self._int_to_str_id.get(int_id)
+                 if str_id:
+                     final_results.append((str_id, dist))
+                     seen_ids.add(int_id)
+             # Stop once we have enough results.
+             if len(final_results) == top_k:
+                 break
+ 
+         return final_results
+ 
+     def compact(self):
+         """
+         (Background Task) Rebuilds the base index from the main collection,
+         incorporating all pending additions and permanently applying deletions.
+         """
+         # If the dimension is unknown, try to learn it from the logs before proceeding.
+         if self._dimension is None:
+             self._check_and_sync()
+         if self._dimension is None: return  # Nothing to compact.
+ 
+         # Step 1: Take a snapshot of the logs. This defines the scope of this compaction run.
+         cursor = self._conn.cursor()
+         cursor.execute("SELECT str_id FROM _beaver_ann_pending_log WHERE collection_name = ?", (self._collection_name,))
+         pending_str_ids = {row["str_id"] for row in cursor.fetchall()}
+         cursor.execute("SELECT int_id FROM _beaver_ann_deletions_log WHERE collection_name = ?", (self._collection_name,))
+         deleted_int_ids_snapshot = {row["int_id"] for row in cursor.fetchall()}
+ 
+         deleted_str_ids_snapshot = {self._int_to_str_id[int_id] for int_id in deleted_int_ids_snapshot if int_id in self._int_to_str_id}
+ 
+         # Step 2: Fetch all vectors from the main table that haven't been marked for deletion.
+         # This is the long-running part that happens "offline" in a background thread.
+         if not deleted_str_ids_snapshot:
+             cursor.execute("SELECT item_id, item_vector FROM beaver_collections WHERE collection = ?", (self._collection_name,))
+         else:
+             cursor.execute(
+                 f"SELECT item_id, item_vector FROM beaver_collections WHERE collection = ? AND item_id NOT IN ({','.join('?' for _ in deleted_str_ids_snapshot)})",
+                 (self._collection_name, *deleted_str_ids_snapshot)
+             )
+ 
+         all_valid_vectors = cursor.fetchall()
+ 
+         # Step 3: Build the new, clean base index in memory.
+         if not all_valid_vectors:
+             new_index = None
+         else:
+             int_ids = np.array([self._str_to_int_id[row["item_id"]] for row in all_valid_vectors], dtype=np.int64)
+             vectors = np.array([np.frombuffer(row["item_vector"], dtype=np.float32) for row in all_valid_vectors])
+             new_index = faiss.IndexIDMap(faiss.IndexFlatL2(self._dimension))
+             new_index.add_with_ids(vectors, int_ids)
+ 
+         # Step 4: Serialize the newly built index to a byte buffer.
+         index_data = None
+         if new_index:
+             buffer = io.BytesIO()
+             writer = faiss.PyCallbackIOWriter(buffer.write)
+             faiss.write_index(new_index, writer)
+             index_data = buffer.getvalue()
+ 
+         # Step 5: Perform the atomic swap in the database. This is a fast, transactional write.
+         with self._conn:
+             # Increment the overall collection version to signal a change.
+             self._conn.execute("INSERT INTO beaver_collection_versions (collection_name, version) VALUES (?, 1) ON CONFLICT(collection_name) DO UPDATE SET version = version + 1", (self._collection_name,))
+             new_version = self._get_db_version()
+ 
+             # Update the on-disk base index and its version number.
+             self._conn.execute("INSERT INTO _beaver_ann_indexes (collection_name, index_data, base_index_version) VALUES (?, ?, ?) ON CONFLICT(collection_name) DO UPDATE SET index_data = excluded.index_data, base_index_version = excluded.base_index_version", (self._collection_name, index_data, new_version))
+ 
+             # Atomically clear the log entries that were included in this compaction run.
+             if pending_str_ids:
+                 self._conn.execute(f"DELETE FROM _beaver_ann_pending_log WHERE collection_name = ? AND str_id IN ({','.join('?' for _ in pending_str_ids)})", (self._collection_name, *pending_str_ids))
+             if deleted_int_ids_snapshot:
+                 self._conn.execute(f"DELETE FROM _beaver_ann_deletions_log WHERE collection_name = ? AND int_id IN ({','.join('?' for _ in deleted_int_ids_snapshot)})", (self._collection_name, *deleted_int_ids_snapshot))
beaver_db-0.9.2.dist-info/METADATA → beaver_db-0.11.0.dist-info/METADATA
@@ -1,14 +1,16 @@
  Metadata-Version: 2.4
  Name: beaver-db
- Version: 0.9.2
+ Version: 0.11.0
  Summary: Fast, embedded, and multi-modal DB based on SQLite for AI-powered applications.
  Requires-Python: >=3.13
  Description-Content-Type: text/markdown
  License-File: LICENSE
+ Requires-Dist: faiss-cpu>=1.12.0
  Requires-Dist: numpy>=2.3.3
- Requires-Dist: scipy>=1.16.2
  Dynamic: license-file
  
+ The vector store now uses a high-performance FAISS-based implementation with in-memory and persistent indices; the README below includes a short section explaining the basic ideas behind the implementation.
+ 
  # beaver 🦫
  
  A fast, single-file, multi-modal database for Python, built with the standard `sqlite3` library.
@@ -19,10 +21,11 @@ A fast, single-file, multi-modal database for Python, built with the standard `s
  
  `beaver` is built with a minimalistic philosophy for small, local use cases where a full-blown database server would be overkill.
  
- - **Minimalistic & Zero-Dependency**: Uses only Python's standard libraries (`sqlite3`) and `numpy`/`scipy`.
- - **Synchronous & Thread-Safe**: Designed for simplicity and safety in multi-threaded environments.
+ - **Minimalistic**: Uses only Python's standard libraries (`sqlite3`) and `numpy`/`faiss-cpu`.
+ - **Schemaless**: Flexible data storage without rigid schemas across all modalities.
+ - **Synchronous, Multi-Process, and Thread-Safe**: Designed for simplicity and safety in multi-threaded and multi-process environments.
  - **Built for Local Applications**: Perfect for local AI tools, RAG prototypes, chatbots, and desktop utilities that need persistent, structured data without network overhead.
- - **Fast by Default**: It's built on SQLite, which is famously fast and reliable for local applications. The vector search is accelerated with an in-memory k-d tree.
+ - **Fast by Default**: It's built on SQLite, which is famously fast and reliable for local applications. Vector search is accelerated with a high-performance, persistent `faiss` index.
  - **Standard Relational Interface**: While `beaver` provides high-level features, you can always use the same SQLite file for normal relational tasks with standard SQL.
  
  ## Core Features
@@ -31,11 +34,28 @@ A fast, single-file, multi-modal database for Python, built with the standard `s
  - **Namespaced Key-Value Dictionaries**: A Pythonic, dictionary-like interface for storing any JSON-serializable object within separate namespaces with optional TTL for cache implementations.
  - **Pythonic List Management**: A fluent, Redis-like interface for managing persistent, ordered lists.
  - **Persistent Priority Queue**: A high-performance, persistent queue that always returns the item with the highest priority, perfect for task management.
- - **Efficient Vector Storage & Search**: Store vector embeddings and perform fast approximate nearest neighbor searches using an in-memory k-d tree.
- - **Full-Text Search**: Automatically index and search through document metadata using SQLite's powerful FTS5 engine.
- - **Graph Traversal**: Create relationships between documents and traverse the graph to find neighbors or perform multi-hop walks.
+ - **High-Performance Vector Storage & Search**: Store vector embeddings and perform fast, crash-safe approximate nearest neighbor searches using a `faiss`-based hybrid index.
+ - **Full-Text and Fuzzy Search**: Automatically index and search through document metadata using SQLite's powerful FTS5 engine, enhanced with optional fuzzy search for typo-tolerant matching.
+ - **Knowledge Graph**: Create relationships between documents and traverse the graph to find neighbors or perform multi-hop walks.
  - **Single-File & Portable**: All data is stored in a single SQLite file, making it incredibly easy to move, back up, or embed in your application.
  
+ ## How Beaver is Implemented
+ 
+ BeaverDB is architected as a set of targeted wrappers around a standard SQLite database. The core `BeaverDB` class manages a single connection to the SQLite file and initializes all the necessary tables for the various features.
+ 
+ When you call a method like `db.dict("my_dict")` or `db.collection("my_docs")`, you get back a specialized manager object (`DictManager`, `CollectionManager`, etc.) that provides a clean, Pythonic API for that specific data modality. These managers translate simple method calls (e.g., `my_dict["key"] = "value"`) into the appropriate SQL queries, handling all the complexity of data serialization, indexing, and transaction management behind the scenes. This design provides a minimal and intuitive API surface while leveraging the power and reliability of SQLite, as the sketch below illustrates.
+ 
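A minimal sketch of that manager pattern in use. The `db.dict(...)`, `db.collection(...)`, and item-assignment calls appear in this README; the import path and the `BeaverDB` constructor taking a file path are assumptions for illustration:

```python
from beaver import BeaverDB  # assumed import path

db = BeaverDB("app.db")  # one class, one connection, one SQLite file (assumed signature)

my_dict = db.dict("my_dict")     # returns a DictManager
my_dict["key"] = "value"         # translated into SQL behind the scenes

docs = db.collection("my_docs")  # returns a CollectionManager for documents and vectors
```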
+ The vector store in BeaverDB is designed for high performance and reliability, using a hybrid `faiss`-based index that is both fast and persistent. Here is a look at the core ideas behind its implementation (a condensed sketch of the search logic follows the list):
+ 
+ - **Hybrid Index System**: The vector store uses a two-tiered system to balance fast writes with efficient long-term storage:
+   - **Base Index**: A large, optimized faiss index that contains the majority of the vectors. This index is serialized and stored as a BLOB inside a dedicated SQLite table, ensuring it remains part of the single database file.
+   - **Delta Index**: A small, in-memory faiss index that holds all newly added vectors. This allows for near-instant write performance without having to rebuild the entire index for every new addition.
+ - **Crash-Safe Logging**: To ensure durability, all new vector additions and deletions are first recorded in a dedicated log table in the SQLite database. This means that even if the application crashes, no data is lost.
+ - **Automatic Compaction**: When the number of changes in the log reaches a certain threshold, a background process is automatically triggered to "compact" the index. This process rebuilds the base index, incorporating all the recent changes from the delta index, and then clears the log. This ensures that the index remains optimized for fast search performance over time.
+ 
+ This hybrid approach allows BeaverDB to provide a vector search experience that is both fast and durable, without sacrificing the single-file, embedded philosophy of the library.
+ 
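To make the two-tier idea concrete, here is a condensed, self-contained sketch of the merge-and-filter step that `VectorIndex.search()` in `beaver/vectors.py` performs, with synthetic data standing in for the SQLite-backed base index, delta index, and deletions log:

```python
import faiss
import numpy as np

dim, top_k = 8, 5

# Two tiers: a large "base" index (persisted as a BLOB in beaver) and a
# small in-memory "delta" index holding recent, not-yet-compacted vectors.
base = faiss.IndexIDMap(faiss.IndexFlatL2(dim))
delta = faiss.IndexIDMap(faiss.IndexFlatL2(dim))
base.add_with_ids(np.random.rand(100, dim).astype(np.float32),
                  np.arange(100, dtype=np.int64))
delta.add_with_ids(np.random.rand(5, dim).astype(np.float32),
                   np.arange(100, 105, dtype=np.int64))
deleted = {3, 101}  # tombstoned integer IDs from the deletions log

query = np.random.rand(1, dim).astype(np.float32)

# Query both tiers and pool the candidates.
candidates = []
for index in (base, delta):
    distances, ids = index.search(query, top_k)
    candidates.extend(zip(distances[0], ids[0]))

# Keep the closest non-deleted, non-duplicate hits across both tiers.
results, seen = [], set()
for dist, int_id in sorted(candidates):
    if int_id != -1 and int_id not in deleted and int_id not in seen:
        results.append((int(int_id), float(dist)))
        seen.add(int_id)
    if len(results) == top_k:
        break
print(results)
```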
  ## Installation
  
  ```bash
@@ -135,7 +155,7 @@ for message in chat_history:
  
  ### 4. Build a RAG (Retrieval-Augmented Generation) System
  
- Combine **vector search** and **full-text search** to build a powerful RAG pipeline for your local documents.
+ Combine **vector search** and **full-text search** to build a powerful RAG pipeline for your local documents. The vector search uses a high-performance, persistent `faiss` index that supports incremental additions without downtime.
  
  ```python
  # Get context for a user query like "fast python web frameworks"
@@ -194,14 +214,15 @@ For more in-depth examples, check out the scripts in the `examples/` directory:
  - [`examples/publisher.py`](examples/publisher.py) and [`examples/subscriber.py`](examples/subscriber.py): A pair of examples demonstrating inter-process message passing with the publish/subscribe system.
  - [`examples/cache.py`](examples/cache.py): A practical example of using a dictionary with TTL as a cache for API calls.
  - [`examples/rerank.py`](examples/rerank.py): Shows how to combine results from vector and text search for more refined results.
+ - [`examples/fuzzy.py`](examples/fuzzy.py): Demonstrates fuzzy search capabilities for text search.
+ - [`examples/stress_vectors.py`](examples/stress_vectors.py): A stress test for the vector search functionality.
+ - [`examples/general_test.py`](examples/general_test.py): A general-purpose test that runs all operations randomly, useful for exercising long-running processes and catching synchronization issues.
  
  ## Roadmap
  
  These are some of the features and improvements planned for future releases:
  
- - **Fuzzy search**: Implement fuzzy matching capabilities for text search.
- - **Faster ANN**: Explore integrating more advanced ANN libraries like `faiss` for improved vector search performance.
- - **Async API**: Comprehensive async support with on-demand wrappers for all collections.
+ - **Full Async API**: Comprehensive async support with on-demand wrappers for all collections.
  
  Check out the [roadmap](roadmap.md) for a detailed list of upcoming features and design ideas.
  
beaver_db-0.11.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ beaver/__init__.py,sha256=-z5Gj6YKMOswpJOOn5Gej8z5i6k3c0Xs00DIYLA-bMI,75
+ beaver/channels.py,sha256=jKL1sVLOe_Q_pP0q1-iceZbPe8FOi0EwqJtOMOe96f4,8675
+ beaver/collections.py,sha256=CXWB8xlyazdJpnhizRkmGmLdN3yt3M2BYNFwr2Ijbas,23896
+ beaver/core.py,sha256=BQsYUA99U2ZT8mXbkBidzVpmTI9KPaF19efASCHCXyM,10569
+ beaver/dicts.py,sha256=y4z632XKWU29ekP_vdFSOP-MAG9Z8b79kBEHA88gO7E,4463
+ beaver/lists.py,sha256=jFlDWwyaYycG0ZFVm58rMChefUaVZhaP1UeQ-hVo3Sg,9082
+ beaver/queues.py,sha256=WKpBzlXr9Hp_rOKEs_Y1Tjyj_hWx6ql1uBRKBV7rw8w,2780
+ beaver/vectors.py,sha256=j7RL2Y_xMAF2tPTi6E2LdJqZerSQXlnEQJOGZkefTsA,18358
+ beaver_db-0.11.0.dist-info/licenses/LICENSE,sha256=1xrIY5JnMk_QDQzsqmVzPIIyCgZAkWCC8kF2Ddo1UT0,1071
+ beaver_db-0.11.0.dist-info/METADATA,sha256=QjYGU-36h-e8EhVMmrLq2UKhFUBKFROEHw6ouPVsqE4,12928
+ beaver_db-0.11.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ beaver_db-0.11.0.dist-info/top_level.txt,sha256=FxA4XnX5Qm5VudEXCduFriqi4dQmDWpQ64d7g69VQKI,7
+ beaver_db-0.11.0.dist-info/RECORD,,
beaver_db-0.9.2.dist-info/RECORD DELETED
@@ -1,12 +0,0 @@
- beaver/__init__.py,sha256=-z5Gj6YKMOswpJOOn5Gej8z5i6k3c0Xs00DIYLA-bMI,75
- beaver/channels.py,sha256=jKL1sVLOe_Q_pP0q1-iceZbPe8FOi0EwqJtOMOe96f4,8675
- beaver/collections.py,sha256=R4bVmP37s_ZnCkb3Jdck2H8dRvD3-ihFV4mEsA14YeE,15716
- beaver/core.py,sha256=l5hI55vc2VlF1b_a6CP7ZP5r7H-MQdTNV4zr2lSumcs,7864
- beaver/dicts.py,sha256=y4z632XKWU29ekP_vdFSOP-MAG9Z8b79kBEHA88gO7E,4463
- beaver/lists.py,sha256=jFlDWwyaYycG0ZFVm58rMChefUaVZhaP1UeQ-hVo3Sg,9082
- beaver/queues.py,sha256=WKpBzlXr9Hp_rOKEs_Y1Tjyj_hWx6ql1uBRKBV7rw8w,2780
- beaver_db-0.9.2.dist-info/licenses/LICENSE,sha256=1xrIY5JnMk_QDQzsqmVzPIIyCgZAkWCC8kF2Ddo1UT0,1071
- beaver_db-0.9.2.dist-info/METADATA,sha256=vUT6BRbtpQ-lY_FatTiNx2z8ZaxDwOepFPuhF-NRHvA,9725
- beaver_db-0.9.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- beaver_db-0.9.2.dist-info/top_level.txt,sha256=FxA4XnX5Qm5VudEXCduFriqi4dQmDWpQ64d7g69VQKI,7
- beaver_db-0.9.2.dist-info/RECORD,,