@mastra/qdrant 1.0.0-beta.2 → 1.0.0-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/docs/README.md +32 -0
- package/dist/docs/SKILL.md +33 -0
- package/dist/docs/SOURCE_MAP.json +6 -0
- package/dist/docs/rag/01-vector-databases.md +638 -0
- package/dist/docs/rag/02-retrieval.md +549 -0
- package/dist/docs/vectors/01-reference.md +90 -0
- package/package.json +8 -8
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,23 @@
|
|
|
1
1
|
# @mastra/qdrant
|
|
2
2
|
|
|
3
|
+
## 1.0.0-beta.3
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- Add embedded documentation support for Mastra packages ([#11472](https://github.com/mastra-ai/mastra/pull/11472))
|
|
8
|
+
|
|
9
|
+
Mastra packages now include embedded documentation in the published npm package under `dist/docs/`. This enables coding agents and AI assistants to understand and use the framework by reading documentation directly from `node_modules`.
|
|
10
|
+
|
|
11
|
+
Each package includes:
|
|
12
|
+
- **SKILL.md** - Entry point explaining the package's purpose and capabilities
|
|
13
|
+
- **SOURCE_MAP.json** - Machine-readable index mapping exports to types and implementation files
|
|
14
|
+
- **Topic folders** - Conceptual documentation organized by feature area
|
|
15
|
+
|
|
16
|
+
Documentation is driven by the `packages` frontmatter field in MDX files, which maps docs to their corresponding packages. CI validation ensures all docs include this field.
|
|
17
|
+
|
|
18
|
+
- Updated dependencies [[`d2d3e22`](https://github.com/mastra-ai/mastra/commit/d2d3e22a419ee243f8812a84e3453dd44365ecb0), [`bc72b52`](https://github.com/mastra-ai/mastra/commit/bc72b529ee4478fe89ecd85a8be47ce0127b82a0), [`05b8bee`](https://github.com/mastra-ai/mastra/commit/05b8bee9e50e6c2a4a2bf210eca25ee212ca24fa), [`c042bd0`](https://github.com/mastra-ai/mastra/commit/c042bd0b743e0e86199d0cb83344ca7690e34a9c), [`940a2b2`](https://github.com/mastra-ai/mastra/commit/940a2b27480626ed7e74f55806dcd2181c1dd0c2), [`e0941c3`](https://github.com/mastra-ai/mastra/commit/e0941c3d7fc75695d5d258e7008fd5d6e650800c), [`0c0580a`](https://github.com/mastra-ai/mastra/commit/0c0580a42f697cd2a7d5973f25bfe7da9055038a), [`28f5f89`](https://github.com/mastra-ai/mastra/commit/28f5f89705f2409921e3c45178796c0e0d0bbb64), [`e601b27`](https://github.com/mastra-ai/mastra/commit/e601b272c70f3a5ecca610373aa6223012704892), [`3d3366f`](https://github.com/mastra-ai/mastra/commit/3d3366f31683e7137d126a3a57174a222c5801fb), [`5a4953f`](https://github.com/mastra-ai/mastra/commit/5a4953f7d25bb15ca31ed16038092a39cb3f98b3), [`eb9e522`](https://github.com/mastra-ai/mastra/commit/eb9e522ce3070a405e5b949b7bf5609ca51d7fe2), [`20e6f19`](https://github.com/mastra-ai/mastra/commit/20e6f1971d51d3ff6dd7accad8aaaae826d540ed), [`4f0b3c6`](https://github.com/mastra-ai/mastra/commit/4f0b3c66f196c06448487f680ccbb614d281e2f7), [`74c4f22`](https://github.com/mastra-ai/mastra/commit/74c4f22ed4c71e72598eacc346ba95cdbc00294f), [`81b6a8f`](https://github.com/mastra-ai/mastra/commit/81b6a8ff79f49a7549d15d66624ac1a0b8f5f971), [`e4d366a`](https://github.com/mastra-ai/mastra/commit/e4d366aeb500371dd4210d6aa8361a4c21d87034), [`a4f010b`](https://github.com/mastra-ai/mastra/commit/a4f010b22e4355a5fdee70a1fe0f6e4a692cc29e), [`73b0bb3`](https://github.com/mastra-ai/mastra/commit/73b0bb394dba7c9482eb467a97ab283dbc0ef4db), [`5627a8c`](https://github.com/mastra-ai/mastra/commit/5627a8c6dc11fe3711b3fa7a6ffd6eb34100a306), 
[`3ff45d1`](https://github.com/mastra-ai/mastra/commit/3ff45d10e0c80c5335a957ab563da72feb623520), [`251df45`](https://github.com/mastra-ai/mastra/commit/251df4531407dfa46d805feb40ff3fb49769f455), [`f894d14`](https://github.com/mastra-ai/mastra/commit/f894d148946629af7b1f452d65a9cf864cec3765), [`c2b9547`](https://github.com/mastra-ai/mastra/commit/c2b9547bf435f56339f23625a743b2147ab1c7a6), [`580b592`](https://github.com/mastra-ai/mastra/commit/580b5927afc82fe460dfdf9a38a902511b6b7e7f), [`58e3931`](https://github.com/mastra-ai/mastra/commit/58e3931af9baa5921688566210f00fb0c10479fa), [`08bb631`](https://github.com/mastra-ai/mastra/commit/08bb631ae2b14684b2678e3549d0b399a6f0561e), [`4fba91b`](https://github.com/mastra-ai/mastra/commit/4fba91bec7c95911dc28e369437596b152b04cd0), [`12b0cc4`](https://github.com/mastra-ai/mastra/commit/12b0cc4077d886b1a552637dedb70a7ade93528c)]:
|
|
19
|
+
- @mastra/core@1.0.0-beta.20
|
|
20
|
+
|
|
3
21
|
## 1.0.0-beta.2
|
|
4
22
|
|
|
5
23
|
### Patch Changes
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# @mastra/qdrant Documentation
|
|
2
|
+
|
|
3
|
+
> Embedded documentation for coding agents
|
|
4
|
+
|
|
5
|
+
## Quick Start
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
# Read the skill overview
|
|
9
|
+
cat docs/SKILL.md
|
|
10
|
+
|
|
11
|
+
# Get the source map
|
|
12
|
+
cat docs/SOURCE_MAP.json
|
|
13
|
+
|
|
14
|
+
# Read topic documentation
|
|
15
|
+
cat docs/<topic>/01-overview.md
|
|
16
|
+
```
|
|
17
|
+
|
|
18
|
+
## Structure
|
|
19
|
+
|
|
20
|
+
```
|
|
21
|
+
docs/
|
|
22
|
+
├── SKILL.md # Entry point
|
|
23
|
+
├── README.md # This file
|
|
24
|
+
├── SOURCE_MAP.json # Export index
|
|
25
|
+
├── rag/ (2 files)
|
|
26
|
+
├── vectors/ (1 file)
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
## Version
|
|
30
|
+
|
|
31
|
+
Package: @mastra/qdrant
|
|
32
|
+
Version: 1.0.0-beta.3
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: mastra-qdrant-docs
|
|
3
|
+
description: Documentation for @mastra/qdrant. Includes links to type definitions and readable implementation code in dist/.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# @mastra/qdrant Documentation
|
|
7
|
+
|
|
8
|
+
> **Version**: 1.0.0-beta.3
|
|
9
|
+
> **Package**: @mastra/qdrant
|
|
10
|
+
|
|
11
|
+
## Quick Navigation
|
|
12
|
+
|
|
13
|
+
Use SOURCE_MAP.json to find any export:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
cat docs/SOURCE_MAP.json
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
Each export maps to:
|
|
20
|
+
- **types**: `.d.ts` file with JSDoc and API signatures
|
|
21
|
+
- **implementation**: `.js` chunk file with readable source
|
|
22
|
+
- **docs**: Conceptual documentation in `docs/`
|
|
23
|
+
|
|
24
|
+
## Top Exports
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
See SOURCE_MAP.json for the complete list.
|
|
29
|
+
|
|
30
|
+
## Available Topics
|
|
31
|
+
|
|
32
|
+
- [Rag](rag/) - 2 file(s)
|
|
33
|
+
- [Vectors](vectors/) - 1 file(s)
|
|
@@ -0,0 +1,638 @@
|
|
|
1
|
+
> Guide on vector storage options in Mastra, including embedded and dedicated vector databases for similarity search.
|
|
2
|
+
|
|
3
|
+
# Storing Embeddings in A Vector Database
|
|
4
|
+
|
|
5
|
+
After generating embeddings, you need to store them in a database that supports vector similarity search. Mastra provides a consistent interface for storing and querying embeddings across various vector databases.
|
|
6
|
+
|
|
7
|
+
## Supported Databases
|
|
8
|
+
|
|
9
|
+
**mongodb:**
|
|
10
|
+
|
|
11
|
+
```ts title="vector-store.ts"
|
|
12
|
+
import { MongoDBVector } from "@mastra/mongodb";
|
|
13
|
+
|
|
14
|
+
const store = new MongoDBVector({
|
|
15
|
+
uri: process.env.MONGODB_URI,
|
|
16
|
+
dbName: process.env.MONGODB_DATABASE,
|
|
17
|
+
});
|
|
18
|
+
await store.createIndex({
|
|
19
|
+
indexName: "myCollection",
|
|
20
|
+
dimension: 1536,
|
|
21
|
+
});
|
|
22
|
+
await store.upsert({
|
|
23
|
+
indexName: "myCollection",
|
|
24
|
+
vectors: embeddings,
|
|
25
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
26
|
+
});
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
### Using MongoDB Atlas Vector Search
|
|
30
|
+
|
|
31
|
+
For detailed setup instructions and best practices, see the [official MongoDB Atlas Vector Search documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/?utm_campaign=devrel&utm_source=third-party-content&utm_medium=cta&utm_content=mastra-docs).
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
**pg-vector:**
|
|
36
|
+
|
|
37
|
+
```ts title="vector-store.ts"
|
|
38
|
+
import { PgVector } from "@mastra/pg";
|
|
39
|
+
|
|
40
|
+
const store = new PgVector({
|
|
41
|
+
id: 'pg-vector',
|
|
42
|
+
connectionString: process.env.POSTGRES_CONNECTION_STRING,
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
await store.createIndex({
|
|
46
|
+
indexName: "myCollection",
|
|
47
|
+
dimension: 1536,
|
|
48
|
+
});
|
|
49
|
+
|
|
50
|
+
await store.upsert({
|
|
51
|
+
indexName: "myCollection",
|
|
52
|
+
vectors: embeddings,
|
|
53
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
54
|
+
});
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
### Using PostgreSQL with pgvector
|
|
58
|
+
|
|
59
|
+
PostgreSQL with the pgvector extension is a good solution for teams already using PostgreSQL who want to minimize infrastructure complexity.
|
|
60
|
+
For detailed setup instructions and best practices, see the [official pgvector repository](https://github.com/pgvector/pgvector).
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
**pinecone:**
|
|
65
|
+
|
|
66
|
+
```ts title="vector-store.ts"
|
|
67
|
+
import { PineconeVector } from "@mastra/pinecone";
|
|
68
|
+
|
|
69
|
+
const store = new PineconeVector({
|
|
70
|
+
id: 'pinecone-vector',
|
|
71
|
+
apiKey: process.env.PINECONE_API_KEY,
|
|
72
|
+
});
|
|
73
|
+
await store.createIndex({
|
|
74
|
+
indexName: "myCollection",
|
|
75
|
+
dimension: 1536,
|
|
76
|
+
});
|
|
77
|
+
await store.upsert({
|
|
78
|
+
indexName: "myCollection",
|
|
79
|
+
vectors: embeddings,
|
|
80
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
81
|
+
});
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
**qdrant:**
|
|
87
|
+
|
|
88
|
+
```ts title="vector-store.ts"
|
|
89
|
+
import { QdrantVector } from "@mastra/qdrant";
|
|
90
|
+
|
|
91
|
+
const store = new QdrantVector({
|
|
92
|
+
id: 'qdrant-vector',
|
|
93
|
+
url: process.env.QDRANT_URL,
|
|
94
|
+
apiKey: process.env.QDRANT_API_KEY,
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
await store.createIndex({
|
|
98
|
+
indexName: "myCollection",
|
|
99
|
+
dimension: 1536,
|
|
100
|
+
});
|
|
101
|
+
|
|
102
|
+
await store.upsert({
|
|
103
|
+
indexName: "myCollection",
|
|
104
|
+
vectors: embeddings,
|
|
105
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
106
|
+
});
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
**chroma:**
|
|
112
|
+
|
|
113
|
+
```ts title="vector-store.ts"
|
|
114
|
+
import { ChromaVector } from "@mastra/chroma";
|
|
115
|
+
|
|
116
|
+
// Running Chroma locally
|
|
117
|
+
// const store = new ChromaVector()
|
|
118
|
+
|
|
119
|
+
// Running on Chroma Cloud
|
|
120
|
+
const store = new ChromaVector({
|
|
121
|
+
id: 'chroma-vector',
|
|
122
|
+
apiKey: process.env.CHROMA_API_KEY,
|
|
123
|
+
tenant: process.env.CHROMA_TENANT,
|
|
124
|
+
database: process.env.CHROMA_DATABASE,
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
await store.createIndex({
|
|
128
|
+
indexName: "myCollection",
|
|
129
|
+
dimension: 1536,
|
|
130
|
+
});
|
|
131
|
+
|
|
132
|
+
await store.upsert({
|
|
133
|
+
indexName: "myCollection",
|
|
134
|
+
vectors: embeddings,
|
|
135
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
136
|
+
});
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
**astra:**
|
|
142
|
+
|
|
143
|
+
```ts title="vector-store.ts"
|
|
144
|
+
import { AstraVector } from "@mastra/astra";
|
|
145
|
+
|
|
146
|
+
const store = new AstraVector({
|
|
147
|
+
token: process.env.ASTRA_DB_TOKEN,
|
|
148
|
+
endpoint: process.env.ASTRA_DB_ENDPOINT,
|
|
149
|
+
keyspace: process.env.ASTRA_DB_KEYSPACE,
|
|
150
|
+
});
|
|
151
|
+
|
|
152
|
+
await store.createIndex({
|
|
153
|
+
indexName: "myCollection",
|
|
154
|
+
dimension: 1536,
|
|
155
|
+
});
|
|
156
|
+
|
|
157
|
+
await store.upsert({
|
|
158
|
+
indexName: "myCollection",
|
|
159
|
+
vectors: embeddings,
|
|
160
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
161
|
+
});
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
**libsql:**
|
|
167
|
+
|
|
168
|
+
```ts title="vector-store.ts"
|
|
169
|
+
import { LibSQLVector } from "@mastra/core/vector/libsql";
|
|
170
|
+
|
|
171
|
+
const store = new LibSQLVector({
|
|
172
|
+
id: 'libsql-vector',
|
|
173
|
+
connectionUrl: process.env.DATABASE_URL,
|
|
174
|
+
authToken: process.env.DATABASE_AUTH_TOKEN, // Optional: for Turso cloud databases
|
|
175
|
+
});
|
|
176
|
+
|
|
177
|
+
await store.createIndex({
|
|
178
|
+
indexName: "myCollection",
|
|
179
|
+
dimension: 1536,
|
|
180
|
+
});
|
|
181
|
+
|
|
182
|
+
await store.upsert({
|
|
183
|
+
indexName: "myCollection",
|
|
184
|
+
vectors: embeddings,
|
|
185
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
186
|
+
});
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
**upstash:**
|
|
192
|
+
|
|
193
|
+
```ts title="vector-store.ts"
|
|
194
|
+
import { UpstashVector } from "@mastra/upstash";
|
|
195
|
+
|
|
196
|
+
// In upstash they refer to the store as an index
|
|
197
|
+
const store = new UpstashVector({
|
|
198
|
+
id: 'upstash-vector',
|
|
199
|
+
url: process.env.UPSTASH_URL,
|
|
200
|
+
token: process.env.UPSTASH_TOKEN,
|
|
201
|
+
});
|
|
202
|
+
|
|
203
|
+
// There is no store.createIndex call here, Upstash creates indexes (known as namespaces in Upstash) automatically
|
|
204
|
+
// when you upsert if that namespace does not exist yet.
|
|
205
|
+
await store.upsert({
|
|
206
|
+
indexName: "myCollection", // the namespace name in Upstash
|
|
207
|
+
vectors: embeddings,
|
|
208
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
209
|
+
});
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
**cloudflare:**
|
|
215
|
+
|
|
216
|
+
```ts title="vector-store.ts"
|
|
217
|
+
import { CloudflareVector } from "@mastra/vectorize";
|
|
218
|
+
|
|
219
|
+
const store = new CloudflareVector({
|
|
220
|
+
accountId: process.env.CF_ACCOUNT_ID,
|
|
221
|
+
apiToken: process.env.CF_API_TOKEN,
|
|
222
|
+
});
|
|
223
|
+
await store.createIndex({
|
|
224
|
+
indexName: "myCollection",
|
|
225
|
+
dimension: 1536,
|
|
226
|
+
});
|
|
227
|
+
await store.upsert({
|
|
228
|
+
indexName: "myCollection",
|
|
229
|
+
vectors: embeddings,
|
|
230
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
231
|
+
});
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
**opensearch:**
|
|
237
|
+
|
|
238
|
+
```ts title="vector-store.ts"
|
|
239
|
+
import { OpenSearchVector } from "@mastra/opensearch";
|
|
240
|
+
|
|
241
|
+
const store = new OpenSearchVector({ url: process.env.OPENSEARCH_URL });
|
|
242
|
+
|
|
243
|
+
await store.createIndex({
|
|
244
|
+
indexName: "my-collection",
|
|
245
|
+
dimension: 1536,
|
|
246
|
+
});
|
|
247
|
+
|
|
248
|
+
await store.upsert({
|
|
249
|
+
indexName: "my-collection",
|
|
250
|
+
vectors: embeddings,
|
|
251
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
252
|
+
});
|
|
253
|
+
```
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
**elasticsearch:**
|
|
258
|
+
|
|
259
|
+
```ts title="vector-store.ts"
|
|
260
|
+
import { ElasticSearchVector } from "@mastra/elasticsearch";
|
|
261
|
+
|
|
262
|
+
const store = new ElasticSearchVector({ url: process.env.ELASTICSEARCH_URL });
|
|
263
|
+
|
|
264
|
+
await store.createIndex({
|
|
265
|
+
indexName: "my-collection",
|
|
266
|
+
dimension: 1536,
|
|
267
|
+
});
|
|
268
|
+
|
|
269
|
+
await store.upsert({
|
|
270
|
+
indexName: "my-collection",
|
|
271
|
+
vectors: embeddings,
|
|
272
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
273
|
+
});
|
|
274
|
+
```
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
**couchbase:**
|
|
278
|
+
|
|
279
|
+
```ts title="vector-store.ts"
|
|
280
|
+
import { CouchbaseVector } from "@mastra/couchbase";
|
|
281
|
+
|
|
282
|
+
const store = new CouchbaseVector({
|
|
283
|
+
connectionString: process.env.COUCHBASE_CONNECTION_STRING,
|
|
284
|
+
username: process.env.COUCHBASE_USERNAME,
|
|
285
|
+
password: process.env.COUCHBASE_PASSWORD,
|
|
286
|
+
bucketName: process.env.COUCHBASE_BUCKET,
|
|
287
|
+
scopeName: process.env.COUCHBASE_SCOPE,
|
|
288
|
+
collectionName: process.env.COUCHBASE_COLLECTION,
|
|
289
|
+
});
|
|
290
|
+
await store.createIndex({
|
|
291
|
+
indexName: "myCollection",
|
|
292
|
+
dimension: 1536,
|
|
293
|
+
});
|
|
294
|
+
await store.upsert({
|
|
295
|
+
indexName: "myCollection",
|
|
296
|
+
vectors: embeddings,
|
|
297
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
298
|
+
});
|
|
299
|
+
```
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
**lancedb:**
|
|
303
|
+
|
|
304
|
+
```ts title="vector-store.ts"
|
|
305
|
+
import { LanceVectorStore } from "@mastra/lance";
|
|
306
|
+
|
|
307
|
+
const store = await LanceVectorStore.create("/path/to/db");
|
|
308
|
+
|
|
309
|
+
await store.createIndex({
|
|
310
|
+
tableName: "myVectors",
|
|
311
|
+
indexName: "myCollection",
|
|
312
|
+
dimension: 1536,
|
|
313
|
+
});
|
|
314
|
+
|
|
315
|
+
await store.upsert({
|
|
316
|
+
tableName: "myVectors",
|
|
317
|
+
vectors: embeddings,
|
|
318
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
319
|
+
});
|
|
320
|
+
```
|
|
321
|
+
|
|
322
|
+
### Using LanceDB
|
|
323
|
+
|
|
324
|
+
LanceDB is an embedded vector database built on the Lance columnar format, suitable for local development or cloud deployment.
|
|
325
|
+
For detailed setup instructions and best practices, see the [official LanceDB documentation](https://lancedb.github.io/lancedb/).
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
**s3vectors:**
|
|
329
|
+
|
|
330
|
+
```ts title="vector-store.ts"
|
|
331
|
+
import { S3Vectors } from "@mastra/s3vectors";
|
|
332
|
+
|
|
333
|
+
const store = new S3Vectors({
|
|
334
|
+
vectorBucketName: "my-vector-bucket",
|
|
335
|
+
clientConfig: {
|
|
336
|
+
region: "us-east-1",
|
|
337
|
+
},
|
|
338
|
+
nonFilterableMetadataKeys: ["content"],
|
|
339
|
+
});
|
|
340
|
+
|
|
341
|
+
await store.createIndex({
|
|
342
|
+
indexName: "my-index",
|
|
343
|
+
dimension: 1536,
|
|
344
|
+
});
|
|
345
|
+
await store.upsert({
|
|
346
|
+
indexName: "my-index",
|
|
347
|
+
vectors: embeddings,
|
|
348
|
+
metadata: chunks.map((chunk) => ({ text: chunk.text })),
|
|
349
|
+
});
|
|
350
|
+
```
|
|
351
|
+
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
## Using Vector Storage
|
|
355
|
+
|
|
356
|
+
Once initialized, all vector stores share the same interface for creating indexes, upserting embeddings, and querying.
|
|
357
|
+
|
|
358
|
+
### Creating Indexes
|
|
359
|
+
|
|
360
|
+
Before storing embeddings, you need to create an index with the appropriate dimension size for your embedding model:
|
|
361
|
+
|
|
362
|
+
```ts title="store-embeddings.ts"
|
|
363
|
+
// Create an index with dimension 1536 (for text-embedding-3-small)
|
|
364
|
+
await store.createIndex({
|
|
365
|
+
indexName: "myCollection",
|
|
366
|
+
dimension: 1536,
|
|
367
|
+
});
|
|
368
|
+
```
|
|
369
|
+
|
|
370
|
+
The dimension size must match the output dimension of your chosen embedding model. Common dimension sizes are:
|
|
371
|
+
|
|
372
|
+
- OpenAI text-embedding-3-small: 1536 dimensions (or custom, e.g., 256)
|
|
373
|
+
- Cohere embed-multilingual-v3: 1024 dimensions
|
|
374
|
+
- Google text-embedding-004: 768 dimensions (or custom)
|
|
375
|
+
|
|
376
|
+
important
|
|
377
|
+
Index dimensions cannot be changed after creation. To use a different model, delete and recreate the index with the new dimension size.
|
|
378
|
+
|
|
379
|
+
### Naming Rules for Databases
|
|
380
|
+
|
|
381
|
+
Each vector database enforces specific naming conventions for indexes and collections to ensure compatibility and prevent conflicts.
|
|
382
|
+
|
|
383
|
+
**mongodb:**
|
|
384
|
+
|
|
385
|
+
Collection (index) names must:
|
|
386
|
+
- Start with a letter or underscore
|
|
387
|
+
- Be up to 120 bytes long
|
|
388
|
+
- Contain only letters, numbers, underscores, or dots
|
|
389
|
+
- Cannot contain `$` or the null character
|
|
390
|
+
- Example: `my_collection.123` is valid
|
|
391
|
+
- Example: `my-index` is not valid (contains hyphen)
|
|
392
|
+
- Example: `My$Collection` is not valid (contains `$`)
|
|
393
|
+
|
|
394
|
+
**pgVector:**
|
|
395
|
+
|
|
396
|
+
Index names must:
|
|
397
|
+
- Start with a letter or underscore
|
|
398
|
+
- Contain only letters, numbers, and underscores
|
|
399
|
+
- Example: `my_index_123` is valid
|
|
400
|
+
- Example: `my-index` is not valid (contains hyphen)
|
|
401
|
+
|
|
402
|
+
**pinecone:**
|
|
403
|
+
|
|
404
|
+
Index names must:
|
|
405
|
+
- Use only lowercase letters, numbers, and dashes
|
|
406
|
+
- Not contain dots (used for DNS routing)
|
|
407
|
+
- Not use non-Latin characters or emojis
|
|
408
|
+
- Have a combined length (with project ID) under 52 characters
|
|
409
|
+
- Example: `my-index-123` is valid
|
|
410
|
+
- Example: `my.index` is not valid (contains dot)
|
|
411
|
+
|
|
412
|
+
**qdrant:**
|
|
413
|
+
|
|
414
|
+
Collection names must:
|
|
415
|
+
- Be 1-255 characters long
|
|
416
|
+
- Not contain any of these special characters:
|
|
417
|
+
- `< > : " / \ | ? *`
|
|
418
|
+
- Null character (`\0`)
|
|
419
|
+
- Unit separator (`\u{1F}`)
|
|
420
|
+
- Example: `my_collection_123` is valid
|
|
421
|
+
- Example: `my/collection` is not valid (contains slash)
|
|
422
|
+
|
|
423
|
+
**chroma:**
|
|
424
|
+
|
|
425
|
+
Collection names must:
|
|
426
|
+
- Be 3-63 characters long
|
|
427
|
+
- Start and end with a letter or number
|
|
428
|
+
- Contain only letters, numbers, underscores, or hyphens
|
|
429
|
+
- Not contain consecutive periods (..)
|
|
430
|
+
- Not be a valid IPv4 address
|
|
431
|
+
- Example: `my-collection-123` is valid
|
|
432
|
+
- Example: `my..collection` is not valid (consecutive periods)
|
|
433
|
+
|
|
434
|
+
**astra:**
|
|
435
|
+
|
|
436
|
+
Collection names must:
|
|
437
|
+
- Not be empty
|
|
438
|
+
- Be 48 characters or less
|
|
439
|
+
- Contain only letters, numbers, and underscores
|
|
440
|
+
- Example: `my_collection_123` is valid
|
|
441
|
+
- Example: `my-collection` is not valid (contains hyphen)
|
|
442
|
+
|
|
443
|
+
**libsql:**
|
|
444
|
+
|
|
445
|
+
Index names must:
|
|
446
|
+
- Start with a letter or underscore
|
|
447
|
+
- Contain only letters, numbers, and underscores
|
|
448
|
+
- Example: `my_index_123` is valid
|
|
449
|
+
- Example: `my-index` is not valid (contains hyphen)
|
|
450
|
+
|
|
451
|
+
**upstash:**
|
|
452
|
+
|
|
453
|
+
Namespace names must:
|
|
454
|
+
- Be 2-100 characters long
|
|
455
|
+
- Contain only:
|
|
456
|
+
- Alphanumeric characters (a-z, A-Z, 0-9)
|
|
457
|
+
- Underscores, hyphens, dots
|
|
458
|
+
- Not start or end with special characters (_, -, .)
|
|
459
|
+
- Can be case-sensitive
|
|
460
|
+
- Example: `MyNamespace123` is valid
|
|
461
|
+
- Example: `_namespace` is not valid (starts with underscore)
|
|
462
|
+
|
|
463
|
+
**cloudflare:**
|
|
464
|
+
|
|
465
|
+
Index names must:
|
|
466
|
+
- Start with a letter
|
|
467
|
+
- Be shorter than 32 characters
|
|
468
|
+
- Contain only lowercase ASCII letters, numbers, and dashes
|
|
469
|
+
- Use dashes instead of spaces
|
|
470
|
+
- Example: `my-index-123` is valid
|
|
471
|
+
- Example: `My_Index` is not valid (uppercase and underscore)
|
|
472
|
+
|
|
473
|
+
**opensearch:**
|
|
474
|
+
|
|
475
|
+
Index names must:
|
|
476
|
+
- Use only lowercase letters
|
|
477
|
+
- Not begin with underscores or hyphens
|
|
478
|
+
- Not contain spaces, commas
|
|
479
|
+
- Not contain special characters (e.g. `:`, `"`, `*`, `+`, `/`, `\`, `|`, `?`, `#`, `>`, `<`)
|
|
480
|
+
- Example: `my-index-123` is valid
|
|
481
|
+
- Example: `My_Index` is not valid (contains uppercase letters)
|
|
482
|
+
- Example: `_myindex` is not valid (begins with underscore)
|
|
483
|
+
|
|
484
|
+
**elasticsearch:**
|
|
485
|
+
|
|
486
|
+
Index names must:
|
|
487
|
+
- Use only lowercase letters
|
|
488
|
+
- Not exceed 255 bytes (counting multi-byte characters)
|
|
489
|
+
- Not begin with underscores, hyphens, or plus signs
|
|
490
|
+
- Not contain spaces, commas
|
|
491
|
+
- Not contain special characters (e.g. `:`, `"`, `*`, `+`, `/`, `\`, `|`, `?`, `#`, `>`, `<`)
|
|
492
|
+
- Not be "." or ".."
|
|
493
|
+
- Not start with "." (deprecated except for system/hidden indices)
|
|
494
|
+
- Example: `my-index-123` is valid
|
|
495
|
+
- Example: `My_Index` is not valid (contains uppercase letters)
|
|
496
|
+
- Example: `_myindex` is not valid (begins with underscore)
|
|
497
|
+
- Example: `.myindex` is not valid (begins with dot, deprecated)
|
|
498
|
+
|
|
499
|
+
**s3vectors:**
|
|
500
|
+
|
|
501
|
+
Index names must:
|
|
502
|
+
- Be unique within the same vector bucket
|
|
503
|
+
- Be 3–63 characters long
|
|
504
|
+
- Use only lowercase letters (`a–z`), numbers (`0–9`), hyphens (`-`), and dots (`.`)
|
|
505
|
+
- Begin and end with a letter or number
|
|
506
|
+
- Example: `my-index.123` is valid
|
|
507
|
+
- Example: `my_index` is not valid (contains underscore)
|
|
508
|
+
- Example: `-myindex` is not valid (begins with hyphen)
|
|
509
|
+
- Example: `myindex-` is not valid (ends with hyphen)
|
|
510
|
+
- Example: `MyIndex` is not valid (contains uppercase letters)
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
### Upserting Embeddings
|
|
514
|
+
|
|
515
|
+
After creating an index, you can store embeddings along with their basic metadata:
|
|
516
|
+
|
|
517
|
+
```ts title="store-embeddings.ts"
|
|
518
|
+
// Store embeddings with their corresponding metadata
|
|
519
|
+
await store.upsert({
|
|
520
|
+
indexName: "myCollection", // index name
|
|
521
|
+
vectors: embeddings, // array of embedding vectors
|
|
522
|
+
metadata: chunks.map((chunk) => ({
|
|
523
|
+
text: chunk.text, // The original text content
|
|
524
|
+
id: chunk.id, // Optional unique identifier
|
|
525
|
+
})),
|
|
526
|
+
});
|
|
527
|
+
```
|
|
528
|
+
|
|
529
|
+
The upsert operation:
|
|
530
|
+
|
|
531
|
+
- Takes an array of embedding vectors and their corresponding metadata
|
|
532
|
+
- Updates existing vectors if they share the same ID
|
|
533
|
+
- Creates new vectors if they don't exist
|
|
534
|
+
- Automatically handles batching for large datasets
|
|
535
|
+
|
|
536
|
+
## Adding Metadata
|
|
537
|
+
|
|
538
|
+
Vector stores support rich metadata (any JSON-serializable fields) for filtering and organization. Since metadata is stored with no fixed schema, use consistent field naming to avoid unexpected query results.
|
|
539
|
+
|
|
540
|
+
important
|
|
541
|
+
Metadata is crucial for vector storage - without it, you'd only have numerical embeddings with no way to return the original text or filter results. Always store at least the source text as metadata.
|
|
542
|
+
|
|
543
|
+
```ts
|
|
544
|
+
// Store embeddings with rich metadata for better organization and filtering
|
|
545
|
+
await store.upsert({
|
|
546
|
+
indexName: "myCollection",
|
|
547
|
+
vectors: embeddings,
|
|
548
|
+
metadata: chunks.map((chunk) => ({
|
|
549
|
+
// Basic content
|
|
550
|
+
text: chunk.text,
|
|
551
|
+
id: chunk.id,
|
|
552
|
+
|
|
553
|
+
// Document organization
|
|
554
|
+
source: chunk.source,
|
|
555
|
+
category: chunk.category,
|
|
556
|
+
|
|
557
|
+
// Temporal metadata
|
|
558
|
+
createdAt: new Date().toISOString(),
|
|
559
|
+
version: "1.0",
|
|
560
|
+
|
|
561
|
+
// Custom fields
|
|
562
|
+
language: chunk.language,
|
|
563
|
+
author: chunk.author,
|
|
564
|
+
confidenceScore: chunk.score,
|
|
565
|
+
})),
|
|
566
|
+
});
|
|
567
|
+
```
|
|
568
|
+
|
|
569
|
+
Key metadata considerations:
|
|
570
|
+
|
|
571
|
+
- Be strict with field naming - inconsistencies like 'category' vs 'Category' will affect queries
|
|
572
|
+
- Only include fields you plan to filter or sort by - extra fields add overhead
|
|
573
|
+
- Add timestamps (e.g., 'createdAt', 'lastUpdated') to track content freshness
|
|
574
|
+
|
|
575
|
+
## Deleting Vectors
|
|
576
|
+
|
|
577
|
+
When building RAG applications, you often need to clean up stale vectors when documents are deleted or updated. Mastra provides the `deleteVectors` method that supports deleting vectors by metadata filters, making it easy to remove all embeddings associated with a specific document.
|
|
578
|
+
|
|
579
|
+
### Delete by Metadata Filter
|
|
580
|
+
|
|
581
|
+
The most common use case is deleting all vectors for a specific document when a user deletes it:
|
|
582
|
+
|
|
583
|
+
```ts title="delete-vectors.ts"
|
|
584
|
+
// Delete all vectors for a specific document
|
|
585
|
+
await store.deleteVectors({
|
|
586
|
+
indexName: "myCollection",
|
|
587
|
+
filter: { docId: "document-123" },
|
|
588
|
+
});
|
|
589
|
+
```
|
|
590
|
+
|
|
591
|
+
This is particularly useful when:
|
|
592
|
+
- A user deletes a document and you need to remove all its chunks
|
|
593
|
+
- You're re-indexing a document and want to remove old vectors first
|
|
594
|
+
- You need to clean up vectors for a specific user or tenant
|
|
595
|
+
|
|
596
|
+
### Delete Multiple Documents
|
|
597
|
+
|
|
598
|
+
You can also use complex filters to delete vectors matching multiple conditions:
|
|
599
|
+
|
|
600
|
+
```ts title="delete-vectors-advanced.ts"
|
|
601
|
+
// Delete all vectors for multiple documents
|
|
602
|
+
await store.deleteVectors({
|
|
603
|
+
indexName: "myCollection",
|
|
604
|
+
filter: {
|
|
605
|
+
docId: { $in: ["doc-1", "doc-2", "doc-3"] },
|
|
606
|
+
},
|
|
607
|
+
});
|
|
608
|
+
|
|
609
|
+
// Delete vectors for a specific user's documents
|
|
610
|
+
await store.deleteVectors({
|
|
611
|
+
indexName: "myCollection",
|
|
612
|
+
filter: {
|
|
613
|
+
$and: [
|
|
614
|
+
{ userId: "user-123" },
|
|
615
|
+
{ status: "archived" },
|
|
616
|
+
],
|
|
617
|
+
},
|
|
618
|
+
});
|
|
619
|
+
```
|
|
620
|
+
|
|
621
|
+
### Delete by Vector IDs
|
|
622
|
+
|
|
623
|
+
If you have specific vector IDs to delete, you can pass them directly:
|
|
624
|
+
|
|
625
|
+
```ts title="delete-by-ids.ts"
|
|
626
|
+
// Delete specific vectors by their IDs
|
|
627
|
+
await store.deleteVectors({
|
|
628
|
+
indexName: "myCollection",
|
|
629
|
+
ids: ["vec-1", "vec-2", "vec-3"],
|
|
630
|
+
});
|
|
631
|
+
```
|
|
632
|
+
|
|
633
|
+
## Best Practices
|
|
634
|
+
|
|
635
|
+
- Create indexes before bulk insertions
|
|
636
|
+
- Use batch operations for large insertions (the upsert method handles batching automatically)
|
|
637
|
+
- Only store metadata you'll query against
|
|
638
|
+
- Match embedding dimensions to your model (e.g., 1536 for `text-embedding-3-small`)
|
|
@@ -0,0 +1,549 @@
|
|
|
1
|
+
> Guide on retrieval processes in Mastra
|
|
2
|
+
|
|
3
|
+
# Retrieval in RAG Systems
|
|
4
|
+
|
|
5
|
+
After storing embeddings, you need to retrieve relevant chunks to answer user queries.
|
|
6
|
+
|
|
7
|
+
Mastra provides flexible retrieval options with support for semantic search, filtering, and re-ranking.
|
|
8
|
+
|
|
9
|
+
## How Retrieval Works
|
|
10
|
+
|
|
11
|
+
1. The user's query is converted to an embedding using the same model used for document embeddings
|
|
12
|
+
2. This embedding is compared to stored embeddings using vector similarity
|
|
13
|
+
3. The most similar chunks are retrieved and can be optionally:
|
|
14
|
+
|
|
15
|
+
- Filtered by metadata
|
|
16
|
+
- Re-ranked for better relevance
|
|
17
|
+
- Processed through a knowledge graph
|
|
18
|
+
|
|
19
|
+
## Basic Retrieval
|
|
20
|
+
|
|
21
|
+
The simplest approach is direct semantic search. This method uses vector similarity to find chunks that are semantically similar to the query:
|
|
22
|
+
|
|
23
|
+
```ts
|
|
24
|
+
import { embed } from "ai";
|
|
25
|
+
import { PgVector } from "@mastra/pg";
|
|
26
|
+
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
|
|
27
|
+
|
|
28
|
+
// Convert query to embedding
|
|
29
|
+
const { embedding } = await embed({
|
|
30
|
+
value: "What are the main points in the article?",
|
|
31
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
32
|
+
});
|
|
33
|
+
|
|
34
|
+
// Query vector store
|
|
35
|
+
const pgVector = new PgVector({
|
|
36
|
+
id: 'pg-vector',
|
|
37
|
+
connectionString: process.env.POSTGRES_CONNECTION_STRING,
|
|
38
|
+
});
|
|
39
|
+
const results = await pgVector.query({
|
|
40
|
+
indexName: "embeddings",
|
|
41
|
+
queryVector: embedding,
|
|
42
|
+
topK: 10,
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
// Display results
|
|
46
|
+
console.log(results);
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
The `topK` parameter specifies the maximum number of most similar results to return from the vector search.
|
|
50
|
+
|
|
51
|
+
Results include both the text content and a similarity score:
|
|
52
|
+
|
|
53
|
+
```ts
|
|
54
|
+
[
|
|
55
|
+
{
|
|
56
|
+
text: "Climate change poses significant challenges...",
|
|
57
|
+
score: 0.89,
|
|
58
|
+
metadata: { source: "article1.txt" },
|
|
59
|
+
},
|
|
60
|
+
{
|
|
61
|
+
text: "Rising temperatures affect crop yields...",
|
|
62
|
+
score: 0.82,
|
|
63
|
+
metadata: { source: "article1.txt" },
|
|
64
|
+
},
|
|
65
|
+
];
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
## Advanced Retrieval Options
|
|
69
|
+
|
|
70
|
+
### Metadata Filtering
|
|
71
|
+
|
|
72
|
+
Filter results based on metadata fields to narrow down the search space. This approach - combining vector similarity search with metadata filters - is sometimes called hybrid vector search, as it merges semantic search with structured filtering criteria.
|
|
73
|
+
|
|
74
|
+
This is useful when you have documents from different sources, time periods, or with specific attributes. Mastra provides a unified MongoDB-style query syntax that works across all supported vector stores.
|
|
75
|
+
|
|
76
|
+
For detailed information about available operators and syntax, see the [Metadata Filters Reference](https://mastra.ai/reference/v1/rag/metadata-filters).
|
|
77
|
+
|
|
78
|
+
Basic filtering examples:
|
|
79
|
+
|
|
80
|
+
```ts
|
|
81
|
+
// Simple equality filter
|
|
82
|
+
const results = await pgVector.query({
|
|
83
|
+
indexName: "embeddings",
|
|
84
|
+
queryVector: embedding,
|
|
85
|
+
topK: 10,
|
|
86
|
+
filter: {
|
|
87
|
+
source: "article1.txt",
|
|
88
|
+
},
|
|
89
|
+
});
|
|
90
|
+
|
|
91
|
+
// Numeric comparison
|
|
92
|
+
const results = await pgVector.query({
|
|
93
|
+
indexName: "embeddings",
|
|
94
|
+
queryVector: embedding,
|
|
95
|
+
topK: 10,
|
|
96
|
+
filter: {
|
|
97
|
+
price: { $gt: 100 },
|
|
98
|
+
},
|
|
99
|
+
});
|
|
100
|
+
|
|
101
|
+
// Multiple conditions
|
|
102
|
+
const results = await pgVector.query({
|
|
103
|
+
indexName: "embeddings",
|
|
104
|
+
queryVector: embedding,
|
|
105
|
+
topK: 10,
|
|
106
|
+
filter: {
|
|
107
|
+
category: "electronics",
|
|
108
|
+
price: { $lt: 1000 },
|
|
109
|
+
inStock: true,
|
|
110
|
+
},
|
|
111
|
+
});
|
|
112
|
+
|
|
113
|
+
// Array operations
|
|
114
|
+
const results = await pgVector.query({
|
|
115
|
+
indexName: "embeddings",
|
|
116
|
+
queryVector: embedding,
|
|
117
|
+
topK: 10,
|
|
118
|
+
filter: {
|
|
119
|
+
tags: { $in: ["sale", "new"] },
|
|
120
|
+
},
|
|
121
|
+
});
|
|
122
|
+
|
|
123
|
+
// Logical operators
|
|
124
|
+
const results = await pgVector.query({
|
|
125
|
+
indexName: "embeddings",
|
|
126
|
+
queryVector: embedding,
|
|
127
|
+
topK: 10,
|
|
128
|
+
filter: {
|
|
129
|
+
$or: [{ category: "electronics" }, { category: "accessories" }],
|
|
130
|
+
$and: [{ price: { $gt: 50 } }, { price: { $lt: 200 } }],
|
|
131
|
+
},
|
|
132
|
+
});
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
Common use cases for metadata filtering:
|
|
136
|
+
|
|
137
|
+
- Filter by document source or type
|
|
138
|
+
- Filter by date ranges
|
|
139
|
+
- Filter by specific categories or tags
|
|
140
|
+
- Filter by numerical ranges (e.g., price, rating)
|
|
141
|
+
- Combine multiple conditions for precise querying
|
|
142
|
+
- Filter by document attributes (e.g., language, author)
|
|
143
|
+
|
|
144
|
+
### Vector Query Tool
|
|
145
|
+
|
|
146
|
+
Sometimes you want to give your agent the ability to query a vector database directly. The Vector Query Tool allows your agent to be in charge of retrieval decisions, combining semantic search with optional filtering and reranking based on the agent's understanding of the user's needs.
|
|
147
|
+
|
|
148
|
+
```ts
|
|
149
|
+
import { createVectorQueryTool } from "@mastra/rag";
|
|
150
|
+
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
|
|
151
|
+
|
|
152
|
+
const vectorQueryTool = createVectorQueryTool({
|
|
153
|
+
vectorStoreName: "pgVector",
|
|
154
|
+
indexName: "embeddings",
|
|
155
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
156
|
+
});
|
|
157
|
+
```
|
|
158
|
+
|
|
159
|
+
When creating the tool, pay special attention to the tool's name and description - these help the agent understand when and how to use the retrieval capabilities. For example, you might name it "SearchKnowledgeBase" and describe it as "Search through our documentation to find relevant information about X topic."
|
|
160
|
+
|
|
161
|
+
This is particularly useful when:
|
|
162
|
+
|
|
163
|
+
- Your agent needs to dynamically decide what information to retrieve
|
|
164
|
+
- The retrieval process requires complex decision-making
|
|
165
|
+
- You want the agent to combine multiple retrieval strategies based on context
|
|
166
|
+
|
|
167
|
+
#### Database-Specific Configurations
|
|
168
|
+
|
|
169
|
+
The Vector Query Tool supports database-specific configurations that enable you to leverage unique features and optimizations of different vector stores.
|
|
170
|
+
|
|
171
|
+
> **Note:**
|
|
172
|
+
These configurations are for **query-time options** like namespaces, performance tuning, and filtering—not for database connection setup.
|
|
173
|
+
|
|
174
|
+
Connection credentials (URLs, auth tokens) are configured when you instantiate the vector store class (e.g., `new LibSQLVector({ connectionUrl: '...' })`).
|
|
175
|
+
|
|
176
|
+
```ts
|
|
177
|
+
import { createVectorQueryTool } from "@mastra/rag";
|
|
178
|
+
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
|
|
179
|
+
|
|
180
|
+
// Pinecone with namespace
|
|
181
|
+
const pineconeQueryTool = createVectorQueryTool({
|
|
182
|
+
vectorStoreName: "pinecone",
|
|
183
|
+
indexName: "docs",
|
|
184
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
185
|
+
databaseConfig: {
|
|
186
|
+
pinecone: {
|
|
187
|
+
namespace: "production", // Isolate data by environment
|
|
188
|
+
},
|
|
189
|
+
},
|
|
190
|
+
});
|
|
191
|
+
|
|
192
|
+
// pgVector with performance tuning
|
|
193
|
+
const pgVectorQueryTool = createVectorQueryTool({
|
|
194
|
+
vectorStoreName: "postgres",
|
|
195
|
+
indexName: "embeddings",
|
|
196
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
197
|
+
databaseConfig: {
|
|
198
|
+
pgvector: {
|
|
199
|
+
minScore: 0.7, // Filter low-quality results
|
|
200
|
+
ef: 200, // HNSW search parameter
|
|
201
|
+
probes: 10, // IVFFlat probe parameter
|
|
202
|
+
},
|
|
203
|
+
},
|
|
204
|
+
});
|
|
205
|
+
|
|
206
|
+
// Chroma with advanced filtering
|
|
207
|
+
const chromaQueryTool = createVectorQueryTool({
|
|
208
|
+
vectorStoreName: "chroma",
|
|
209
|
+
indexName: "documents",
|
|
210
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
211
|
+
databaseConfig: {
|
|
212
|
+
chroma: {
|
|
213
|
+
where: { category: "technical" },
|
|
214
|
+
whereDocument: { $contains: "API" },
|
|
215
|
+
},
|
|
216
|
+
},
|
|
217
|
+
});
|
|
218
|
+
|
|
219
|
+
// LanceDB with table specificity
|
|
220
|
+
const lanceQueryTool = createVectorQueryTool({
|
|
221
|
+
vectorStoreName: "lance",
|
|
222
|
+
indexName: "documents",
|
|
223
|
+
model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
|
|
224
|
+
databaseConfig: {
|
|
225
|
+
lance: {
|
|
226
|
+
tableName: "myVectors", // Specify which table to query
|
|
227
|
+
includeAllColumns: true, // Include all metadata columns in results
|
|
228
|
+
},
|
|
229
|
+
},
|
|
230
|
+
});
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
**Key Benefits:**
|
|
234
|
+
|
|
235
|
+
- **Pinecone namespaces**: Organize vectors by tenant, environment, or data type
|
|
236
|
+
- **pgVector optimization**: Control search accuracy and speed with ef/probes parameters
|
|
237
|
+
- **Quality filtering**: Set minimum similarity thresholds to improve result relevance
|
|
238
|
+
- **LanceDB tables**: Separate data into tables for better organization and performance
|
|
239
|
+
- **Runtime flexibility**: Override configurations dynamically based on context
|
|
240
|
+
|
|
241
|
+
**Common Use Cases:**
|
|
242
|
+
|
|
243
|
+
- Multi-tenant applications using Pinecone namespaces
|
|
244
|
+
- Performance optimization in high-load scenarios
|
|
245
|
+
- Environment-specific configurations (dev/staging/prod)
|
|
246
|
+
- Quality-gated search results
|
|
247
|
+
- Embedded, file-based vector storage with LanceDB for edge deployment scenarios
|
|
248
|
+
|
|
249
|
+
You can also override these configurations at runtime using the request context:
|
|
250
|
+
|
|
251
|
+
```ts
|
|
252
|
+
import { RequestContext } from "@mastra/core/request-context";
|
|
253
|
+
|
|
254
|
+
const requestContext = new RequestContext();
|
|
255
|
+
requestContext.set("databaseConfig", {
|
|
256
|
+
pinecone: {
|
|
257
|
+
namespace: "runtime-namespace",
|
|
258
|
+
},
|
|
259
|
+
});
|
|
260
|
+
|
|
261
|
+
await pineconeQueryTool.execute({
|
|
262
|
+
context: { queryText: "search query" },
|
|
263
|
+
mastra,
|
|
264
|
+
requestContext,
|
|
265
|
+
});
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
For detailed configuration options and advanced usage, see the [Vector Query Tool Reference](https://mastra.ai/reference/v1/tools/vector-query-tool).
|
|
269
|
+
|
|
270
|
+
### Vector Store Prompts
|
|
271
|
+
|
|
272
|
+
Vector store prompts define query patterns and filtering capabilities for each vector database implementation.
|
|
273
|
+
When implementing filtering, these prompts are required in the agent's instructions to specify valid operators and syntax for each vector store implementation.
|
|
274
|
+
|
|
275
|
+
**pgvector:**
|
|
276
|
+
|
|
277
|
+
```ts
|
|
278
|
+
import { PGVECTOR_PROMPT } from "@mastra/pg";
|
|
279
|
+
|
|
280
|
+
export const ragAgent = new Agent({
|
|
281
|
+
id: "rag-agent",
|
|
282
|
+
name: "RAG Agent",
|
|
283
|
+
model: "openai/gpt-5.1",
|
|
284
|
+
instructions: `
|
|
285
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
286
|
+
${PGVECTOR_PROMPT}
|
|
287
|
+
`,
|
|
288
|
+
tools: { vectorQueryTool },
|
|
289
|
+
});
|
|
290
|
+
```
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
**pinecone:**
|
|
295
|
+
|
|
296
|
+
```ts title="vector-store.ts"
|
|
297
|
+
import { PINECONE_PROMPT } from "@mastra/pinecone";
|
|
298
|
+
|
|
299
|
+
export const ragAgent = new Agent({
|
|
300
|
+
id: "rag-agent",
|
|
301
|
+
name: "RAG Agent",
|
|
302
|
+
model: "openai/gpt-5.1",
|
|
303
|
+
instructions: `
|
|
304
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
305
|
+
${PINECONE_PROMPT}
|
|
306
|
+
`,
|
|
307
|
+
tools: { vectorQueryTool },
|
|
308
|
+
});
|
|
309
|
+
```
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
|
|
313
|
+
**qdrant:**
|
|
314
|
+
|
|
315
|
+
```ts title="vector-store.ts"
|
|
316
|
+
import { QDRANT_PROMPT } from "@mastra/qdrant";
|
|
317
|
+
|
|
318
|
+
export const ragAgent = new Agent({
|
|
319
|
+
id: "rag-agent",
|
|
320
|
+
name: "RAG Agent",
|
|
321
|
+
model: "openai/gpt-5.1",
|
|
322
|
+
instructions: `
|
|
323
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
324
|
+
${QDRANT_PROMPT}
|
|
325
|
+
`,
|
|
326
|
+
tools: { vectorQueryTool },
|
|
327
|
+
});
|
|
328
|
+
```
|
|
329
|
+
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
**chroma:**
|
|
333
|
+
|
|
334
|
+
```ts title="vector-store.ts"
|
|
335
|
+
import { CHROMA_PROMPT } from "@mastra/chroma";
|
|
336
|
+
|
|
337
|
+
export const ragAgent = new Agent({
|
|
338
|
+
id: "rag-agent",
|
|
339
|
+
name: "RAG Agent",
|
|
340
|
+
model: "openai/gpt-5.1",
|
|
341
|
+
instructions: `
|
|
342
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
343
|
+
${CHROMA_PROMPT}
|
|
344
|
+
`,
|
|
345
|
+
tools: { vectorQueryTool },
|
|
346
|
+
});
|
|
347
|
+
```
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
**astra:**
|
|
352
|
+
|
|
353
|
+
```ts title="vector-store.ts"
|
|
354
|
+
import { ASTRA_PROMPT } from "@mastra/astra";
|
|
355
|
+
|
|
356
|
+
export const ragAgent = new Agent({
|
|
357
|
+
id: "rag-agent",
|
|
358
|
+
name: "RAG Agent",
|
|
359
|
+
model: "openai/gpt-5.1",
|
|
360
|
+
instructions: `
|
|
361
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
362
|
+
${ASTRA_PROMPT}
|
|
363
|
+
`,
|
|
364
|
+
tools: { vectorQueryTool },
|
|
365
|
+
});
|
|
366
|
+
```
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
**libsql:**
|
|
371
|
+
|
|
372
|
+
```ts title="vector-store.ts"
|
|
373
|
+
import { LIBSQL_PROMPT } from "@mastra/libsql";
|
|
374
|
+
|
|
375
|
+
export const ragAgent = new Agent({
|
|
376
|
+
id: "rag-agent",
|
|
377
|
+
name: "RAG Agent",
|
|
378
|
+
model: "openai/gpt-5.1",
|
|
379
|
+
instructions: `
|
|
380
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
381
|
+
${LIBSQL_PROMPT}
|
|
382
|
+
`,
|
|
383
|
+
tools: { vectorQueryTool },
|
|
384
|
+
});
|
|
385
|
+
```
|
|
386
|
+
|
|
387
|
+
|
|
388
|
+
|
|
389
|
+
**upstash:**
|
|
390
|
+
|
|
391
|
+
```ts title="vector-store.ts"
|
|
392
|
+
import { UPSTASH_PROMPT } from "@mastra/upstash";
|
|
393
|
+
|
|
394
|
+
export const ragAgent = new Agent({
|
|
395
|
+
id: "rag-agent",
|
|
396
|
+
name: "RAG Agent",
|
|
397
|
+
model: "openai/gpt-5.1",
|
|
398
|
+
instructions: `
|
|
399
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
400
|
+
${UPSTASH_PROMPT}
|
|
401
|
+
`,
|
|
402
|
+
tools: { vectorQueryTool },
|
|
403
|
+
});
|
|
404
|
+
```
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
**vectorize:**
|
|
409
|
+
|
|
410
|
+
```ts title="vector-store.ts"
|
|
411
|
+
import { VECTORIZE_PROMPT } from "@mastra/vectorize";
|
|
412
|
+
|
|
413
|
+
export const ragAgent = new Agent({
|
|
414
|
+
id: "rag-agent",
|
|
415
|
+
name: "RAG Agent",
|
|
416
|
+
model: "openai/gpt-5.1",
|
|
417
|
+
instructions: `
|
|
418
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
419
|
+
${VECTORIZE_PROMPT}
|
|
420
|
+
`,
|
|
421
|
+
tools: { vectorQueryTool },
|
|
422
|
+
});
|
|
423
|
+
```
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
|
|
427
|
+
**mongodb:**
|
|
428
|
+
|
|
429
|
+
```ts title="vector-store.ts"
|
|
430
|
+
import { MONGODB_PROMPT } from "@mastra/mongodb";
|
|
431
|
+
|
|
432
|
+
export const ragAgent = new Agent({
|
|
433
|
+
id: "rag-agent",
|
|
434
|
+
name: "RAG Agent",
|
|
435
|
+
model: "openai/gpt-5.1",
|
|
436
|
+
instructions: `
|
|
437
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
438
|
+
${MONGODB_PROMPT}
|
|
439
|
+
`,
|
|
440
|
+
tools: { vectorQueryTool },
|
|
441
|
+
});
|
|
442
|
+
```
|
|
443
|
+
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
**opensearch:**
|
|
447
|
+
|
|
448
|
+
```ts title="vector-store.ts"
|
|
449
|
+
import { OPENSEARCH_PROMPT } from "@mastra/opensearch";
|
|
450
|
+
|
|
451
|
+
export const ragAgent = new Agent({
|
|
452
|
+
id: "rag-agent",
|
|
453
|
+
name: "RAG Agent",
|
|
454
|
+
model: "openai/gpt-5.1",
|
|
455
|
+
instructions: `
|
|
456
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
457
|
+
${OPENSEARCH_PROMPT}
|
|
458
|
+
`,
|
|
459
|
+
tools: { vectorQueryTool },
|
|
460
|
+
});
|
|
461
|
+
```
|
|
462
|
+
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
**s3vectors:**
|
|
466
|
+
|
|
467
|
+
```ts title="vector-store.ts"
|
|
468
|
+
import { S3VECTORS_PROMPT } from "@mastra/s3vectors";
|
|
469
|
+
|
|
470
|
+
export const ragAgent = new Agent({
|
|
471
|
+
id: "rag-agent",
|
|
472
|
+
name: "RAG Agent",
|
|
473
|
+
model: "openai/gpt-5.1",
|
|
474
|
+
instructions: `
|
|
475
|
+
Process queries using the provided context. Structure responses to be concise and relevant.
|
|
476
|
+
${S3VECTORS_PROMPT}
|
|
477
|
+
`,
|
|
478
|
+
tools: { vectorQueryTool },
|
|
479
|
+
});
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
|
|
483
|
+
|
|
484
|
+
### Re-ranking
|
|
485
|
+
|
|
486
|
+
Initial vector similarity search can sometimes miss nuanced relevance. Re-ranking is a more computationally expensive, but more accurate, process that improves results by:
|
|
487
|
+
|
|
488
|
+
- Considering word order and exact matches
|
|
489
|
+
- Applying more sophisticated relevance scoring
|
|
490
|
+
- Using a method called cross-attention between query and documents
|
|
491
|
+
|
|
492
|
+
Here's how to use re-ranking:
|
|
493
|
+
|
|
494
|
+
```ts
|
|
495
|
+
import {
|
|
496
|
+
rerankWithScorer as rerank,
|
|
497
|
+
MastraAgentRelevanceScorer
|
|
498
|
+
} from "@mastra/rag";
|
|
499
|
+
|
|
500
|
+
// Get initial results from vector search
|
|
501
|
+
const initialResults = await pgVector.query({
|
|
502
|
+
indexName: "embeddings",
|
|
503
|
+
queryVector: queryEmbedding,
|
|
504
|
+
topK: 10,
|
|
505
|
+
});
|
|
506
|
+
|
|
507
|
+
// Create a relevance scorer
|
|
508
|
+
const relevanceProvider = new MastraAgentRelevanceScorer('relevance-scorer', "openai/gpt-5.1");
|
|
509
|
+
|
|
510
|
+
// Re-rank the results
|
|
511
|
+
const rerankedResults = await rerank({
|
|
512
|
+
results: initialResults,
|
|
513
|
+
query,
|
|
514
|
+
scorer: relevanceProvider,
|
|
515
|
+
options: {
|
|
516
|
+
weights: {
|
|
517
|
+
semantic: 0.5, // How well the content matches the query semantically
|
|
518
|
+
vector: 0.3, // Original vector similarity score
|
|
519
|
+
position: 0.2, // Preserves original result ordering
|
|
520
|
+
},
|
|
521
|
+
topK: 10,
|
|
522
|
+
},
|
|
523
|
+
});
|
|
524
|
+
```
|
|
525
|
+
|
|
526
|
+
The weights control how different factors influence the final ranking:
|
|
527
|
+
|
|
528
|
+
- `semantic`: Higher values prioritize semantic understanding and relevance to the query
|
|
529
|
+
- `vector`: Higher values favor the original vector similarity scores
|
|
530
|
+
- `position`: Higher values help maintain the original ordering of results
|
|
531
|
+
|
|
532
|
+
> **Note:**
|
|
533
|
+
For semantic scoring to work properly during re-ranking, each result must include the text content in its `metadata.text` field.
|
|
534
|
+
|
|
535
|
+
You can also use other relevance score providers like Cohere or ZeroEntropy:
|
|
536
|
+
|
|
537
|
+
```ts
|
|
538
|
+
const relevanceProvider = new CohereRelevanceScorer("rerank-v3.5");
|
|
539
|
+
```
|
|
540
|
+
|
|
541
|
+
```ts
|
|
542
|
+
const relevanceProvider = new ZeroEntropyRelevanceScorer("zerank-1");
|
|
543
|
+
```
|
|
544
|
+
|
|
545
|
+
The re-ranked results combine vector similarity with semantic understanding to improve retrieval quality.
|
|
546
|
+
|
|
547
|
+
For more details about re-ranking, see the [rerank()](https://mastra.ai/reference/v1/rag/rerankWithScorer) method.
|
|
548
|
+
|
|
549
|
+
For graph-based retrieval that follows connections between chunks, see the [GraphRAG](https://mastra.ai/docs/v1/rag/graph-rag) documentation.
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
# Vectors API Reference
|
|
2
|
+
|
|
3
|
+
> API reference for vectors - 1 entry
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
## Reference: Qdrant Vector Store
|
|
9
|
+
|
|
10
|
+
> Documentation for integrating Qdrant with Mastra, a vector similarity search engine for managing vectors and payloads.
|
|
11
|
+
|
|
12
|
+
The QdrantVector class provides vector search using [Qdrant](https://qdrant.tech/), a vector similarity search engine.
|
|
13
|
+
It provides a production-ready service with a convenient API to store, search, and manage vectors with additional payload and extended filtering support.
|
|
14
|
+
|
|
15
|
+
## Constructor Options
|
|
16
|
+
|
|
17
|
+
## Methods
|
|
18
|
+
|
|
19
|
+
### createIndex()
|
|
20
|
+
|
|
21
|
+
### upsert()
|
|
22
|
+
|
|
23
|
+
### query()
|
|
24
|
+
|
|
25
|
+
### listIndexes()
|
|
26
|
+
|
|
27
|
+
Returns an array of index names as strings.
|
|
28
|
+
|
|
29
|
+
### describeIndex()
|
|
30
|
+
|
|
31
|
+
Returns:
|
|
32
|
+
|
|
33
|
+
```typescript
|
|
34
|
+
interface IndexStats {
|
|
35
|
+
dimension: number;
|
|
36
|
+
count: number;
|
|
37
|
+
metric: "cosine" | "euclidean" | "dotproduct";
|
|
38
|
+
}
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
### deleteIndex()
|
|
42
|
+
|
|
43
|
+
### updateVector()
|
|
44
|
+
|
|
45
|
+
Update a single vector by ID or by metadata filter. Either `id` or `filter` must be provided, but not both.
|
|
46
|
+
|
|
47
|
+
Updates a vector and/or its metadata in the specified index. If both vector and metadata are provided, both will be updated. If only one is provided, only that will be updated.
|
|
48
|
+
|
|
49
|
+
### deleteVector()
|
|
50
|
+
|
|
51
|
+
Deletes a vector from the specified index by its ID.
|
|
52
|
+
|
|
53
|
+
### deleteVectors()
|
|
54
|
+
|
|
55
|
+
Delete multiple vectors by IDs or by metadata filter. Either `ids` or `filter` must be provided, but not both.
|
|
56
|
+
|
|
57
|
+
## Response Types
|
|
58
|
+
|
|
59
|
+
Query results are returned in this format:
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
interface QueryResult {
|
|
63
|
+
id: string;
|
|
64
|
+
score: number;
|
|
65
|
+
metadata: Record<string, any>;
|
|
66
|
+
vector?: number[]; // Only included if includeVector is true
|
|
67
|
+
}
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## Error Handling
|
|
71
|
+
|
|
72
|
+
The store throws typed errors that can be caught:
|
|
73
|
+
|
|
74
|
+
```typescript
|
|
75
|
+
try {
|
|
76
|
+
await store.query({
|
|
77
|
+
indexName: "index_name",
|
|
78
|
+
queryVector: queryVector,
|
|
79
|
+
});
|
|
80
|
+
} catch (error) {
|
|
81
|
+
if (error instanceof VectorStoreError) {
|
|
82
|
+
console.log(error.code); // 'connection_failed' | 'invalid_dimension' | etc
|
|
83
|
+
console.log(error.details); // Additional error context
|
|
84
|
+
}
|
|
85
|
+
}
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
## Related
|
|
89
|
+
|
|
90
|
+
- [Metadata Filters](../rag/metadata-filters)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@mastra/qdrant",
|
|
3
|
-
"version": "1.0.0-beta.
|
|
3
|
+
"version": "1.0.0-beta.3",
|
|
4
4
|
"description": "Qdrant vector store provider for Mastra",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -23,18 +23,17 @@
|
|
|
23
23
|
"@qdrant/js-client-rest": "^1.15.1"
|
|
24
24
|
},
|
|
25
25
|
"devDependencies": {
|
|
26
|
-
"@microsoft/api-extractor": "^7.52.8",
|
|
27
26
|
"@types/node": "22.13.17",
|
|
28
27
|
"@vitest/coverage-v8": "4.0.12",
|
|
29
28
|
"@vitest/ui": "4.0.12",
|
|
30
29
|
"eslint": "^9.37.0",
|
|
31
30
|
"tsup": "^8.5.0",
|
|
32
|
-
"typescript": "^5.
|
|
33
|
-
"vitest": "4.0.
|
|
31
|
+
"typescript": "^5.9.3",
|
|
32
|
+
"vitest": "4.0.16",
|
|
33
|
+
"@internal/lint": "0.0.53",
|
|
34
34
|
"@internal/storage-test-utils": "0.0.49",
|
|
35
|
-
"@
|
|
36
|
-
"@
|
|
37
|
-
"@internal/lint": "0.0.53"
|
|
35
|
+
"@mastra/core": "1.0.0-beta.20",
|
|
36
|
+
"@internal/types-builder": "0.0.28"
|
|
38
37
|
},
|
|
39
38
|
"peerDependencies": {
|
|
40
39
|
"@mastra/core": ">=1.0.0-0 <2.0.0-0"
|
|
@@ -56,7 +55,8 @@
|
|
|
56
55
|
"node": ">=22.13.0"
|
|
57
56
|
},
|
|
58
57
|
"scripts": {
|
|
59
|
-
"build": "tsup --silent --config tsup.config.ts",
|
|
58
|
+
"build:lib": "tsup --silent --config tsup.config.ts",
|
|
59
|
+
"build:docs": "pnpx tsx ../../scripts/generate-package-docs.ts stores/qdrant",
|
|
60
60
|
"build:watch": "tsup --watch --silent --config tsup.config.ts",
|
|
61
61
|
"pretest": "docker compose up -d",
|
|
62
62
|
"posttest": "docker compose down -v",
|