@mastra/vectorize 1.0.0 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,645 @@
1
+ # Storing Embeddings in a Vector Database
2
+
3
+ After generating embeddings, you need to store them in a database that supports vector similarity search. Mastra provides a consistent interface for storing and querying embeddings across various vector databases.
4
+
5
+ ## Supported Databases
6
+
7
+ **MongoDB**:
8
+
9
+ ```ts
10
+ import { MongoDBVector } from '@mastra/mongodb'
11
+
12
+ const store = new MongoDBVector({
13
+ id: 'mongodb-vector',
14
+ uri: process.env.MONGODB_URI,
15
+ dbName: process.env.MONGODB_DATABASE,
16
+ })
17
+ await store.createIndex({
18
+ indexName: 'myCollection',
19
+ dimension: 1536,
20
+ })
21
+ await store.upsert({
22
+ indexName: 'myCollection',
23
+ vectors: embeddings,
24
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
25
+ })
26
+ ```
27
+
28
+ ### Using MongoDB Atlas Vector Search
29
+
30
+ For detailed setup instructions and best practices, see the [official MongoDB Atlas Vector Search documentation](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/?utm_campaign=devrel\&utm_source=third-party-content\&utm_medium=cta\&utm_content=mastra-docs).
31
+
32
+ **PgVector**:
33
+
34
+ ```ts
35
+ import { PgVector } from '@mastra/pg'
36
+
37
+ const store = new PgVector({
38
+ id: 'pg-vector',
39
+ connectionString: process.env.POSTGRES_CONNECTION_STRING,
40
+ })
41
+
42
+ await store.createIndex({
43
+ indexName: 'myCollection',
44
+ dimension: 1536,
45
+ })
46
+
47
+ await store.upsert({
48
+ indexName: 'myCollection',
49
+ vectors: embeddings,
50
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
51
+ })
52
+ ```
53
+
54
+ ### Using PostgreSQL with pgvector
55
+
56
+ PostgreSQL with the pgvector extension is a good solution for teams already using PostgreSQL who want to minimize infrastructure complexity. For detailed setup instructions and best practices, see the [official pgvector repository](https://github.com/pgvector/pgvector).
57
+
58
+ **Pinecone**:
59
+
60
+ ```ts
61
+ import { PineconeVector } from '@mastra/pinecone'
62
+
63
+ const store = new PineconeVector({
64
+ id: 'pinecone-vector',
65
+ apiKey: process.env.PINECONE_API_KEY,
66
+ })
67
+ await store.createIndex({
68
+ indexName: 'myCollection',
69
+ dimension: 1536,
70
+ })
71
+ await store.upsert({
72
+ indexName: 'myCollection',
73
+ vectors: embeddings,
74
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
75
+ })
76
+ ```
77
+
78
+ **Qdrant**:
79
+
80
+ ```ts
81
+ import { QdrantVector } from '@mastra/qdrant'
82
+
83
+ const store = new QdrantVector({
84
+ id: 'qdrant-vector',
85
+ url: process.env.QDRANT_URL,
86
+ apiKey: process.env.QDRANT_API_KEY,
87
+ })
88
+
89
+ await store.createIndex({
90
+ indexName: 'myCollection',
91
+ dimension: 1536,
92
+ })
93
+
94
+ await store.upsert({
95
+ indexName: 'myCollection',
96
+ vectors: embeddings,
97
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
98
+ })
99
+ ```
100
+
101
+ **Chroma**:
102
+
103
+ ```ts
104
+ import { ChromaVector } from '@mastra/chroma'
105
+
106
+ // Running Chroma locally
107
+ // const store = new ChromaVector()
108
+
109
+ // Running on Chroma Cloud
110
+ const store = new ChromaVector({
111
+ id: 'chroma-vector',
112
+ apiKey: process.env.CHROMA_API_KEY,
113
+ tenant: process.env.CHROMA_TENANT,
114
+ database: process.env.CHROMA_DATABASE,
115
+ })
116
+
117
+ await store.createIndex({
118
+ indexName: 'myCollection',
119
+ dimension: 1536,
120
+ })
121
+
122
+ await store.upsert({
123
+ indexName: 'myCollection',
124
+ vectors: embeddings,
125
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
126
+ })
127
+ ```
128
+
129
+ **Astra**:
130
+
131
+ ```ts
132
+ import { AstraVector } from '@mastra/astra'
133
+
134
+ const store = new AstraVector({
135
+ id: 'astra-vector',
136
+ token: process.env.ASTRA_DB_TOKEN,
137
+ endpoint: process.env.ASTRA_DB_ENDPOINT,
138
+ keyspace: process.env.ASTRA_DB_KEYSPACE,
139
+ })
140
+
141
+ await store.createIndex({
142
+ indexName: 'myCollection',
143
+ dimension: 1536,
144
+ })
145
+
146
+ await store.upsert({
147
+ indexName: 'myCollection',
148
+ vectors: embeddings,
149
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
150
+ })
151
+ ```
152
+
153
+ **libSQL**:
154
+
155
+ ```ts
156
+ import { LibSQLVector } from '@mastra/core/vector/libsql'
157
+
158
+ const store = new LibSQLVector({
159
+ id: 'libsql-vector',
160
+ url: process.env.DATABASE_URL,
161
+ authToken: process.env.DATABASE_AUTH_TOKEN, // Optional: for Turso cloud databases
162
+ })
163
+
164
+ await store.createIndex({
165
+ indexName: 'myCollection',
166
+ dimension: 1536,
167
+ })
168
+
169
+ await store.upsert({
170
+ indexName: 'myCollection',
171
+ vectors: embeddings,
172
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
173
+ })
174
+ ```
175
+
176
+ **Upstash**:
177
+
178
+ ```ts
179
+ import { UpstashVector } from '@mastra/upstash'
180
+
181
+ // In upstash they refer to the store as an index
182
+ const store = new UpstashVector({
183
+ id: 'upstash-vector',
184
+ url: process.env.UPSTASH_URL,
185
+ token: process.env.UPSTASH_TOKEN,
186
+ })
187
+
188
+ // There is no store.createIndex call here, Upstash creates indexes (known as namespaces in Upstash) automatically
189
+ // when you upsert if that namespace does not exist yet.
190
+ await store.upsert({
191
+ indexName: 'myCollection', // the namespace name in Upstash
192
+ vectors: embeddings,
193
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
194
+ })
195
+ ```
196
+
197
+ **Cloudflare**:
198
+
199
+ ```ts
200
+ import { CloudflareVector } from '@mastra/vectorize'
201
+
202
+ const store = new CloudflareVector({
203
+ id: 'cloudflare-vector',
204
+ accountId: process.env.CF_ACCOUNT_ID,
205
+ apiToken: process.env.CF_API_TOKEN,
206
+ })
207
+ await store.createIndex({
208
+ indexName: 'myCollection',
209
+ dimension: 1536,
210
+ })
211
+ await store.upsert({
212
+ indexName: 'myCollection',
213
+ vectors: embeddings,
214
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
215
+ })
216
+ ```
217
+
218
+ **OpenSearch**:
219
+
220
+ ```ts
221
+ import { OpenSearchVector } from '@mastra/opensearch'
222
+
223
+ const store = new OpenSearchVector({ id: 'opensearch', node: process.env.OPENSEARCH_URL })
224
+
225
+ await store.createIndex({
226
+ indexName: 'my-collection',
227
+ dimension: 1536,
228
+ })
229
+
230
+ await store.upsert({
231
+ indexName: 'my-collection',
232
+ vectors: embeddings,
233
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
234
+ })
235
+ ```
236
+
237
+ **Elasticsearch**:
238
+
239
+ ```ts
240
+ import { ElasticSearchVector } from '@mastra/elasticsearch'
241
+
242
+ const store = new ElasticSearchVector({
243
+ id: 'elasticsearch-vector',
244
+ url: process.env.ELASTICSEARCH_URL,
245
+ auth: {
246
+ apiKey: process.env.ELASTICSEARCH_API_KEY,
247
+ },
248
+ })
249
+
250
+ await store.createIndex({
251
+ indexName: 'my-collection',
252
+ dimension: 1536,
253
+ })
254
+
255
+ await store.upsert({
256
+ indexName: 'my-collection',
257
+ vectors: embeddings,
258
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
259
+ })
260
+ ```
261
+
262
+ ### Using Elasticsearch
263
+
264
+ For detailed setup instructions and best practices, see the [official Elasticsearch documentation](https://www.elastic.co/docs/solutions/search/get-started).
265
+
266
+ **Couchbase**:
267
+
268
+ ```ts
269
+ import { CouchbaseVector } from '@mastra/couchbase'
270
+
271
+ const store = new CouchbaseVector({
272
+ id: 'couchbase-vector',
273
+ connectionString: process.env.COUCHBASE_CONNECTION_STRING,
274
+ username: process.env.COUCHBASE_USERNAME,
275
+ password: process.env.COUCHBASE_PASSWORD,
276
+ bucketName: process.env.COUCHBASE_BUCKET,
277
+ scopeName: process.env.COUCHBASE_SCOPE,
278
+ collectionName: process.env.COUCHBASE_COLLECTION,
279
+ })
280
+ await store.createIndex({
281
+ indexName: 'myCollection',
282
+ dimension: 1536,
283
+ })
284
+ await store.upsert({
285
+ indexName: 'myCollection',
286
+ vectors: embeddings,
287
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
288
+ })
289
+ ```
290
+
291
+ **Lance**:
292
+
293
+ ```ts
294
+ import { LanceVectorStore } from '@mastra/lance'
295
+
296
+ const store = await LanceVectorStore.create('/path/to/db')
297
+
298
+ await store.createIndex({
299
+ tableName: 'myVectors',
300
+ indexName: 'myCollection',
301
+ dimension: 1536,
302
+ })
303
+
304
+ await store.upsert({
305
+ tableName: 'myVectors',
306
+ vectors: embeddings,
307
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
308
+ })
309
+ ```
310
+
311
+ ### Using LanceDB
312
+
313
+ LanceDB is an embedded vector database built on the Lance columnar format, suitable for local development or cloud deployment. For detailed setup instructions and best practices, see the [official LanceDB documentation](https://lancedb.github.io/lancedb/).
314
+
315
+ **S3 Vectors**:
316
+
317
+ ```ts
318
+ import { S3Vectors } from '@mastra/s3vectors'
319
+
320
+ const store = new S3Vectors({
321
+ id: 's3-vectors',
322
+ vectorBucketName: 'my-vector-bucket',
323
+ clientConfig: {
324
+ region: 'us-east-1',
325
+ },
326
+ nonFilterableMetadataKeys: ['content'],
327
+ })
328
+
329
+ await store.createIndex({
330
+ indexName: 'my-index',
331
+ dimension: 1536,
332
+ })
333
+ await store.upsert({
334
+ indexName: 'my-index',
335
+ vectors: embeddings,
336
+ metadata: chunks.map(chunk => ({ text: chunk.text })),
337
+ })
338
+ ```
339
+
340
+ ## Using Vector Storage
341
+
342
+ Once initialized, all vector stores share the same interface for creating indexes, upserting embeddings, and querying.
343
+
344
+ ### Creating Indexes
345
+
346
+ Before storing embeddings, you need to create an index with the appropriate dimension size for your embedding model:
347
+
348
+ ```ts
349
+ // Create an index with dimension 1536 (for text-embedding-3-small)
350
+ await store.createIndex({
351
+ indexName: 'myCollection',
352
+ dimension: 1536,
353
+ })
354
+ ```
355
+
356
+ The dimension size must match the output dimension of your chosen embedding model. Common dimension sizes are:
357
+
358
+ - OpenAI text-embedding-3-small: 1536 dimensions (or custom, e.g., 256)
359
+ - Cohere embed-multilingual-v3: 1024 dimensions
360
+ - Google gemini-embedding-001: 768 dimensions (or custom)
361
+
362
+ > **Warning:** Index dimensions cannot be changed after creation. To use a different model, delete and recreate the index with the new dimension size.
363
+
364
+ ### Naming Rules for Databases
365
+
366
+ Each vector database enforces specific naming conventions for indexes and collections to ensure compatibility and prevent conflicts.
367
+
368
+ **MongoDB**:
369
+
370
+ Collection (index) names must:
371
+
372
+ - Start with a letter or underscore
373
+ - Be up to 120 bytes long
374
+ - Contain only letters, numbers, underscores, or dots
375
+ - Not contain `$` or the null character
376
+ - Example: `my_collection.123` is valid
377
+ - Example: `my-index` is not valid (contains hyphen)
378
+ - Example: `My$Collection` is not valid (contains `$`)
379
+
380
+ **PgVector**:
381
+
382
+ Index names must:
383
+
384
+ - Start with a letter or underscore
385
+ - Contain only letters, numbers, and underscores
386
+ - Example: `my_index_123` is valid
387
+ - Example: `my-index` is not valid (contains hyphen)
388
+
389
+ **Pinecone**:
390
+
391
+ Index names must:
392
+
393
+ - Use only lowercase letters, numbers, and dashes
394
+
395
+ - Not contain dots (used for DNS routing)
396
+
397
+ - Not use non-Latin characters or emojis
398
+
399
+ - Have a combined length (with project ID) under 52 characters
400
+
401
+ - Example: `my-index-123` is valid
402
+ - Example: `my.index` is not valid (contains dot)
403
+
404
+ **Qdrant**:
405
+
406
+ Collection names must:
407
+
408
+ - Be 1-255 characters long
409
+
410
+ - Not contain any of these special characters:
411
+
412
+ - `< > : " / \ | ? *`
413
+ - Null character (`\0`)
414
+ - Unit separator (`\u{1F}`)
415
+
416
+ - Example: `my_collection_123` is valid
417
+
418
+ - Example: `my/collection` is not valid (contains slash)
419
+
420
+ **Chroma**:
421
+
422
+ Collection names must:
423
+
424
+ - Be 3-63 characters long
425
+ - Start and end with a letter or number
426
+ - Contain only letters, numbers, underscores, or hyphens
427
+ - Not contain consecutive periods (..)
428
+ - Not be a valid IPv4 address
429
+ - Example: `my-collection-123` is valid
430
+ - Example: `my..collection` is not valid (consecutive periods)
431
+
432
+ **Astra**:
433
+
434
+ Collection names must:
435
+
436
+ - Not be empty
437
+ - Be 48 characters or less
438
+ - Contain only letters, numbers, and underscores
439
+ - Example: `my_collection_123` is valid
440
+ - Example: `my-collection` is not valid (contains hyphen)
441
+
442
+ **libSQL**:
443
+
444
+ Index names must:
445
+
446
+ - Start with a letter or underscore
447
+ - Contain only letters, numbers, and underscores
448
+ - Example: `my_index_123` is valid
449
+ - Example: `my-index` is not valid (contains hyphen)
450
+
451
+ **Upstash**:
452
+
453
+ Namespace names must:
454
+
455
+ - Be 2-100 characters long
456
+
457
+ - Contain only:
458
+
459
+ - Alphanumeric characters (a-z, A-Z, 0-9)
460
+ - Underscores, hyphens, dots
461
+
462
+ - Not start or end with special characters (\_, -, .)
463
+
464
+ - Note: namespace names are case-sensitive
465
+
466
+ - Example: `MyNamespace123` is valid
467
+
468
+ - Example: `_namespace` is not valid (starts with underscore)
469
+
470
+ **Cloudflare**:
471
+
472
+ Index names must:
473
+
474
+ - Start with a letter
475
+ - Be shorter than 32 characters
476
+ - Contain only lowercase ASCII letters, numbers, and dashes
477
+ - Use dashes instead of spaces
478
+ - Example: `my-index-123` is valid
479
+ - Example: `My_Index` is not valid (uppercase and underscore)
480
+
481
+ **OpenSearch**:
482
+
483
+ Index names must:
484
+
485
+ - Use only lowercase letters
486
+ - Not begin with underscores or hyphens
487
+ - Not contain spaces or commas
488
+ - Not contain special characters (e.g. `:`, `"`, `*`, `+`, `/`, `\`, `|`, `?`, `#`, `>`, `<`)
489
+ - Example: `my-index-123` is valid
490
+ - Example: `My_Index` is not valid (contains uppercase letters)
491
+ - Example: `_myindex` is not valid (begins with underscore)
492
+
493
+ **Elasticsearch**:
494
+
495
+ Index names must:
496
+
497
+ - Use only lowercase letters
498
+ - Not exceed 255 bytes (counting multi-byte characters)
499
+ - Not begin with underscores, hyphens, or plus signs
500
+ - Not contain spaces or commas
501
+ - Not contain special characters (e.g. `:`, `"`, `*`, `+`, `/`, `\`, `|`, `?`, `#`, `>`, `<`)
502
+ - Not be "." or ".."
503
+ - Not start with "." (deprecated except for system/hidden indices)
504
+ - Example: `my-index-123` is valid
505
+ - Example: `My_Index` is not valid (contains uppercase letters)
506
+ - Example: `_myindex` is not valid (begins with underscore)
507
+ - Example: `.myindex` is not valid (begins with dot, deprecated)
508
+
509
+ **S3 Vectors**:
510
+
511
+ Index names must:
512
+
513
+ - Be unique within the same vector bucket
514
+ - Be 3–63 characters long
515
+ - Use only lowercase letters (`a–z`), numbers (`0–9`), hyphens (`-`), and dots (`.`)
516
+ - Begin and end with a letter or number
517
+ - Example: `my-index.123` is valid
518
+ - Example: `my_index` is not valid (contains underscore)
519
+ - Example: `-myindex` is not valid (begins with hyphen)
520
+ - Example: `myindex-` is not valid (ends with hyphen)
521
+ - Example: `MyIndex` is not valid (contains uppercase letters)
522
+
523
+ ### Upserting Embeddings
524
+
525
+ After creating an index, you can store embeddings along with their basic metadata:
526
+
527
+ ```ts
528
+ // Store embeddings with their corresponding metadata
529
+ await store.upsert({
530
+ indexName: 'myCollection', // index name
531
+ vectors: embeddings, // array of embedding vectors
532
+ metadata: chunks.map(chunk => ({
533
+ text: chunk.text, // The original text content
534
+ id: chunk.id, // Optional unique identifier
535
+ })),
536
+ })
537
+ ```
538
+
539
+ The upsert operation:
540
+
541
+ - Takes an array of embedding vectors and their corresponding metadata
542
+ - Updates existing vectors if they share the same ID
543
+ - Creates new vectors if they don't exist
544
+ - Automatically handles batching for large datasets
545
+
546
+ ## Adding Metadata
547
+
548
+ Vector stores support rich metadata (any JSON-serializable fields) for filtering and organization. Since metadata is stored with no fixed schema, use consistent field naming to avoid unexpected query results.
549
+
550
+ > **Warning:** Metadata is crucial for vector storage - without it, you'd only have numerical embeddings with no way to return the original text or filter results. Always store at least the source text as metadata.
551
+
552
+ ```ts
553
+ // Store embeddings with rich metadata for better organization and filtering
554
+ await store.upsert({
555
+ indexName: 'myCollection',
556
+ vectors: embeddings,
557
+ metadata: chunks.map(chunk => ({
558
+ // Basic content
559
+ text: chunk.text,
560
+ id: chunk.id,
561
+
562
+ // Document organization
563
+ source: chunk.source,
564
+ category: chunk.category,
565
+
566
+ // Temporal metadata
567
+ createdAt: new Date().toISOString(),
568
+ version: '1.0',
569
+
570
+ // Custom fields
571
+ language: chunk.language,
572
+ author: chunk.author,
573
+ confidenceScore: chunk.score,
574
+ })),
575
+ })
576
+ ```
577
+
578
+ Key metadata considerations:
579
+
580
+ - Be strict with field naming - inconsistencies like 'category' vs 'Category' will affect queries
581
+ - Only include fields you plan to filter or sort by - extra fields add overhead
582
+ - Add timestamps (e.g., 'createdAt', 'lastUpdated') to track content freshness
583
+
584
+ ## Deleting Vectors
585
+
586
+ When building RAG applications, you often need to clean up stale vectors when documents are deleted or updated. Mastra provides the `deleteVectors` method that supports deleting vectors by metadata filters, making it easy to remove all embeddings associated with a specific document.
587
+
588
+ ### Delete by Metadata Filter
589
+
590
+ The most common use case is deleting all vectors for a specific document when a user deletes it:
591
+
592
+ ```ts
593
+ // Delete all vectors for a specific document
594
+ await store.deleteVectors({
595
+ indexName: 'myCollection',
596
+ filter: { docId: 'document-123' },
597
+ })
598
+ ```
599
+
600
+ This is particularly useful when:
601
+
602
+ - A user deletes a document and you need to remove all its chunks
603
+ - You're re-indexing a document and want to remove old vectors first
604
+ - You need to clean up vectors for a specific user or tenant
605
+
606
+ ### Delete Multiple Documents
607
+
608
+ You can also use complex filters to delete vectors matching multiple conditions:
609
+
610
+ ```ts
611
+ // Delete all vectors for multiple documents
612
+ await store.deleteVectors({
613
+ indexName: 'myCollection',
614
+ filter: {
615
+ docId: { $in: ['doc-1', 'doc-2', 'doc-3'] },
616
+ },
617
+ })
618
+
619
+ // Delete vectors for a specific user's documents
620
+ await store.deleteVectors({
621
+ indexName: 'myCollection',
622
+ filter: {
623
+ $and: [{ userId: 'user-123' }, { status: 'archived' }],
624
+ },
625
+ })
626
+ ```
627
+
628
+ ### Delete by Vector IDs
629
+
630
+ If you have specific vector IDs to delete, you can pass them directly:
631
+
632
+ ```ts
633
+ // Delete specific vectors by their IDs
634
+ await store.deleteVectors({
635
+ indexName: 'myCollection',
636
+ ids: ['vec-1', 'vec-2', 'vec-3'],
637
+ })
638
+ ```
639
+
640
+ ## Best Practices
641
+
642
+ - Create indexes before bulk insertions
643
+ - Use batch operations for large insertions (the upsert method handles batching automatically)
644
+ - Only store metadata you'll query against
645
+ - Match embedding dimensions to your model (e.g., 1536 for `text-embedding-3-small`)