@convex-dev/rag 0.3.1 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +325 -98
- package/dist/client/defaultChunker.d.ts.map +1 -1
- package/dist/client/defaultChunker.js +47 -16
- package/dist/client/defaultChunker.js.map +1 -1
- package/dist/client/fileUtils.d.ts +4 -2
- package/dist/client/fileUtils.d.ts.map +1 -1
- package/dist/client/fileUtils.js +5 -3
- package/dist/client/fileUtils.js.map +1 -1
- package/dist/client/index.d.ts +17 -13
- package/dist/client/index.d.ts.map +1 -1
- package/dist/client/index.js +11 -7
- package/dist/client/index.js.map +1 -1
- package/dist/component/_generated/api.d.ts +1 -0
- package/dist/component/chunks.d.ts +1 -0
- package/dist/component/chunks.d.ts.map +1 -1
- package/dist/component/chunks.js +2 -1
- package/dist/component/chunks.js.map +1 -1
- package/dist/component/entries.d.ts +2 -2
- package/dist/component/entries.d.ts.map +1 -1
- package/dist/component/entries.js +1 -1
- package/dist/component/entries.js.map +1 -1
- package/dist/shared.d.ts +2 -2
- package/dist/shared.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/client/defaultChunker.test.ts +1 -1
- package/src/client/defaultChunker.ts +73 -17
- package/src/client/fileUtils.ts +8 -4
- package/src/client/index.test.ts +11 -7
- package/src/client/index.ts +24 -18
- package/src/component/_generated/api.d.ts +1 -0
- package/src/component/chunks.test.ts +2 -0
- package/src/component/chunks.ts +2 -1
- package/src/component/entries.ts +3 -3
- package/src/shared.ts +2 -2
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Convex RAG Component
 
-[](https://badge.fury.io/js/@convex-dev%2Frag)
 
 <!-- START: Include on https://convex.dev/components -->
 
@@ -57,23 +57,13 @@ import { RAG } from "@convex-dev/rag";
 // Any AI SDK model that supports embeddings will work.
 import { openai } from "@ai-sdk/openai";
 
-const rag = new RAG<FilterTypes>(components.rag, {
-  filterNames: ["category", "contentType", "categoryAndType"],
+const rag = new RAG(components.rag, {
   textEmbeddingModel: openai.embedding("text-embedding-3-small"),
-  embeddingDimension: 1536,
+  embeddingDimension: 1536, // Needs to match your embedding model
 });
-
-// Optional: Add type safety to your filters.
-type FilterTypes = {
-  category: string;
-  contentType: string;
-  categoryAndType: { category: string; contentType: string };
-};
 ```
 
-##
-
-### Add context to RAG
+## Add context to RAG
 
 Add content with text chunks. Each call to `add` will create a new **entry**.
 It will embed the chunks automatically if you don't provide them.
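An aside on setup: the `rag` instance above assumes the component is registered in `convex.config.ts`. That registration is not part of this diff; the sketch below follows the standard Convex component pattern, and the config import path is an assumption.

```ts
// convex/convex.config.ts — sketch of standard component registration
// (not part of this diff; the "@convex-dev/rag/convex.config" path is assumed).
import { defineApp } from "convex/server";
import rag from "@convex-dev/rag/convex.config";

const app = defineApp();
app.use(rag); // exposes components.rag for the RAG constructor
export default app;
```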
@@ -91,52 +81,9 @@ export const add = action({
 });
 ```
 
-See below for how to add content asynchronously, e.g. to handle large files.
-
-### Generate a response based on RAG context
-
-You can use the `generateText` function to generate a response based on RAG context. This will automatically search for relevant entries and use them as context for the LLM, using default formatting.
-
-The arguments to `generateText` are compatible with all arguments to `generateText` from the AI SDK.
-
-To have more control over the context and prompting, you can use the `search` function to get the context, and then use any model to generate a response.
-See below for more details.
-
-```ts
-export const askQuestion = action({
-  args: {
-    prompt: v.string(),
-  },
-  handler: async (ctx, args) => {
-    const userId = await getAuthUserId(ctx);
-    const { text, context } = await rag.generateText(ctx, {
-      search: { namespace: userId, limit: 10 },
-      prompt: args.prompt,
-      model: openai.chat("gpt-4o-mini"),
-    });
-    return { answer: text, context };
-  },
-```
-
-Note: You can specify any of the search options available on `rag.search`.
-See below for more details.
-
-### Using your own content splitter
-
-By default, the component uses the `defaultChunker` to split the content into chunks.
-You can pass in your own content chunks to the `add` or `addAsync` functions.
-
-```ts
-const chunks = await textSplitter.split(content);
-await rag.add(ctx, { namespace: "global", chunks });
-```
-
-Note: The `textSplitter` here could be LangChain, Mastra, or something custom.
-The simplest version makes an array of strings like `content.split("\n")`.
-
-Note: you can pass in an async iterator instead of an array to handle large content.
+See below for how to chunk the text yourself or add content asynchronously, e.g. to handle large files.
 
-
+## Semantic Search
 
 Search across content with vector similarity
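The context lines at the top of the hunk above close an `add` action whose body sits outside this diff. A minimal sketch of what such an action looks like, reconstructed only from the `rag.add` calls visible elsewhere in this README diff (the `args` shape and namespace value are assumptions, and the usual `action`/`v` imports from the example file are assumed):

```ts
// Sketch only — not the package's exact example.
export const add = action({
  args: { text: v.string() },
  handler: async (ctx, { text }) => {
    // With only text provided, the component chunks and embeds automatically.
    await rag.add(ctx, { namespace: "global", text });
  },
});
```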
@@ -157,7 +104,7 @@ export const search = action({
     const { results, text, entries } = await rag.search(ctx, {
       namespace: "global",
       query: args.query,
-      limit: 10
+      limit: 10,
       vectorScoreThreshold: 0.5, // Only return results with a score >= 0.5
     });
 
@@ -166,40 +113,93 @@ export const search = action({
 });
 ```
 
-
+## Generate a response based on RAG context
 
-
-
+Once you have searched for the context, you can use it with an LLM.
+
+Generally you'll already be using something to make LLM requests, e.g.
+the [Agent Component](https://www.convex.dev/components/agent),
+which tracks the message history for you.
+See the [Agent Component docs](https://docs.convex.dev/agents)
+for more details on doing RAG with the Agent Component.
+
+However, if you just want a one-off response, you can use the `generateText`
+function as a convenience.
+
+This will automatically search for relevant entries and use them as context
+for the LLM, using default formatting.
+
+The arguments to `generateText` are compatible with all arguments to
+`generateText` from the AI SDK.
 
 ```ts
-
+export const askQuestion = action({
+  args: {
+    prompt: v.string(),
+  },
+  handler: async (ctx, args) => {
+    const userId = await getAuthUserId(ctx);
+    const { text, context } = await rag.generateText(ctx, {
+      search: { namespace: userId, limit: 10 },
+      prompt: args.prompt,
+      model: openai.chat("gpt-4o-mini"),
+    });
+    return { answer: text, context };
+  },
 ```
 
-
-it chunks, embeds, and inserts the data into the database.
-Once all data is inserted, it will iterate over the chunks and swap the old
-content embeddings with the new ones, and then update the status to "ready",
-marking the previous version as "replaced".
+Note: You can specify any of the search options available on `rag.search`.
 
-
-results for old vector search results.
-See below for more details on deleting.
+## Filtered Search
 
-
-
-
-search for it, or if you want to add content to a namespace and then immediately
-add more content to the same namespace.
+You can provide filters when adding content and use them to search.
+To do this, you'll need to give the RAG component a list of the filter names.
+You can optionally provide a type parameter for type safety (no runtime validation).
 
-
+Note: these filters can be OR'd together when searching. In order to get an AND,
+you provide a filter with a more complex value, such as `categoryAndType` below.
+
+```ts
+// convex/example.ts
+import { components } from "./_generated/api";
+import { RAG } from "@convex-dev/rag";
+// Any AI SDK model that supports embeddings will work.
+import { openai } from "@ai-sdk/openai";
+
+// Optional: Add type safety to your filters.
+type FilterTypes = {
+  category: string;
+  contentType: string;
+  categoryAndType: { category: string; contentType: string };
+};
+
+const rag = new RAG<FilterTypes>(components.rag, {
+  textEmbeddingModel: openai.embedding("text-embedding-3-small"),
+  embeddingDimension: 1536, // Needs to match your embedding model
+  filterNames: ["category", "contentType", "categoryAndType"],
+});
+```
+
+Adding content with filters:
+
+```ts
+await rag.add(ctx, {
+  namespace: "global",
+  text,
+  filterValues: [
+    { name: "category", value: "news" },
+    { name: "contentType", value: "article" },
+    { name: "categoryAndType", value: { category: "news", contentType: "article" } },
+  ],
+});
+```
 
 Search with metadata filters:
 
 ```ts
-export const searchByCategory = action({
+export const searchForNewsOrSports = action({
   args: {
     query: v.string(),
-    category: v.string(),
   },
   handler: async (ctx, args) => {
     const userId = await getUserId(ctx);
@@ -208,7 +208,10 @@ export const searchByCategory = action({
     const results = await rag.search(ctx, {
       namespace: userId,
       query: args.query,
-      filters: [{ name: "category", value: args.category }],
+      filters: [
+        { name: "category", value: "news" },
+        { name: "category", value: "sports" },
+      ],
       limit: 10,
     });
 
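The two `category` filters in the hunk above are OR'd together, per the Filtered Search note. To illustrate the AND case, a short sketch using the compound `categoryAndType` filter defined earlier — inside an action handler like the ones above, and the compound value must exactly match what was stored at add time:

```ts
// AND semantics via the compound filter (sketch, consistent with the README's
// filterNames setup shown above).
const { results } = await rag.search(ctx, {
  namespace: "global",
  query: args.query,
  filters: [
    { name: "categoryAndType", value: { category: "news", contentType: "article" } },
  ],
  limit: 10,
});
```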
@@ -257,14 +260,14 @@ export const searchWithContext = action({
 });
 ```
 
-
+## Formatting results
 
 Formatting the results for use in a prompt depends a bit on the use case.
 By default, the results will be sorted by score, not necessarily in the order
 they appear in the original text. You may want to sort them by the order they
 appear in the original text so they follow the flow of the original document.
 
-For
+For convenience, the `text` field of the search results is a string formatted
 with `...` separating non-sequential chunks, `---` separating entries, and
 `# Title:` at each entry boundary (if titles are available).
 
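The paragraph above suggests re-sorting results into original-document order. A sketch of that idea follows; note that the `entryId` and `order` fields on each result are assumptions for illustration, not confirmed by this diff:

```ts
// Hypothetical re-sort into document order; field names are assumed.
const inDocumentOrder = [...results].sort((a, b) =>
  a.entryId === b.entryId
    ? a.order - b.order // chunk position within an entry (assumed field)
    : a.entryId.localeCompare(b.entryId),
);
```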
@@ -274,14 +277,18 @@ console.log(text);
 ```
 
 ```txt
-
+## Title 1:
 Chunk 1 contents
 Chunk 2 contents
+
 ...
+
 Chunk 8 contents
 Chunk 9 contents
+
 ---
-
+
+## Title 2:
 Chunk 4 contents
 Chunk 5 contents
 ```
@@ -330,7 +337,49 @@ await generateText({
 });
 ```
 
-
+## Using keys to gracefully replace content
+
+When you add content to a namespace, you can provide a `key` to uniquely identify the content.
+If you add content with the same key, it will make a new entry to replace the old one.
+
+```ts
+await rag.add(ctx, { namespace: userId, key: "my-file.txt", text });
+```
+
+When a new document is added, it will start with a status of "pending" while
+it chunks, embeds, and inserts the data into the database.
+Once all data is inserted, it will iterate over the chunks and swap the old
+content embeddings with the new ones, and then update the status to "ready",
+marking the previous version as "replaced".
+
+The old content is kept around by default, so in-flight searches will get
+results for old vector search results.
+See below for more details on deleting.
+
+This means that if searches are happening while the document is being added,
+they will see the old content results.
+This is useful if you want to add content to a namespace and then immediately
+search for it, or if you want to add content to a namespace and then immediately
+add more content to the same namespace.
+
+## Using your own content splitter
+
+By default, the component uses the `defaultChunker` to split the content into chunks.
+You can pass in your own content chunks to the `add` or `addAsync` functions.
+
+```ts
+const chunks = await textSplitter.split(content);
+await rag.add(ctx, { namespace: "global", chunks });
+```
+
+Note: The `textSplitter` here could be LangChain, Mastra, or something custom.
+The simplest version makes an array of strings like `content.split("\n")`.
+
+Note: you can pass in an async iterator instead of an array to handle large content.
+Or use the `addAsync` function (see below).
+
+
+## Providing custom embeddings per-chunk
 
 In addition to the text, you can provide your own embeddings for each chunk.
 
@@ -348,7 +397,7 @@ const chunksWithEmbeddings = await Promise.all(chunks.map(async chunk => {
 await rag.add(ctx, { namespace: "global", chunks });
 ```
 
-
+## Add Entries Asynchronously using File Storage
 
 For large files, you can upload them to file storage, then provide a chunker
 action to split them into chunks.
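The hunk above only shows the edges of the `chunksWithEmbeddings` example; the full block sits outside this diff. A sketch of the likely pattern using the AI SDK's `embed` helper — the `{ text, embedding }` chunk shape is an assumption based on the section title, not confirmed here:

```ts
import { embed } from "ai";

// Sketch: embed each chunk yourself, then hand the pairs to rag.add.
const chunksWithEmbeddings = await Promise.all(
  chunks.map(async (chunk) => {
    const { embedding } = await embed({
      model: openai.embedding("text-embedding-3-small"),
      value: chunk,
    });
    return { text: chunk, embedding }; // shape assumed, not confirmed here
  }),
);
await rag.add(ctx, { namespace: "global", chunks: chunksWithEmbeddings });
```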
@@ -462,18 +511,196 @@ Generally you'd do this:
 1. Periodically by querying:
 
 ```ts
-
-
-
+// in convex/crons.ts
+import { cronJobs } from "convex/server";
+import { internal } from "./_generated/api.js";
+import { internalMutation } from "./_generated/server.js";
+import { v } from "convex/values";
+import { rag } from "./example.js";
+import { assert } from "convex-helpers";
+
+const WEEK = 7 * 24 * 60 * 60 * 1000;
+
+export const deleteOldContent = internalMutation({
+  args: { cursor: v.optional(v.string()) },
+  handler: async (ctx, args) => {
+    const toDelete = await rag.list(ctx, {
+      status: "replaced",
+      paginationOpts: { cursor: args.cursor ?? null, numItems: 100 },
+    });
+
+    for (const entry of toDelete.page) {
+      assert(entry.status === "replaced");
+      if (entry.replacedAt >= Date.now() - WEEK) {
+        return; // we're done when we catch up to a week ago
+      }
+      await rag.delete(ctx, { entryId: entry.entryId });
+    }
+    if (!toDelete.isDone) {
+      await ctx.scheduler.runAfter(0, internal.example.deleteOldContent, {
+        cursor: toDelete.continueCursor,
+      });
+    }
+  },
 });
 
-
-
-
-
-
-
-
+// See example/convex/crons.ts for a complete example.
+const crons = cronJobs();
+crons.interval("deleteOldContent", { hours: 1 }, internal.crons.deleteOldContent, {});
+export default crons;
+```
+
+## Working with types
+
+You can use the provided types to validate and store data.
+`import { ... } from "@convex-dev/rag";`
+
+Types for the various elements:
+
+`Entry`, `EntryFilter`, `SearchEntry`, `SearchResult`
+
+- `SearchEntry` is an `Entry` with a `text` field including the combined search
+  results for that entry, whereas a `SearchResult` is a specific chunk result,
+  along with surrounding chunks.
+
+`EntryId`, `NamespaceId`
+
+- While the `EntryId` and `NamespaceId` are strings under the hood, they are
+  given more specific types to make it easier to use them correctly.
+
+Validators can be used in `args` and schema table definitions:
+`vEntry`, `vEntryId`, `vNamespaceId`, `vSearchEntry`, `vSearchResult`
+
+e.g. `defineTable({ myDocTitle: v.string(), entryId: vEntryId })`
+
+The validators for the branded IDs will only validate they are strings,
+but will have the more specific types, to provide type safety.
+
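Expanding the `defineTable` one-liner above into a full schema file, as a sketch — the table name and index are illustrative only:

```ts
// convex/schema.ts — sketch using the branded validator from @convex-dev/rag.
import { defineSchema, defineTable } from "convex/server";
import { v } from "convex/values";
import { vEntryId } from "@convex-dev/rag";

export default defineSchema({
  // "documents" and the index are hypothetical names for illustration.
  documents: defineTable({
    myDocTitle: v.string(),
    entryId: vEntryId, // validates as a string, typed as EntryId
  }).index("by_entryId", ["entryId"]),
});
```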
+## Utility Functions
+
+In addition to the functions on the `rag` instance, there are other utilities
+provided:
+
+### `defaultChunker`
+
+This is the default chunker used by the `add` and `addAsync` functions.
+
+It is customizable, but by default:
+- It tries to break up the text into paragraphs between 100-1k characters.
+- It will combine paragraphs to meet the minimum character count (100).
+- It will break up paragraphs into separate lines to keep it under 1k.
+- It will not split up a single line unless it's longer than 10k characters.
+
+```ts
+import { defaultChunker } from "@convex-dev/rag";
+
+const chunks = defaultChunker(text, {
+  // these are the defaults
+  minLines: 1,
+  minCharsSoftLimit: 100,
+  maxCharsSoftLimit: 1000,
+  maxCharsHardLimit: 10000,
+  delimiter: "\n\n",
+});
+```
+
+### `hybridRank`
+
+This is an implementation of "Reciprocal Rank Fusion" for ranking search results
+based on multiple scoring arrays. The premise is that if both arrays of results
+are sorted by score, the best results show up near the top of both arrays and
+should be preferred over results higher in one but much lower in the other.
+
+```ts
+import { hybridRank } from "@convex-dev/rag";
+
+const textSearchResults = [id1, id2, id3];
+const vectorSearchResults = [id2, id3, id1];
+const results = hybridRank([
+  textSearchResults,
+  vectorSearchResults,
+]);
+// results = [id2, id1, id3]
+```
+
+It can take more than two arrays, and you can provide weights for each array.
+
+```ts
+const recentSearchResults = [id5, id4, id3];
+const results = hybridRank([
+  textSearchResults,
+  vectorSearchResults,
+  recentSearchResults,
+], {
+  weights: [2, 1, 3], // prefer recent results more than text or vector
+});
+// results = [ id3, id5, id1, id2, id4 ]
+```
+
+To have it more biased towards the top few results, you can set the `k` value
+to a lower number (10 by default).
+
+```ts
+const results = hybridRank([
+  textSearchResults,
+  vectorSearchResults,
+  recentSearchResults,
+], { k: 1 });
+// results = [ id5, id1, id3, id2, id4 ]
+```
+
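For reference, standard Reciprocal Rank Fusion scores each id as a weighted sum of 1/(k + rank) across the input arrays. The standalone sketch below reproduces the README's first example output under that formula; it is illustrative only and not the component's source:

```ts
// Illustrative RRF, consistent with the hybridRank examples above.
function rrfSketch<T>(
  rankings: T[][],
  opts: { k?: number; weights?: number[] } = {},
): T[] {
  const { k = 10, weights = [] } = opts;
  const scores = new Map<T, number>();
  rankings.forEach((ranking, i) => {
    const w = weights[i] ?? 1;
    ranking.forEach((id, rank) => {
      // rank is 0-based here, so rank + 1 is the 1-based position.
      scores.set(id, (scores.get(id) ?? 0) + w / (k + rank + 1));
    });
  });
  return [...scores.entries()].sort((a, b) => b[1] - a[1]).map(([id]) => id);
}

// rrfSketch([["id1", "id2", "id3"], ["id2", "id3", "id1"]])
// → ["id2", "id1", "id3"], matching the first example above.
```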
+### `contentHashFromArrayBuffer`
+
+This generates the hash of a file's contents, which can be used to avoid
+adding the same file twice.
+
+Note: doing `blob.arrayBuffer()` will consume the blob's data, so you'll need
+to make a new blob to use it after calling this function.
+
+```ts
+import { contentHashFromArrayBuffer } from "@convex-dev/rag";
+
+export const addFile = action({
+  args: { bytes: v.bytes() },
+  handler: async (ctx, { bytes }) => {
+    const hash = await contentHashFromArrayBuffer(bytes);
+
+    const existing = await rag.findEntryByContentHash(ctx, {
+      namespace: "global",
+      key: "my-file.txt",
+      contentHash: hash,
+    });
+    if (existing) {
+      console.log("File contents are the same, skipping");
+      return;
+    }
+    const blob = new Blob([bytes], { type: "text/plain" });
+    //...
+  },
+});
+```
+
+### `guessMimeTypeFromExtension`
+
+This guesses the mime type of a file from its extension.
+
+```ts
+import { guessMimeTypeFromExtension } from "@convex-dev/rag";
+
+const mimeType = guessMimeTypeFromExtension("my-file.mjs");
+console.log(mimeType); // "text/javascript"
+```
+
+### `guessMimeTypeFromContents`
+
+This guesses the mime type of a file from the first few bytes of its contents.
+
+```ts
+import { guessMimeTypeFromContents } from "@convex-dev/rag";
+
+const mimeType = guessMimeTypeFromContents(await file.arrayBuffer());
 ```
 
 ### Example Usage
@@ -482,5 +709,5 @@ See more example usage in [example.ts](./example/convex/example.ts).
 
 ### Running the example
 
-Run the example with `npm i && npm run example`.
+Run the example with `npm i && npm run setup && npm run example`.
 <!-- END: Include on https://convex.dev/components -->
package/dist/client/defaultChunker.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"defaultChunker.d.ts","sourceRoot":"","sources":["../../src/client/defaultChunker.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,wBAAgB,cAAc,CAC5B,IAAI,EAAE,MAAM,EACZ,EACE,QAAY,EACZ,iBAAuB,EACvB,iBAAwB,EACxB,iBAAyB,EACzB,SAAkB,GACnB,GAAE;IACD,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,SAAS,CAAC,EAAE,MAAM,CAAC;CACf,GACL,MAAM,EAAE,
+{"version":3,"file":"defaultChunker.d.ts","sourceRoot":"","sources":["../../src/client/defaultChunker.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,wBAAgB,cAAc,CAC5B,IAAI,EAAE,MAAM,EACZ,EACE,QAAY,EACZ,iBAAuB,EACvB,iBAAwB,EACxB,iBAAyB,EACzB,SAAkB,GACnB,GAAE;IACD,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,SAAS,CAAC,EAAE,MAAM,CAAC;CACf,GACL,MAAM,EAAE,CA6HV;AA4FD,eAAe,cAAc,CAAC"}
package/dist/client/defaultChunker.js
CHANGED
@@ -4,7 +4,7 @@
  * By default, it will chunk into paragraphs and target
  * 200-2000 characters per chunk (only less than 1 line if the hard limit is reached).
  */
-export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 200, maxCharsSoftLimit = 2000, maxCharsHardLimit = 10000, delimiter = "\n\n", } = {}) {
+export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 100, maxCharsSoftLimit = 1000, maxCharsHardLimit = 10000, delimiter = "\n\n", } = {}) {
     if (!text)
         return [];
     // Split text into individual lines
@@ -19,13 +19,17 @@ export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 200, ma
         const potentialChunk = [...currentChunk, line].join("\n");
         // If adding this line would exceed max chars, finalize current chunk first
         if (potentialChunk.length > maxCharsSoftLimit && currentChunk.length > 0) {
-            const
-
+            const processedChunk = processChunkForOutput(currentChunk, lines, i - currentChunk.length);
+            if (processedChunk.trim()) {
+                chunks.push(processedChunk);
+            }
             // Split the line if it exceeds hard limit
             const splitLines = maybeSplitLine(line, maxCharsHardLimit);
             // Add all but the last split piece as separate chunks
             for (let j = 0; j < splitLines.length - 1; j++) {
-                chunks.push(splitLines[j]);
+                if (splitLines[j].trim()) {
+                    chunks.push(splitLines[j]);
+                }
             }
             // Keep the last piece for potential combination with next lines
             currentChunk = [splitLines[splitLines.length - 1]];
@@ -37,8 +41,11 @@ export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 200, ma
             currentChunk.join("\n").length >= Math.min(minCharsSoftLimit * 0.8, 150)) {
             // Simple logic: only split if potential chunk would exceed the soft max limit
             if (potentialChunk.length > maxCharsSoftLimit) {
-                // When splitting at delimiter boundary, preserve natural empty lines
-
+                // When splitting at delimiter boundary, preserve natural empty lines and trailing newlines
+                const processedChunk = processChunkForOutput(currentChunk, lines, i - currentChunk.length);
+                if (processedChunk.trim()) {
+                    chunks.push(processedChunk);
+                }
                 currentChunk = [line];
                 continue;
             }
@@ -53,22 +60,28 @@ export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 200, ma
         if (splitLines.length > 1) {
             // Line was split - add all but the last piece as separate chunks
             for (let j = 0; j < splitLines.length - 1; j++) {
-                chunks.push(splitLines[j]);
+                if (splitLines[j].trim()) {
+                    chunks.push(splitLines[j]);
+                }
             }
             // Keep the last piece for potential combination with next lines
             currentChunk = [splitLines[splitLines.length - 1]];
         }
         else {
             // Line doesn't exceed hard limit, keep it as is
-            chunks.push(line);
+            if (line.trim()) {
+                chunks.push(line);
+            }
             currentChunk = [];
         }
     }
     else {
         // Remove last line and finalize chunk
         const lastLine = currentChunk.pop();
-        const
-
+        const processedChunk = processChunkForOutput(currentChunk, lines, i - currentChunk.length);
+        if (processedChunk.trim()) {
+            chunks.push(processedChunk);
+        }
         currentChunk = [lastLine];
     }
 }
@@ -79,14 +92,32 @@ export function defaultChunker(text, { minLines = 1, minCharsSoftLimit = 200, ma
     if (remainingText.length > maxCharsHardLimit) {
         // Split the remaining chunk if it exceeds hard limit
         const splitLines = maybeSplitLine(remainingText, maxCharsHardLimit);
-        chunks.push(...splitLines);
+        chunks.push(...splitLines.filter((chunk) => chunk.trim()));
     }
     else {
-        const
-
+        const processedChunk = processChunkForOutput(currentChunk, lines, lines.length - currentChunk.length);
+        if (processedChunk.trim()) {
+            chunks.push(processedChunk);
+        }
     }
 }
-    return chunks;
+    // Filter out any empty chunks that might have slipped through
+    return chunks.filter((chunk) => chunk.trim().length > 0);
+}
+function processChunkForOutput(chunkLines, allLines, startIndex) {
+    if (chunkLines.length === 0)
+        return "";
+    // Remove trailing empty lines but preserve meaningful structure
+    const trimmedLines = removeTrailingEmptyLines(chunkLines);
+    // Check if we should preserve some trailing newlines by looking at the original context
+    const endIndex = startIndex + chunkLines.length - 1;
+    const hasTrailingNewlines = endIndex < allLines.length - 1 && chunkLines.length > trimmedLines.length;
+    // If we removed empty lines but there are more lines after this chunk,
+    // preserve one trailing newline to maintain paragraph separation
+    if (hasTrailingNewlines && trimmedLines.length > 0) {
+        return trimmedLines.join("\n") + "\n";
+    }
+    return trimmedLines.join("\n");
 }
 function maybeSplitLine(line, maxCharsHardLimit) {
     const inputs = [line]; // in reverse order
@@ -141,8 +172,8 @@ function removeTrailingEmptyLines(lines) {
             return lines.slice(0, i + 1);
         }
     }
-    // If all lines are empty,
-    return
+    // If all lines are empty, return empty array instead of keeping empty strings
+    return [];
 }
 export default defaultChunker;
 //# sourceMappingURL=defaultChunker.js.map
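Taken together, the changes in this file shift the default chunk targets from 200-2000 to 100-1000 characters and drop whitespace-only chunks. A quick check of the new behavior, as a sketch — the input string is arbitrary and exact chunk boundaries depend on the text:

```ts
import { defaultChunker } from "@convex-dev/rag";

// In 0.3.2, no whitespace-only chunks survive, per the new trim() filters.
const chunks = defaultChunker("first paragraph\n\n\n\nsecond paragraph");
console.assert(chunks.every((c) => c.trim().length > 0));
```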