@convex-dev/rag 0.6.1 → 0.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +4 -17
  2. package/dist/client/hybridRank.d.ts +1 -1
  3. package/dist/client/hybridRank.js +1 -1
  4. package/dist/client/index.d.ts +39 -7
  5. package/dist/client/index.d.ts.map +1 -1
  6. package/dist/client/index.js +33 -14
  7. package/dist/client/index.js.map +1 -1
  8. package/dist/component/_generated/component.d.ts +6 -1
  9. package/dist/component/_generated/component.d.ts.map +1 -1
  10. package/dist/component/_generated/dataModel.d.ts +1 -1
  11. package/dist/component/_generated/server.d.ts.map +1 -1
  12. package/dist/component/chunks.d.ts +9 -2
  13. package/dist/component/chunks.d.ts.map +1 -1
  14. package/dist/component/chunks.js +66 -63
  15. package/dist/component/chunks.js.map +1 -1
  16. package/dist/component/embeddings/tables.d.ts +2 -2
  17. package/dist/component/embeddings/tables.d.ts.map +1 -1
  18. package/dist/component/schema.d.ts +87 -84
  19. package/dist/component/schema.d.ts.map +1 -1
  20. package/dist/component/schema.js +0 -1
  21. package/dist/component/schema.js.map +1 -1
  22. package/dist/component/search.d.ts +44 -1
  23. package/dist/component/search.d.ts.map +1 -1
  24. package/dist/component/search.js +188 -17
  25. package/dist/component/search.js.map +1 -1
  26. package/dist/shared.d.ts +2 -0
  27. package/dist/shared.d.ts.map +1 -1
  28. package/dist/shared.js +1 -0
  29. package/dist/shared.js.map +1 -1
  30. package/package.json +40 -38
  31. package/src/client/hybridRank.ts +1 -1
  32. package/src/client/index.test.ts +1 -1
  33. package/src/client/index.ts +80 -18
  34. package/src/component/_generated/component.ts +6 -1
  35. package/src/component/_generated/dataModel.ts +1 -1
  36. package/src/component/_generated/server.ts +0 -5
  37. package/src/component/chunks.ts +102 -92
  38. package/src/component/schema.ts +0 -1
  39. package/src/component/search.test.ts +303 -1
  40. package/src/component/search.ts +266 -19
  41. package/src/shared.ts +7 -0
@@ -3,7 +3,7 @@
3
3
  import { describe, expect, test } from "vitest";
4
4
  import { convexTest, type TestConvex } from "convex-test";
5
5
  import schema from "./schema.js";
6
- import { api } from "./_generated/api.js";
6
+ import { api, internal } from "./_generated/api.js";
7
7
  import { modules } from "./setup.test.js";
8
8
  import { insertChunks } from "./chunks.js";
9
9
  import type { Id } from "./_generated/dataModel.js";
@@ -442,4 +442,306 @@ describe("search", () => {
442
442
  );
443
443
  }
444
444
  });
445
+
446
+ describe("hybrid search", () => {
447
+ function createSearchableChunks(texts: string[], baseEmbedding = 0.1) {
448
+ return texts.map((text, i) => ({
449
+ content: { text, metadata: { index: i } },
450
+ embedding: [...Array(127).fill(0.01), baseEmbedding + i * 0.01],
451
+ searchableText: text,
452
+ }));
453
+ }
454
+
455
+ test("textSearch internal query finds chunks by text content", async () => {
456
+ const t = convexTest(schema, modules);
457
+ const namespaceId = await setupTestNamespace(t);
458
+ const entryId = await setupTestEntry(t, namespaceId);
459
+
460
+ const chunks = createSearchableChunks([
461
+ "The quick brown fox jumps over the lazy dog",
462
+ "A fast red car drives on the highway",
463
+ "The brown bear sleeps in the forest",
464
+ ]);
465
+
466
+ await t.run(async (ctx) => {
467
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
468
+ });
469
+
470
+ const results = await t.query(internal.search.textSearch, {
471
+ query: "brown",
472
+ namespaceId,
473
+ filters: [],
474
+ limit: 10,
475
+ });
476
+
477
+ expect(results.length).toBeGreaterThan(0);
478
+ for (const r of results) {
479
+ expect(r.entryId).toBe(entryId);
480
+ }
481
+ });
482
+
483
+ test("textSearch scopes results to the given namespace", async () => {
484
+ const t = convexTest(schema, modules);
485
+ const ns1Id = await setupTestNamespace(t, "namespace-1");
486
+ const ns2Id = await setupTestNamespace(t, "namespace-2");
487
+ const entry1Id = await setupTestEntry(t, ns1Id, "entry-1");
488
+ const entry2Id = await setupTestEntry(t, ns2Id, "entry-2");
489
+
490
+ await t.run(async (ctx) => {
491
+ await insertChunks(ctx, {
492
+ entryId: entry1Id,
493
+ startOrder: 0,
494
+ chunks: createSearchableChunks(["alpha bravo charlie"]),
495
+ });
496
+ await insertChunks(ctx, {
497
+ entryId: entry2Id,
498
+ startOrder: 0,
499
+ chunks: createSearchableChunks(["alpha delta echo"]),
500
+ });
501
+ });
502
+
503
+ const ns1Results = await t.query(internal.search.textSearch, {
504
+ query: "alpha",
505
+ namespaceId: ns1Id,
506
+ filters: [],
507
+ limit: 10,
508
+ });
509
+
510
+ // All results should belong to namespace-1's entry.
511
+ for (const r of ns1Results) {
512
+ expect(r.entryId).toBe(entry1Id);
513
+ }
514
+ });
515
+
516
+ test("textSearch applies numbered filters", async () => {
517
+ const t = convexTest(schema, modules);
518
+ const namespaceId = await setupTestNamespace(t, "filtered-ns", 128, [
519
+ "category",
520
+ ]);
521
+
522
+ const cat1Entry = await setupTestEntry(t, namespaceId, "cat1", 0, [
523
+ { name: "category", value: "docs" },
524
+ ]);
525
+ const cat2Entry = await setupTestEntry(t, namespaceId, "cat2", 0, [
526
+ { name: "category", value: "blogs" },
527
+ ]);
528
+
529
+ await t.run(async (ctx) => {
530
+ await insertChunks(ctx, {
531
+ entryId: cat1Entry,
532
+ startOrder: 0,
533
+ chunks: createSearchableChunks(["shared keyword content"]),
534
+ });
535
+ await insertChunks(ctx, {
536
+ entryId: cat2Entry,
537
+ startOrder: 0,
538
+ chunks: createSearchableChunks(["shared keyword content"]),
539
+ });
540
+ });
541
+
542
+ // Filter to "docs" category only (filter index 0 = "category").
543
+ const results = await t.query(internal.search.textSearch, {
544
+ query: "shared keyword",
545
+ namespaceId,
546
+ filters: [{ 0: "docs" }],
547
+ limit: 10,
548
+ });
549
+
550
+ expect(results.length).toBeGreaterThan(0);
551
+ for (const r of results) {
552
+ expect(r.entryId).toBe(cat1Entry);
553
+ }
554
+ });
555
+
556
+ test("text-only search returns results via dimension arg", async () => {
557
+ const t = convexTest(schema, modules);
558
+ const namespaceId = await setupTestNamespace(t);
559
+ const entryId = await setupTestEntry(t, namespaceId);
560
+
561
+ const chunks = createSearchableChunks([
562
+ "Machine learning is a subset of artificial intelligence",
563
+ "Deep learning uses neural networks with many layers",
564
+ "Natural language processing handles text data",
565
+ ]);
566
+
567
+ await t.run(async (ctx) => {
568
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
569
+ });
570
+
571
+ // Text-only: no embedding, provide dimension instead.
572
+ const result = await t.action(api.search.search, {
573
+ namespace: "test-namespace",
574
+ dimension: 128,
575
+ modelId: "test-model",
576
+ filters: [],
577
+ limit: 10,
578
+ textQuery: "neural networks",
579
+ });
580
+
581
+ expect(result.results.length).toBeGreaterThan(0);
582
+ expect(result.entries).toHaveLength(1);
583
+
584
+ // Text-only scores are position-based.
585
+ expect(result.results[0].score).toBe(1.0);
586
+ for (let i = 1; i < result.results.length; i++) {
587
+ expect(result.results[i].score).toBeLessThan(
588
+ result.results[i - 1].score,
589
+ );
590
+ }
591
+ });
592
+
593
+ test("hybrid search returns results when textQuery is provided", async () => {
594
+ const t = convexTest(schema, modules);
595
+ const namespaceId = await setupTestNamespace(t);
596
+ const entryId = await setupTestEntry(t, namespaceId);
597
+
598
+ const chunks = createSearchableChunks([
599
+ "Machine learning is a subset of artificial intelligence",
600
+ "Deep learning uses neural networks with many layers",
601
+ "Natural language processing handles text data",
602
+ ]);
603
+
604
+ await t.run(async (ctx) => {
605
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
606
+ });
607
+
608
+ const result = await t.action(api.search.search, {
609
+ namespace: "test-namespace",
610
+ embedding: [...Array(127).fill(0.01), 0.1],
611
+ modelId: "test-model",
612
+ filters: [],
613
+ limit: 10,
614
+ textQuery: "neural networks",
615
+ });
616
+
617
+ expect(result.results.length).toBeGreaterThan(0);
618
+ expect(result.entries).toHaveLength(1);
619
+
620
+ // Hybrid scores are position-based (1.0 for top, decreasing linearly).
621
+ expect(result.results[0].score).toBe(1.0);
622
+ for (let i = 1; i < result.results.length; i++) {
623
+ expect(result.results[i].score).toBeLessThan(
624
+ result.results[i - 1].score,
625
+ );
626
+ }
627
+ });
628
+
629
+ test("hybrid search deduplicates results from vector and text paths", async () => {
630
+ const t = convexTest(schema, modules);
631
+ const namespaceId = await setupTestNamespace(t);
632
+ const entryId = await setupTestEntry(t, namespaceId);
633
+
634
+ const chunks = createSearchableChunks([
635
+ "Unique content about quantum computing",
636
+ "Another chunk about classical physics",
637
+ ]);
638
+
639
+ await t.run(async (ctx) => {
640
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
641
+ });
642
+
643
+ const result = await t.action(api.search.search, {
644
+ namespace: "test-namespace",
645
+ embedding: [...Array(127).fill(0.01), 0.1],
646
+ modelId: "test-model",
647
+ filters: [],
648
+ limit: 10,
649
+ textQuery: "quantum computing",
650
+ });
651
+
652
+ // Each chunk should appear at most once in the results.
653
+ const entryOrderPairs = result.results.map(
654
+ (r) => `${r.entryId}:${r.order}`,
655
+ );
656
+ const uniquePairs = new Set(entryOrderPairs);
657
+ expect(uniquePairs.size).toBe(entryOrderPairs.length);
658
+ });
659
+
660
+ test("vector-only search is unchanged when textQuery is not provided", async () => {
661
+ const t = convexTest(schema, modules);
662
+ const namespaceId = await setupTestNamespace(t);
663
+ const entryId = await setupTestEntry(t, namespaceId);
664
+
665
+ const targetEmbedding = [...Array(127).fill(0.5), 1];
666
+ const chunks = [
667
+ {
668
+ content: { text: "Target chunk", metadata: {} },
669
+ embedding: targetEmbedding,
670
+ searchableText: "Target chunk",
671
+ },
672
+ {
673
+ content: { text: "Other chunk", metadata: {} },
674
+ embedding: [...Array(127).fill(0.1), 0],
675
+ searchableText: "Other chunk",
676
+ },
677
+ ];
678
+
679
+ await t.run(async (ctx) => {
680
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
681
+ });
682
+
683
+ const result = await t.action(api.search.search, {
684
+ namespace: "test-namespace",
685
+ embedding: targetEmbedding,
686
+ modelId: "test-model",
687
+ filters: [],
688
+ limit: 10,
689
+ });
690
+
691
+ // Without textQuery, scores should be cosine similarity (not position-based).
692
+ expect(result.results).toHaveLength(2);
693
+ expect(result.results[0].score).toBeGreaterThan(result.results[1].score);
694
+ // Cosine similarity scores are typically between -1 and 1, not exactly 1.0.
695
+ // Position-based would give exactly 1.0 for the first result.
696
+ // With cosine similarity the first result can be 1.0 if exact match,
697
+ // but the second should not follow the linear decrease pattern.
698
+ expect(result.results[0].content[0].text).toBe("Target chunk");
699
+ });
700
+
701
+ test("textWeight and vectorWeight influence hybrid ranking", async () => {
702
+ const t = convexTest(schema, modules);
703
+ const namespaceId = await setupTestNamespace(t);
704
+ const entryId = await setupTestEntry(t, namespaceId);
705
+
706
+ const chunks = createSearchableChunks([
707
+ "Alpha topic with specific terminology",
708
+ "Beta topic with different keywords",
709
+ "Gamma topic about something else entirely",
710
+ ]);
711
+
712
+ await t.run(async (ctx) => {
713
+ await insertChunks(ctx, { entryId, startOrder: 0, chunks });
714
+ });
715
+
716
+ const embedding = [...Array(127).fill(0.01), 0.1];
717
+
718
+ // Search with heavy text weight.
719
+ const textHeavy = await t.action(api.search.search, {
720
+ namespace: "test-namespace",
721
+ embedding,
722
+ modelId: "test-model",
723
+ filters: [],
724
+ limit: 10,
725
+ textQuery: "specific terminology",
726
+ textWeight: 10,
727
+ vectorWeight: 1,
728
+ });
729
+
730
+ // Search with heavy vector weight.
731
+ const vectorHeavy = await t.action(api.search.search, {
732
+ namespace: "test-namespace",
733
+ embedding,
734
+ modelId: "test-model",
735
+ filters: [],
736
+ limit: 10,
737
+ textQuery: "specific terminology",
738
+ textWeight: 1,
739
+ vectorWeight: 10,
740
+ });
741
+
742
+ // Both should return results.
743
+ expect(textHeavy.results.length).toBeGreaterThan(0);
744
+ expect(vectorHeavy.results.length).toBeGreaterThan(0);
745
+ });
746
+ });
445
747
  });
@@ -1,20 +1,30 @@
1
1
  import { v, type Infer } from "convex/values";
2
- import { action } from "./_generated/server.js";
2
+ import { action, internalQuery, type QueryCtx } from "./_generated/server.js";
3
3
  import { searchEmbeddings } from "./embeddings/index.js";
4
- import { numberedFiltersFromNamedFilters, vNamedFilter } from "./filters.js";
4
+ import {
5
+ filterFieldsFromNumbers,
6
+ numberedFiltersFromNamedFilters,
7
+ vNamedFilter,
8
+ type NumberedFilter,
9
+ } from "./filters.js";
5
10
  import { internal } from "./_generated/api.js";
6
11
  import {
7
12
  vEntry,
8
13
  vSearchResult,
14
+ vSearchType,
9
15
  type SearchResult,
10
16
  type EntryId,
11
17
  } from "../shared.js";
12
- import type { vRangeResult } from "./chunks.js";
18
+ import type { Doc, Id } from "./_generated/dataModel.js";
19
+ import { buildRanges, type vRangeResult } from "./chunks.js";
20
+ import { hybridRank } from "../client/hybridRank.js";
21
+ import { vVectorId, type VectorTableId } from "./embeddings/tables.js";
13
22
 
14
23
  export const search = action({
15
24
  args: {
16
25
  namespace: v.string(),
17
- embedding: v.array(v.number()),
26
+ embedding: v.optional(v.array(v.number())),
27
+ dimension: v.optional(v.number()),
18
28
  modelId: v.string(),
19
29
  // These are all OR'd together
20
30
  filters: v.array(vNamedFilter),
@@ -23,6 +33,10 @@ export const search = action({
23
33
  chunkContext: v.optional(
24
34
  v.object({ before: v.number(), after: v.number() }),
25
35
  ),
36
+ searchType: v.optional(vSearchType),
37
+ textQuery: v.optional(v.string()),
38
+ textWeight: v.optional(v.number()),
39
+ vectorWeight: v.optional(v.number()),
26
40
  },
27
41
  returns: v.object({
28
42
  results: v.array(vSearchResult),
@@ -36,51 +50,284 @@ export const search = action({
36
50
  entries: Infer<typeof vEntry>[];
37
51
  }> => {
38
52
  const { modelId, embedding, filters, limit } = args;
53
+ const dimension = embedding?.length ?? args.dimension;
54
+ if (!dimension) {
55
+ throw new Error(
56
+ "Either embedding or dimension must be provided to search.",
57
+ );
58
+ }
59
+
39
60
  const namespace = await ctx.runQuery(
40
61
  internal.namespaces.getCompatibleNamespace,
41
62
  {
42
63
  namespace: args.namespace,
43
64
  modelId,
44
- dimension: embedding.length,
65
+ dimension,
45
66
  filterNames: filters.map((f) => f.name),
46
67
  },
47
68
  );
48
69
  if (!namespace) {
49
70
  console.debug(
50
- `No compatible namespace found for ${args.namespace} with model ${args.modelId} and dimension ${embedding.length} and filters ${filters.map((f) => f.name).join(", ")}.`,
71
+ `No compatible namespace found for ${args.namespace} with model ${args.modelId} and dimension ${dimension} and filters ${filters.map((f) => f.name).join(", ")}.`,
51
72
  );
52
73
  return {
53
74
  results: [],
54
75
  entries: [],
55
76
  };
56
77
  }
57
- const results = await searchEmbeddings(ctx, {
58
- embedding,
59
- namespaceId: namespace._id,
60
- filters: numberedFiltersFromNamedFilters(filters, namespace.filterNames),
61
- limit,
62
- });
63
78
 
64
- const threshold = args.vectorScoreThreshold ?? -1;
65
- const aboveThreshold = results.filter((r) => r._score >= threshold);
66
79
  const chunkContext = args.chunkContext ?? { before: 0, after: 0 };
67
- // TODO: break this up if there are too many results
68
- const { ranges, entries } = await ctx.runQuery(
69
- internal.chunks.getRangesOfChunks,
80
+ const numberedFilters = numberedFiltersFromNamedFilters(
81
+ filters,
82
+ namespace.filterNames,
83
+ );
84
+
85
+ const hasEmbedding = !!embedding;
86
+ const hasTextQuery = !!args.textQuery;
87
+
88
+ // Vector-only path: return results with cosine similarity scores.
89
+ if (hasEmbedding && !hasTextQuery) {
90
+ const vectorResults = await searchEmbeddings(ctx, {
91
+ embedding,
92
+ namespaceId: namespace._id,
93
+ filters: numberedFilters,
94
+ limit,
95
+ });
96
+ const threshold = args.vectorScoreThreshold ?? -1;
97
+ const aboveThreshold = vectorResults.filter((r) => r._score >= threshold);
98
+ // TODO: break this up if there are too many results
99
+ const { ranges, entries } = await ctx.runQuery(
100
+ internal.chunks.getRangesOfChunks,
101
+ {
102
+ embeddingIds: aboveThreshold.map((r) => r._id),
103
+ chunkContext,
104
+ },
105
+ );
106
+ return {
107
+ results: ranges
108
+ .map((r, i) => publicSearchResult(r, aboveThreshold[i]._score))
109
+ .filter((r) => r !== null),
110
+ entries: entries as Infer<typeof vEntry>[],
111
+ };
112
+ }
113
+
114
+ // Hybrid or text-only path: combine vector and text results with RRF.
115
+ let embeddingIds: VectorTableId[] = [];
116
+ if (hasEmbedding) {
117
+ const vectorResults = await searchEmbeddings(ctx, {
118
+ embedding: embedding!,
119
+ namespaceId: namespace._id,
120
+ filters: numberedFilters,
121
+ limit,
122
+ });
123
+ const threshold = args.vectorScoreThreshold ?? -1;
124
+ embeddingIds = vectorResults
125
+ .filter((r) => r._score >= threshold)
126
+ .map((r) => r._id);
127
+ }
128
+
129
+ if (!hasTextQuery) {
130
+ throw new Error(
131
+ "Search requires at least one of embedding or textQuery.",
132
+ );
133
+ }
134
+
135
+ const { ranges, entries, resultCount } = await ctx.runQuery(
136
+ internal.search.textAndRanges,
70
137
  {
71
- embeddingIds: aboveThreshold.map((r) => r._id),
138
+ embeddingIds,
139
+ textQuery: args.textQuery!,
140
+ namespaceId: namespace._id,
141
+ filters: numberedFilters,
142
+ limit,
143
+ vectorWeight: args.vectorWeight ?? 1,
144
+ textWeight: args.textWeight ?? 1,
72
145
  chunkContext,
73
146
  },
74
147
  );
148
+
149
+ // Position-based scores (1.0 for first, decreasing linearly).
75
150
  return {
76
151
  results: ranges
77
- .map((r, i) => publicSearchResult(r, aboveThreshold[i]._score))
152
+ .map((r, i) => publicSearchResult(r, (resultCount - i) / resultCount))
78
153
  .filter((r) => r !== null),
79
154
  entries: entries as Infer<typeof vEntry>[],
80
155
  };
81
156
  },
82
157
  });
83
158
 
159
+ type TextSearchResult = {
160
+ chunkId: Id<"chunks">;
161
+ entryId: Id<"entries">;
162
+ order: number;
163
+ };
164
+
165
+ async function textSearchImpl(
166
+ ctx: QueryCtx,
167
+ args: {
168
+ query: string;
169
+ namespaceId: Id<"namespaces">;
170
+ filters: NumberedFilter[];
171
+ limit: number;
172
+ },
173
+ ): Promise<TextSearchResult[]> {
174
+ const toResults = (chunks: Doc<"chunks">[]): TextSearchResult[] =>
175
+ chunks
176
+ .filter((chunk) => chunk.state.kind === "ready")
177
+ .map((chunk) => ({
178
+ chunkId: chunk._id,
179
+ entryId: chunk.entryId,
180
+ order: chunk.order,
181
+ }));
182
+
183
+ // No user filters — just filter by namespaceId.
184
+ if (args.filters.length === 0) {
185
+ const results = await ctx.db
186
+ .query("chunks")
187
+ .withSearchIndex("searchableText", (q) =>
188
+ q
189
+ .search("state.searchableText", args.query)
190
+ .eq("namespaceId", args.namespaceId),
191
+ )
192
+ .take(args.limit);
193
+ return toResults(results);
194
+ }
195
+
196
+ // OR across filter conditions: run one text search per filter and dedupe.
197
+ const seen = new Set<Id<"chunks">>();
198
+ const merged: TextSearchResult[] = [];
199
+ for (const filter of args.filters) {
200
+ const fields = filterFieldsFromNumbers(args.namespaceId, filter);
201
+ const results = await ctx.db
202
+ .query("chunks")
203
+ .withSearchIndex("searchableText", (q) => {
204
+ let query = q
205
+ .search("state.searchableText", args.query)
206
+ .eq("namespaceId", args.namespaceId);
207
+ for (const [field, value] of Object.entries(fields)) {
208
+ query = query.eq(
209
+ field as "filter0" | "filter1" | "filter2" | "filter3",
210
+ value,
211
+ );
212
+ }
213
+ return query;
214
+ })
215
+ .take(args.limit);
216
+ for (const r of toResults(results)) {
217
+ if (!seen.has(r.chunkId)) {
218
+ seen.add(r.chunkId);
219
+ merged.push(r);
220
+ }
221
+ }
222
+ }
223
+ return merged.slice(0, args.limit);
224
+ }
225
+
226
+ export const textSearch = internalQuery({
227
+ args: {
228
+ query: v.string(),
229
+ namespaceId: v.id("namespaces"),
230
+ // Numbered filters, OR'd together (same semantics as vector search).
231
+ filters: v.array(v.any()),
232
+ limit: v.number(),
233
+ },
234
+ returns: v.array(
235
+ v.object({
236
+ chunkId: v.id("chunks"),
237
+ entryId: v.id("entries"),
238
+ order: v.number(),
239
+ }),
240
+ ),
241
+ handler: async (ctx, args) => {
242
+ return textSearchImpl(ctx, {
243
+ query: args.query,
244
+ namespaceId: args.namespaceId,
245
+ filters: args.filters as NumberedFilter[],
246
+ limit: args.limit,
247
+ });
248
+ },
249
+ });
250
+
251
+ export const textAndRanges = internalQuery({
252
+ args: {
253
+ embeddingIds: v.array(vVectorId),
254
+ textQuery: v.string(),
255
+ namespaceId: v.id("namespaces"),
256
+ filters: v.array(v.any()),
257
+ limit: v.number(),
258
+ vectorWeight: v.number(),
259
+ textWeight: v.number(),
260
+ chunkContext: v.object({ before: v.number(), after: v.number() }),
261
+ },
262
+ returns: v.object({
263
+ ranges: v.array(
264
+ v.union(
265
+ v.null(),
266
+ v.object({
267
+ entryId: v.id("entries"),
268
+ order: v.number(),
269
+ startOrder: v.number(),
270
+ content: v.array(
271
+ v.object({
272
+ text: v.string(),
273
+ metadata: v.optional(v.record(v.string(), v.any())),
274
+ }),
275
+ ),
276
+ }),
277
+ ),
278
+ ),
279
+ entries: v.array(vEntry),
280
+ resultCount: v.number(),
281
+ }),
282
+ handler: async (ctx, args) => {
283
+ // 1. Map embedding IDs to chunk IDs.
284
+ const vectorChunkIds: Id<"chunks">[] = (
285
+ await Promise.all(
286
+ args.embeddingIds.map(async (embeddingId) => {
287
+ const chunk = await ctx.db
288
+ .query("chunks")
289
+ .withIndex("embeddingId", (q) =>
290
+ q.eq("state.embeddingId", embeddingId),
291
+ )
292
+ .order("desc")
293
+ .first();
294
+ return chunk?._id ?? null;
295
+ }),
296
+ )
297
+ ).filter((id) => id !== null);
298
+
299
+ // 2. Run text search.
300
+ const textResults = await textSearchImpl(ctx, {
301
+ query: args.textQuery,
302
+ namespaceId: args.namespaceId,
303
+ filters: args.filters as NumberedFilter[],
304
+ limit: args.limit,
305
+ });
306
+ const textChunkIds: Id<"chunks">[] = textResults.map((r) => r.chunkId);
307
+
308
+ // 3. Merge using Reciprocal Rank Fusion.
309
+ const mergedChunkIds = hybridRank<Id<"chunks">>(
310
+ [vectorChunkIds, textChunkIds],
311
+ { k: 10, weights: [args.vectorWeight, args.textWeight] },
312
+ ).slice(0, args.limit);
313
+
314
+ if (mergedChunkIds.length === 0) {
315
+ return { ranges: [], entries: [], resultCount: 0 };
316
+ }
317
+
318
+ // 4. Build ranges from merged chunk IDs.
319
+ const chunks = await Promise.all(
320
+ mergedChunkIds.map((id) => ctx.db.get(id)),
321
+ );
322
+ const { ranges, entries } = await buildRanges(
323
+ ctx,
324
+ chunks,
325
+ args.chunkContext,
326
+ );
327
+ return { ranges, entries, resultCount: mergedChunkIds.length };
328
+ },
329
+ });
330
+
84
331
  function publicSearchResult(
85
332
  r: Infer<typeof vRangeResult> | null,
86
333
  score: number,
package/src/shared.ts CHANGED
@@ -35,6 +35,13 @@ export const vSearchResult = v.object({
35
35
 
36
36
  export type SearchResult = Infer<typeof vSearchResult>;
37
37
 
38
+ export const vSearchType = v.union(
39
+ v.literal("vector"),
40
+ v.literal("text"),
41
+ v.literal("hybrid"),
42
+ );
43
+ export type SearchType = Infer<typeof vSearchType>;
44
+
38
45
  export const vStatus = v.union(
39
46
  v.literal("pending"),
40
47
  v.literal("ready"),