vexor 0.20.0__py3-none-any.whl → 0.21.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vexor/cache.py CHANGED
@@ -16,7 +16,7 @@ from .utils import collect_files
 
 DEFAULT_CACHE_DIR = Path(os.path.expanduser("~")) / ".vexor"
 CACHE_DIR = DEFAULT_CACHE_DIR
-CACHE_VERSION = 5
+CACHE_VERSION = 6
 DB_FILENAME = "index.db"
 EMBED_CACHE_TTL_DAYS = 30
 EMBED_CACHE_MAX_ENTRIES = 50_000
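The version bump from 5 to 6 accompanies the schema split below: caches written under the old single-table layout are not readable in place. As the load_index and load_index_vectors hunks later in this diff show, readers now compare the stored version against CACHE_VERSION and treat an older database as missing. A minimal sketch of that gate, assuming `meta` (the fetched index_metadata row) and `db_path` come from the enclosing load function:

# Version gate as used by the loaders in this diff; `meta` and `db_path`
# are assumptions taken from the surrounding function, not standalone code.
version = int(meta["version"] or 0)
if version < CACHE_VERSION:
    raise FileNotFoundError(db_path)  # callers treat this as "no index: rebuild"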
@@ -110,7 +110,7 @@ def _deserialize_exclude_patterns(value: str | None) -> tuple[str, ...]:
     return tuple(parts)
 
 
-def _chunk_values(values: Sequence[str], size: int) -> Iterable[Sequence[str]]:
+def _chunk_values(values: Sequence[object], size: int) -> Iterable[Sequence[object]]:
     for idx in range(0, len(values), size):
         yield values[idx : idx + size]
 
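Widening `_chunk_values` from `Sequence[str]` to `Sequence[object]` lets one helper batch both rel_path strings and integer chunk ids for `IN (...)` queries. The 900-item batches used throughout this file stay below SQLite's host-parameter limit, which is 999 in many builds. A self-contained sketch of the batched-lookup pattern (the table and data here are hypothetical):

import sqlite3
from collections.abc import Iterable, Sequence

def _chunk_values(values: Sequence[object], size: int) -> Iterable[Sequence[object]]:
    for idx in range(0, len(values), size):
        yield values[idx : idx + size]

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE indexed_file (id INTEGER PRIMARY KEY, rel_path TEXT)")
conn.executemany(
    "INSERT INTO indexed_file (rel_path) VALUES (?)",
    [(f"src/file_{n}.py",) for n in range(2000)],
)

wanted = [f"src/file_{n}.py" for n in range(1500)]
found: dict[str, int] = {}
for batch in _chunk_values(wanted, 900):  # each statement stays under the parameter cap
    placeholders = ", ".join("?" for _ in batch)
    rows = conn.execute(
        f"SELECT id, rel_path FROM indexed_file WHERE rel_path IN ({placeholders})",
        tuple(batch),
    ).fetchall()
    for row_id, rel_path in rows:
        found[rel_path] = int(row_id)
print(len(found))  # 1500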
@@ -158,7 +158,42 @@ def _connect(db_path: Path) -> sqlite3.Connection:
     return conn
 
 
+def _table_exists(conn: sqlite3.Connection, table: str) -> bool:
+    row = conn.execute(
+        "SELECT name FROM sqlite_master WHERE type = 'table' AND name = ?",
+        (table,),
+    ).fetchone()
+    return row is not None
+
+
+def _schema_needs_reset(conn: sqlite3.Connection) -> bool:
+    if _table_exists(conn, "indexed_chunk"):
+        return False
+    return any(
+        _table_exists(conn, table)
+        for table in ("index_metadata", "indexed_file", "file_embedding", "query_cache")
+    )
+
+
+def _reset_index_schema(conn: sqlite3.Connection) -> None:
+    conn.execute("PRAGMA foreign_keys = OFF;")
+    conn.executescript(
+        """
+        DROP TABLE IF EXISTS query_cache;
+        DROP TABLE IF EXISTS file_embedding;
+        DROP TABLE IF EXISTS chunk_embedding;
+        DROP TABLE IF EXISTS chunk_meta;
+        DROP TABLE IF EXISTS indexed_chunk;
+        DROP TABLE IF EXISTS indexed_file;
+        DROP TABLE IF EXISTS index_metadata;
+        """
+    )
+    conn.execute("PRAGMA foreign_keys = ON;")
+
+
 def _ensure_schema(conn: sqlite3.Connection) -> None:
+    if _schema_needs_reset(conn):
+        _reset_index_schema(conn)
     conn.executescript(
         """
         CREATE TABLE IF NOT EXISTS index_metadata (
@@ -185,20 +220,31 @@ def _ensure_schema(conn: sqlite3.Connection) -> None:
             abs_path TEXT NOT NULL,
             size_bytes INTEGER NOT NULL,
             mtime REAL NOT NULL,
-            position INTEGER NOT NULL,
-            preview TEXT DEFAULT '',
-            label_hash TEXT DEFAULT '',
+            UNIQUE(index_id, rel_path)
+        );
+
+        CREATE TABLE IF NOT EXISTS indexed_chunk (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            index_id INTEGER NOT NULL REFERENCES index_metadata(id) ON DELETE CASCADE,
+            file_id INTEGER NOT NULL REFERENCES indexed_file(id) ON DELETE CASCADE,
             chunk_index INTEGER NOT NULL DEFAULT 0,
-            start_line INTEGER,
-            end_line INTEGER,
-            UNIQUE(index_id, rel_path, chunk_index)
+            position INTEGER NOT NULL,
+            UNIQUE(index_id, file_id, chunk_index)
         );
 
-        CREATE TABLE IF NOT EXISTS file_embedding (
-            file_id INTEGER PRIMARY KEY REFERENCES indexed_file(id) ON DELETE CASCADE,
+        CREATE TABLE IF NOT EXISTS chunk_embedding (
+            chunk_id INTEGER PRIMARY KEY REFERENCES indexed_chunk(id) ON DELETE CASCADE,
             vector_blob BLOB NOT NULL
         );
 
+        CREATE TABLE IF NOT EXISTS chunk_meta (
+            chunk_id INTEGER PRIMARY KEY REFERENCES indexed_chunk(id) ON DELETE CASCADE,
+            preview TEXT DEFAULT '',
+            label_hash TEXT DEFAULT '',
+            start_line INTEGER,
+            end_line INTEGER
+        );
+
         CREATE TABLE IF NOT EXISTS query_cache (
             id INTEGER PRIMARY KEY AUTOINCREMENT,
             index_id INTEGER NOT NULL REFERENCES index_metadata(id) ON DELETE CASCADE,
@@ -218,8 +264,11 @@ def _ensure_schema(conn: sqlite3.Connection) -> None:
             UNIQUE(model, text_hash)
         );
 
-        CREATE INDEX IF NOT EXISTS idx_indexed_file_order
-            ON indexed_file(index_id, position);
+        CREATE INDEX IF NOT EXISTS idx_indexed_chunk_order
+            ON indexed_chunk(index_id, position);
+
+        CREATE INDEX IF NOT EXISTS idx_indexed_file_lookup
+            ON indexed_file(index_id, rel_path);
 
         CREATE INDEX IF NOT EXISTS idx_query_cache_lookup
             ON query_cache(index_id, query_hash);
@@ -228,133 +277,6 @@ def _ensure_schema(conn: sqlite3.Connection) -> None:
             ON embedding_cache(model, text_hash);
         """
     )
-    try:
-        conn.execute(
-            "ALTER TABLE index_metadata ADD COLUMN recursive INTEGER NOT NULL DEFAULT 1"
-        )
-    except sqlite3.OperationalError:
-        # Column already exists; ignore error.
-        pass
-    try:
-        conn.execute(
-            "ALTER TABLE index_metadata ADD COLUMN respect_gitignore INTEGER NOT NULL DEFAULT 1"
-        )
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute(
-            "ALTER TABLE index_metadata ADD COLUMN mode TEXT NOT NULL DEFAULT 'name'"
-        )
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute(
-            "ALTER TABLE indexed_file ADD COLUMN preview TEXT DEFAULT ''"
-        )
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute(
-            "ALTER TABLE indexed_file ADD COLUMN label_hash TEXT DEFAULT ''"
-        )
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute("ALTER TABLE indexed_file ADD COLUMN start_line INTEGER")
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute("ALTER TABLE indexed_file ADD COLUMN end_line INTEGER")
-    except sqlite3.OperationalError:
-        pass
-    if not _table_has_column(conn, "indexed_file", "chunk_index"):
-        _upgrade_indexed_file_with_chunk(conn)
-    try:
-        conn.execute(
-            "ALTER TABLE index_metadata ADD COLUMN extensions TEXT DEFAULT ''"
-        )
-    except sqlite3.OperationalError:
-        pass
-    try:
-        conn.execute(
-            "ALTER TABLE index_metadata ADD COLUMN exclude_patterns TEXT DEFAULT ''"
-        )
-    except sqlite3.OperationalError:
-        pass
-    _cleanup_orphan_embeddings(conn)
-
-
-def _table_has_column(conn: sqlite3.Connection, table: str, column: str) -> bool:
-    rows = conn.execute(f"PRAGMA table_info({table})").fetchall()
-    return any(row[1] == column for row in rows)
-
-
-def _upgrade_indexed_file_with_chunk(conn: sqlite3.Connection) -> None:
-    conn.execute("PRAGMA foreign_keys = OFF;")
-    conn.execute("ALTER TABLE indexed_file RENAME TO indexed_file_legacy;")
-    conn.executescript(
-        """
-        CREATE TABLE indexed_file (
-            id INTEGER PRIMARY KEY AUTOINCREMENT,
-            index_id INTEGER NOT NULL REFERENCES index_metadata(id) ON DELETE CASCADE,
-            rel_path TEXT NOT NULL,
-            abs_path TEXT NOT NULL,
-            size_bytes INTEGER NOT NULL,
-            mtime REAL NOT NULL,
-            position INTEGER NOT NULL,
-            preview TEXT DEFAULT '',
-            label_hash TEXT DEFAULT '',
-            chunk_index INTEGER NOT NULL DEFAULT 0,
-            start_line INTEGER,
-            end_line INTEGER,
-            UNIQUE(index_id, rel_path, chunk_index)
-        );
-
-        CREATE INDEX IF NOT EXISTS idx_indexed_file_order
-            ON indexed_file(index_id, position);
-        """
-    )
-    conn.execute(
-        """
-        INSERT INTO indexed_file (
-            id,
-            index_id,
-            rel_path,
-            abs_path,
-            size_bytes,
-            mtime,
-            position,
-            preview,
-            label_hash,
-            chunk_index,
-            start_line,
-            end_line
-        )
-        SELECT
-            id,
-            index_id,
-            rel_path,
-            abs_path,
-            size_bytes,
-            mtime,
-            position,
-            preview,
-            '',
-            0,
-            NULL,
-            NULL
-        FROM indexed_file_legacy;
-        """
-    )
-    conn.execute("DROP TABLE indexed_file_legacy;")
-    conn.execute("PRAGMA foreign_keys = ON;")
-
-
-def _cleanup_orphan_embeddings(conn: sqlite3.Connection) -> None:
-    with conn:
-        conn.execute(
-            "DELETE FROM file_embedding WHERE file_id NOT IN (SELECT id FROM indexed_file)"
-        )
 
 
 def store_index(
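The ladder of `try`/`ALTER TABLE` migrations is gone: version 6 instead detects a pre-chunk database (any legacy table present but no `indexed_chunk`) and drops the whole index schema so `_ensure_schema` can recreate it empty, trading an in-place migration for a forced reindex. A standalone sketch of the detection on an in-memory database (assumption: a demo, not vexor's module):

import sqlite3

def table_exists(conn: sqlite3.Connection, name: str) -> bool:
    # Same sqlite_master probe as _table_exists in this diff.
    row = conn.execute(
        "SELECT name FROM sqlite_master WHERE type = 'table' AND name = ?",
        (name,),
    ).fetchone()
    return row is not None

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE indexed_file (id INTEGER PRIMARY KEY)")  # legacy v5 table

# Reset is needed: a legacy table exists but the new indexed_chunk table does not.
needs_reset = not table_exists(conn, "indexed_chunk") and any(
    table_exists(conn, t)
    for t in ("index_metadata", "indexed_file", "file_embedding", "query_cache")
)
print(needs_reset)  # True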
@@ -430,32 +352,22 @@ def store_index(
         )
         index_id = cursor.lastrowid
 
-        file_rows: list[tuple] = []
-        vector_blobs: list[bytes] = []
-        for position, entry in enumerate(entries):
+        file_rows_by_rel: dict[str, tuple] = {}
+        for entry in entries:
+            if entry.rel_path in file_rows_by_rel:
+                continue
             size_bytes = entry.size_bytes
             mtime = entry.mtime
             if size_bytes is None or mtime is None:
                 stat = entry.path.stat()
                 size_bytes = stat.st_size
                 mtime = stat.st_mtime
-            file_rows.append(
-                (
-                    index_id,
-                    entry.rel_path,
-                    str(entry.path),
-                    size_bytes,
-                    mtime,
-                    position,
-                    entry.preview,
-                    entry.label_hash,
-                    entry.chunk_index,
-                    entry.start_line,
-                    entry.end_line,
-                )
-            )
-            vector_blobs.append(
-                np.asarray(entry.embedding, dtype=np.float32).tobytes()
+            file_rows_by_rel[entry.rel_path] = (
+                index_id,
+                entry.rel_path,
+                str(entry.path),
+                size_bytes,
+                mtime,
             )
 
         conn.executemany(
@@ -465,29 +377,87 @@ def store_index(
                 rel_path,
                 abs_path,
                 size_bytes,
-                mtime,
-                position,
-                preview,
-                label_hash,
+                mtime
+            ) VALUES (?, ?, ?, ?, ?)
+            """,
+            list(file_rows_by_rel.values()),
+        )
+
+        file_id_map: dict[str, int] = {}
+        rel_paths = list(file_rows_by_rel.keys())
+        for chunk in _chunk_values(rel_paths, 900):
+            placeholders = ", ".join("?" for _ in chunk)
+            rows = conn.execute(
+                f"""
+                SELECT id, rel_path
+                FROM indexed_file
+                WHERE index_id = ? AND rel_path IN ({placeholders})
+                """,
+                (index_id, *chunk),
+            ).fetchall()
+            for row in rows:
+                file_id_map[row["rel_path"]] = int(row["id"])
+
+        chunk_rows: list[tuple] = []
+        vector_blobs: list[bytes] = []
+        meta_rows: list[tuple] = []
+        for position, entry in enumerate(entries):
+            file_id = file_id_map.get(entry.rel_path)
+            if file_id is None:
+                continue
+            chunk_rows.append(
+                (index_id, file_id, entry.chunk_index, position)
+            )
+            vector_blobs.append(
+                np.asarray(entry.embedding, dtype=np.float32).tobytes()
+            )
+            meta_rows.append(
+                (
+                    entry.preview or "",
+                    entry.label_hash or "",
+                    entry.start_line,
+                    entry.end_line,
+                )
+            )
+
+        conn.executemany(
+            """
+            INSERT INTO indexed_chunk (
+                index_id,
+                file_id,
                 chunk_index,
-                start_line,
-                end_line
-            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                position
+            ) VALUES (?, ?, ?, ?)
             """,
-            file_rows,
+            chunk_rows,
         )
 
         inserted_ids = conn.execute(
-            "SELECT id FROM indexed_file WHERE index_id = ? ORDER BY position ASC",
+            "SELECT id FROM indexed_chunk WHERE index_id = ? ORDER BY position ASC",
             (index_id,),
         ).fetchall()
         conn.executemany(
-            "INSERT OR REPLACE INTO file_embedding (file_id, vector_blob) VALUES (?, ?)",
+            "INSERT OR REPLACE INTO chunk_embedding (chunk_id, vector_blob) VALUES (?, ?)",
             (
                 (row["id"], vector_blobs[idx])
                 for idx, row in enumerate(inserted_ids)
             ),
         )
+        conn.executemany(
+            """
+            INSERT OR REPLACE INTO chunk_meta (
+                chunk_id,
+                preview,
+                label_hash,
+                start_line,
+                end_line
+            ) VALUES (?, ?, ?, ?, ?)
+            """,
+            (
+                (row["id"], *meta_rows[idx])
+                for idx, row in enumerate(inserted_ids)
+            ),
+        )
 
         return db_path
     finally:
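store_index now writes the normalized layout in three steps: one `indexed_file` row per unique rel_path, one `indexed_chunk` row per (file, chunk_index) carrying the global position, and per-chunk `chunk_embedding`/`chunk_meta` rows keyed by chunk id. A reduced, self-contained sketch of that write order; the schema is trimmed to the columns needed here, the entries are hypothetical, and it uses `lastrowid` where the real code batches with `executemany` and re-selects ids:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE indexed_file (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        rel_path TEXT NOT NULL UNIQUE
    );
    CREATE TABLE indexed_chunk (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        file_id INTEGER NOT NULL REFERENCES indexed_file(id) ON DELETE CASCADE,
        chunk_index INTEGER NOT NULL,
        position INTEGER NOT NULL
    );
    CREATE TABLE chunk_embedding (
        chunk_id INTEGER PRIMARY KEY REFERENCES indexed_chunk(id) ON DELETE CASCADE,
        vector_blob BLOB NOT NULL
    );
    """
)

# Hypothetical entries: (rel_path, chunk_index, fake embedding bytes).
entries = [("a.py", 0, b"\x00"), ("a.py", 1, b"\x01"), ("b.py", 0, b"\x02")]

# Step 1: one file row per unique rel_path.
conn.executemany(
    "INSERT INTO indexed_file (rel_path) VALUES (?)",
    [(p,) for p in dict.fromkeys(rel for rel, _, _ in entries)],
)
file_ids = {rel: fid for fid, rel in conn.execute("SELECT id, rel_path FROM indexed_file")}

for position, (rel, chunk_index, blob) in enumerate(entries):
    # Step 2: one chunk row per entry, keyed back to its file.
    cur = conn.execute(
        "INSERT INTO indexed_chunk (file_id, chunk_index, position) VALUES (?, ?, ?)",
        (file_ids[rel], chunk_index, position),
    )
    # Step 3: embedding keyed by the new chunk id.
    conn.execute(
        "INSERT INTO chunk_embedding (chunk_id, vector_blob) VALUES (?, ?)",
        (cur.lastrowid, blob),
    )

print(conn.execute("SELECT COUNT(*) FROM indexed_chunk").fetchone()[0])  # 3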
@@ -558,112 +528,221 @@ def apply_index_updates(
         if changed_entries:
             chunk_map: dict[str, list[IndexedChunk]] = {}
             for entry in changed_entries:
-                if entry.rel_path not in chunk_map:
-                    chunk_map[entry.rel_path] = []
-                chunk_map[entry.rel_path].append(entry)
+                chunk_map.setdefault(entry.rel_path, []).append(entry)
 
-            for rel_path, chunk_list in chunk_map.items():
+            for rel_path in chunk_map:
                 conn.execute(
                     "DELETE FROM indexed_file WHERE index_id = ? AND rel_path = ?",
                     (index_id, rel_path),
                 )
+
+            file_rows_by_rel: dict[str, tuple] = {}
+            for rel_path, chunk_list in chunk_map.items():
                 chunk_list.sort(key=lambda item: item.chunk_index)
-                file_rows: list[tuple] = []
+                sample = chunk_list[0]
+                size_bytes = sample.size_bytes
+                mtime = sample.mtime
+                if size_bytes is None or mtime is None:
+                    stat = sample.path.stat()
+                    size_bytes = stat.st_size
+                    mtime = stat.st_mtime
+                file_rows_by_rel[rel_path] = (
+                    index_id,
+                    rel_path,
+                    str(sample.path),
+                    size_bytes,
+                    mtime,
+                )
+
+            if file_rows_by_rel:
+                conn.executemany(
+                    """
+                    INSERT INTO indexed_file (
+                        index_id,
+                        rel_path,
+                        abs_path,
+                        size_bytes,
+                        mtime
+                    ) VALUES (?, ?, ?, ?, ?)
+                    """,
+                    list(file_rows_by_rel.values()),
+                )
+
+            file_id_map: dict[str, int] = {}
+            rel_paths = list(file_rows_by_rel.keys())
+            for chunk in _chunk_values(rel_paths, 900):
+                placeholders = ", ".join("?" for _ in chunk)
+                rows = conn.execute(
+                    f"""
+                    SELECT id, rel_path
+                    FROM indexed_file
+                    WHERE index_id = ? AND rel_path IN ({placeholders})
+                    """,
+                    (index_id, *chunk),
+                ).fetchall()
+                for row in rows:
+                    file_id_map[row["rel_path"]] = int(row["id"])
+
+            for rel_path, chunk_list in chunk_map.items():
+                file_id = file_id_map.get(rel_path)
+                if file_id is None:
+                    continue
+                chunk_list.sort(key=lambda item: item.chunk_index)
+                chunk_rows: list[tuple] = []
                 vector_blobs: list[bytes] = []
+                meta_rows: list[tuple] = []
                 for chunk in chunk_list:
                     vector = np.asarray(chunk.embedding, dtype=np.float32)
                     if vector_dimension is None:
                         vector_dimension = vector.shape[0]
-                    size_bytes = chunk.size_bytes
-                    mtime = chunk.mtime
-                    if size_bytes is None or mtime is None:
-                        stat = chunk.path.stat()
-                        size_bytes = stat.st_size
-                        mtime = stat.st_mtime
-                    file_rows.append(
+                    chunk_rows.append(
+                        (index_id, file_id, chunk.chunk_index, 0)
+                    )
+                    vector_blobs.append(vector.tobytes())
+                    meta_rows.append(
                         (
-                            index_id,
-                            rel_path,
-                            str(chunk.path),
-                            size_bytes,
-                            mtime,
-                            0,
-                            chunk.preview,
-                            chunk.label_hash,
-                            chunk.chunk_index,
+                            chunk.preview or "",
+                            chunk.label_hash or "",
                             chunk.start_line,
                             chunk.end_line,
                         )
                     )
-                    vector_blobs.append(vector.tobytes())
 
                 conn.executemany(
                     """
-                    INSERT INTO indexed_file (
+                    INSERT INTO indexed_chunk (
                         index_id,
-                        rel_path,
-                        abs_path,
-                        size_bytes,
-                        mtime,
-                        position,
-                        preview,
-                        label_hash,
+                        file_id,
                         chunk_index,
-                        start_line,
-                        end_line
-                    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                        position
+                    ) VALUES (?, ?, ?, ?)
                     """,
-                    file_rows,
+                    chunk_rows,
                 )
 
                 inserted_ids = conn.execute(
                     """
-                    SELECT id FROM indexed_file
-                    WHERE index_id = ? AND rel_path = ?
+                    SELECT id FROM indexed_chunk
+                    WHERE index_id = ? AND file_id = ?
                     ORDER BY chunk_index ASC
                     """,
-                    (index_id, rel_path),
+                    (index_id, file_id),
                 ).fetchall()
                 conn.executemany(
-                    "INSERT INTO file_embedding (file_id, vector_blob) VALUES (?, ?)",
+                    "INSERT OR REPLACE INTO chunk_embedding (chunk_id, vector_blob) VALUES (?, ?)",
                     (
                         (row["id"], vector_blobs[idx])
                         for idx, row in enumerate(inserted_ids)
                    ),
                )
+                conn.executemany(
+                    """
+                    INSERT OR REPLACE INTO chunk_meta (
+                        chunk_id,
+                        preview,
+                        label_hash,
+                        start_line,
+                        end_line
+                    ) VALUES (?, ?, ?, ?, ?)
+                    """,
+                    (
+                        (row["id"], *meta_rows[idx])
+                        for idx, row in enumerate(inserted_ids)
+                    ),
+                )
 
         if touched_entries:
+            file_updates: dict[str, tuple[int, float]] = {}
+            for (
+                rel_path,
+                _chunk_index,
+                size_bytes,
+                mtime,
+                _preview,
+                _start_line,
+                _end_line,
+                _label_hash,
+            ) in touched_entries:
+                if rel_path not in file_updates:
+                    file_updates[rel_path] = (size_bytes, mtime)
             conn.executemany(
                 """
                 UPDATE indexed_file
-                SET size_bytes = ?, mtime = ?, preview = ?, start_line = ?, end_line = ?, label_hash = ?
-                WHERE index_id = ? AND rel_path = ? AND chunk_index = ?
+                SET size_bytes = ?, mtime = ?
+                WHERE index_id = ? AND rel_path = ?
                 """,
                 (
+                    (size_bytes, mtime, index_id, rel_path)
+                    for rel_path, (size_bytes, mtime) in file_updates.items()
+                ),
+            )
+
+        chunk_id_map: dict[tuple[str, int], int] = {}
+        if ordered_entries or touched_entries:
+            rows = conn.execute(
+                """
+                SELECT c.id, c.chunk_index, f.rel_path
+                FROM indexed_chunk AS c
+                JOIN indexed_file AS f ON f.id = c.file_id
+                WHERE c.index_id = ?
+                """,
+                (index_id,),
+            ).fetchall()
+            for row in rows:
+                chunk_id_map[(row["rel_path"], int(row["chunk_index"]))] = int(
+                    row["id"]
+                )
+
+        if touched_entries and chunk_id_map:
+            meta_rows: list[tuple] = []
+            for (
+                rel_path,
+                chunk_index,
+                _size_bytes,
+                _mtime,
+                preview,
+                start_line,
+                end_line,
+                label_hash,
+            ) in touched_entries:
+                chunk_id = chunk_id_map.get((rel_path, chunk_index))
+                if chunk_id is None:
+                    continue
+                meta_rows.append(
                     (
-                        size_bytes,
-                        mtime,
+                        chunk_id,
                         preview or "",
+                        label_hash or "",
                         start_line,
                         end_line,
-                        label_hash or "",
-                        index_id,
-                        rel_path,
-                        chunk_index,
                     )
-                    for rel_path, chunk_index, size_bytes, mtime, preview, start_line, end_line, label_hash in touched_entries
-                ),
-            )
+                )
+            if meta_rows:
+                conn.executemany(
+                    """
+                    INSERT OR REPLACE INTO chunk_meta (
+                        chunk_id,
+                        preview,
+                        label_hash,
+                        start_line,
+                        end_line
+                    ) VALUES (?, ?, ?, ?, ?)
+                    """,
+                    meta_rows,
+                )
 
-        for position, (rel_path, chunk_index) in enumerate(ordered_entries):
-            conn.execute(
-                """
-                UPDATE indexed_file
-                SET position = ?
-                WHERE index_id = ? AND rel_path = ? AND chunk_index = ?
-                """,
-                (position, index_id, rel_path, chunk_index),
-            )
+        if ordered_entries and chunk_id_map:
+            position_updates = []
+            for position, (rel_path, chunk_index) in enumerate(ordered_entries):
+                chunk_id = chunk_id_map.get((rel_path, chunk_index))
+                if chunk_id is None:
+                    continue
+                position_updates.append((position, chunk_id))
+            if position_updates:
+                conn.executemany(
+                    "UPDATE indexed_chunk SET position = ? WHERE id = ?",
+                    position_updates,
+                )
 
         generated_at = datetime.now(timezone.utc).isoformat()
         new_dimension = vector_dimension or existing_dimension
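In apply_index_updates, changed files are handled by deleting their `indexed_file` rows and reinserting; the chunk, embedding, and meta rows disappear automatically through the `ON DELETE CASCADE` references, which is what keeps the delete-and-reinsert path simple. Touched files get one `indexed_file` UPDATE per rel_path plus `chunk_meta` upserts, and reordering becomes a single batched `executemany` over chunk ids. A self-contained sketch of the cascade; note SQLite only honors it with foreign_keys switched on, which `_connect` presumably arranges given the PRAGMA toggling elsewhere in this diff:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("PRAGMA foreign_keys = ON;")  # cascades are off by default in SQLite
conn.executescript(
    """
    CREATE TABLE indexed_file (id INTEGER PRIMARY KEY, rel_path TEXT UNIQUE);
    CREATE TABLE indexed_chunk (
        id INTEGER PRIMARY KEY,
        file_id INTEGER NOT NULL REFERENCES indexed_file(id) ON DELETE CASCADE
    );
    INSERT INTO indexed_file VALUES (1, 'a.py');
    INSERT INTO indexed_chunk VALUES (10, 1), (11, 1);
    """
)
conn.execute("DELETE FROM indexed_file WHERE rel_path = 'a.py'")
print(conn.execute("SELECT COUNT(*) FROM indexed_chunk").fetchone()[0])  # 0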
@@ -728,17 +807,52 @@ def backfill_chunk_lines(
 
     with conn:
         conn.execute("BEGIN IMMEDIATE;")
-        conn.executemany(
-            """
-            UPDATE indexed_file
-            SET start_line = ?, end_line = ?
-            WHERE index_id = ? AND rel_path = ? AND chunk_index = ?
-            """,
-            (
-                (start_line, end_line, index_id, rel_path, chunk_index)
-                for rel_path, chunk_index, start_line, end_line in updates
-            ),
-        )
+        update_rows: list[tuple[int | None, int | None, int]] = []
+        insert_rows: list[tuple[int]] = []
+        if updates:
+            rel_paths = sorted({rel_path for rel_path, *_ in updates})
+            chunk_id_map: dict[tuple[str, int], int] = {}
+            for chunk in _chunk_values(rel_paths, 900):
+                placeholders = ", ".join("?" for _ in chunk)
+                rows = conn.execute(
+                    f"""
+                    SELECT c.id, c.chunk_index, f.rel_path
+                    FROM indexed_chunk AS c
+                    JOIN indexed_file AS f ON f.id = c.file_id
+                    WHERE c.index_id = ? AND f.rel_path IN ({placeholders})
+                    """,
+                    (index_id, *chunk),
+                ).fetchall()
+                for row in rows:
+                    chunk_id_map[(row["rel_path"], int(row["chunk_index"]))] = int(
+                        row["id"]
+                    )
+            for rel_path, chunk_index, start_line, end_line in updates:
+                chunk_id = chunk_id_map.get((rel_path, chunk_index))
+                if chunk_id is None:
+                    continue
+                insert_rows.append((chunk_id,))
+                update_rows.append((start_line, end_line, chunk_id))
+        if insert_rows:
+            conn.executemany(
+                """
+                INSERT OR IGNORE INTO chunk_meta (
+                    chunk_id,
+                    preview,
+                    label_hash
+                ) VALUES (?, '', '')
+                """,
+                insert_rows,
+            )
+        if update_rows:
+            conn.executemany(
+                """
+                UPDATE chunk_meta
+                SET start_line = ?, end_line = ?
+                WHERE chunk_id = ?
+                """,
+                update_rows,
+            )
         generated_at = datetime.now(timezone.utc).isoformat()
         conn.execute(
             """
@@ -794,13 +908,27 @@ def load_index(
         ).fetchone()
         if meta is None:
             raise FileNotFoundError(db_path)
+        version = int(meta["version"] or 0)
+        if version < CACHE_VERSION:
+            raise FileNotFoundError(db_path)
 
         rows = conn.execute(
             """
-            SELECT rel_path, abs_path, size_bytes, mtime, preview, label_hash, chunk_index, start_line, end_line
-            FROM indexed_file
-            WHERE index_id = ?
-            ORDER BY position ASC
+            SELECT
+                f.rel_path,
+                f.abs_path,
+                f.size_bytes,
+                f.mtime,
+                c.chunk_index,
+                m.preview,
+                m.label_hash,
+                m.start_line,
+                m.end_line
+            FROM indexed_chunk AS c
+            JOIN indexed_file AS f ON f.id = c.file_id
+            LEFT JOIN chunk_meta AS m ON m.chunk_id = c.id
+            WHERE c.index_id = ?
+            ORDER BY c.position ASC
             """,
             (meta["id"],),
         ).fetchall()
@@ -890,11 +1018,14 @@ def load_index_vectors(
         ).fetchone()
         if meta is None:
             raise FileNotFoundError(db_path)
+        version = int(meta["version"] or 0)
+        if version < CACHE_VERSION:
+            raise FileNotFoundError(db_path)
 
         index_id = meta["id"]
         dimension = int(meta["dimension"])
         chunk_count = conn.execute(
-            "SELECT COUNT(*) AS count FROM indexed_file WHERE index_id = ?",
+            "SELECT COUNT(*) AS count FROM indexed_chunk WHERE index_id = ?",
            (index_id,),
        ).fetchone()["count"]
        chunk_total = int(chunk_count or 0)
@@ -916,27 +1047,48 @@ def load_index_vectors(
                 "extensions": _deserialize_extensions(meta["extensions"]),
                 "files": [],
                 "chunks": [],
+                "chunk_ids": [],
             }
             return [], empty, metadata
 
         embeddings = np.empty((chunk_total, dimension), dtype=np.float32)
         paths: list[Path] = []
-        chunk_entries: list[dict] = []
-        file_snapshot: dict[str, dict] = {}
+        chunk_ids: list[int] = []
+        file_snapshot: list[dict] = []
+        file_meta_by_rel: dict[str, dict] = {}
+
+        file_rows = conn.execute(
+            """
+            SELECT rel_path, abs_path, size_bytes, mtime
+            FROM indexed_file
+            WHERE index_id = ?
+            """,
+            (index_id,),
+        ).fetchall()
+        for row in file_rows:
+            file_meta_by_rel[row["rel_path"]] = {
+                "path": row["rel_path"],
+                "absolute": row["abs_path"],
+                "mtime": row["mtime"],
+                "size": row["size_bytes"],
+            }
+        seen_files: set[str] = set()
 
         cursor = conn.execute(
             """
-            SELECT f.rel_path, f.abs_path, f.size_bytes, f.mtime, f.preview, f.label_hash, f.chunk_index, f.start_line, f.end_line, e.vector_blob
-            FROM indexed_file AS f
-            JOIN file_embedding AS e ON e.file_id = f.id
-            WHERE f.index_id = ?
-            ORDER BY f.position ASC
+            SELECT c.id AS chunk_id, f.rel_path, e.vector_blob
+            FROM indexed_chunk AS c
+            JOIN indexed_file AS f ON f.id = c.file_id
+            JOIN chunk_embedding AS e ON e.chunk_id = c.id
+            WHERE c.index_id = ?
+            ORDER BY c.position ASC
             """,
             (index_id,),
         )
 
         for idx, row in enumerate(cursor):
             rel_path = row["rel_path"]
+            chunk_id = int(row["chunk_id"])
             vector = np.frombuffer(row["vector_blob"], dtype=np.float32)
             if vector.size != dimension:
                 raise RuntimeError(
@@ -944,27 +1096,12 @@ def load_index_vectors(
                 )
             embeddings[idx] = vector
             paths.append(root / Path(rel_path))
-            chunk_index = int(row["chunk_index"])
-            chunk_entries.append(
-                {
-                    "path": rel_path,
-                    "absolute": row["abs_path"],
-                    "mtime": row["mtime"],
-                    "size": row["size_bytes"],
-                    "preview": row["preview"],
-                    "label_hash": row["label_hash"],
-                    "chunk_index": chunk_index,
-                    "start_line": row["start_line"],
-                    "end_line": row["end_line"],
-                }
-            )
-            if rel_path not in file_snapshot:
-                file_snapshot[rel_path] = {
-                    "path": rel_path,
-                    "absolute": row["abs_path"],
-                    "mtime": row["mtime"],
-                    "size": row["size_bytes"],
-                }
+            chunk_ids.append(chunk_id)
+            if rel_path not in seen_files:
+                meta_row = file_meta_by_rel.get(rel_path)
+                if meta_row is not None:
+                    file_snapshot.append(meta_row)
+                seen_files.add(rel_path)
 
         metadata = {
             "index_id": int(index_id),
@@ -979,14 +1116,73 @@ def load_index_vectors(
             "dimension": meta["dimension"],
             "exclude_patterns": _deserialize_exclude_patterns(meta["exclude_patterns"]),
             "extensions": _deserialize_extensions(meta["extensions"]),
-            "files": list(file_snapshot.values()),
-            "chunks": chunk_entries,
+            "files": file_snapshot,
+            "chunks": [],
+            "chunk_ids": chunk_ids,
         }
         return paths, embeddings, metadata
     finally:
         conn.close()
 
 
+def load_chunk_metadata(
+    chunk_ids: Sequence[int],
+    conn: sqlite3.Connection | None = None,
+) -> dict[int, dict]:
+    """Load cached chunk metadata keyed by chunk_id."""
+
+    if not chunk_ids:
+        return {}
+    unique_ids: list[int] = []
+    seen: set[int] = set()
+    for value in chunk_ids:
+        try:
+            chunk_id = int(value)
+        except (TypeError, ValueError):
+            continue
+        if chunk_id in seen:
+            continue
+        seen.add(chunk_id)
+        unique_ids.append(chunk_id)
+    if not unique_ids:
+        return {}
+    db_path = cache_db_path()
+    owns_connection = conn is None
+    try:
+        connection = conn or _connect(db_path)
+    except sqlite3.OperationalError:
+        return {}
+    try:
+        try:
+            _ensure_schema(connection)
+        except sqlite3.OperationalError:
+            return {}
+        results: dict[int, dict] = {}
+        for chunk in _chunk_values(unique_ids, 900):
+            placeholders = ", ".join("?" for _ in chunk)
+            rows = connection.execute(
+                f"""
+                SELECT c.id AS chunk_id, c.chunk_index, m.preview, m.label_hash, m.start_line, m.end_line
+                FROM indexed_chunk AS c
+                LEFT JOIN chunk_meta AS m ON m.chunk_id = c.id
+                WHERE c.id IN ({placeholders})
+                """,
+                tuple(chunk),
+            ).fetchall()
+            for row in rows:
+                results[int(row["chunk_id"])] = {
+                    "chunk_index": int(row["chunk_index"]),
+                    "preview": row["preview"],
+                    "label_hash": row["label_hash"],
+                    "start_line": row["start_line"],
+                    "end_line": row["end_line"],
+                }
+        return results
+    finally:
+        if owns_connection:
+            connection.close()
+
+
 def load_query_vector(
     index_id: int,
     query_hash: str,
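load_index_vectors no longer materializes a dict per chunk; it returns the ordered `chunk_ids` and defers previews, label hashes, and line ranges to the new `load_chunk_metadata`, so callers pay for metadata only on the chunks they actually present. A hypothetical usage sketch; the ranking helper and the exact load_index_vectors arguments are assumptions, not part of this diff:

# Hypothetical caller: load vectors first, then hydrate metadata for the
# top hits only. rank_by_similarity and the load_index_vectors call shape
# are illustrative assumptions.
paths, embeddings, metadata = load_index_vectors(root)
top = rank_by_similarity(embeddings, query_vector)[:10]
wanted = [metadata["chunk_ids"][i] for i in top]
chunk_meta = load_chunk_metadata(wanted)
for i in top:
    info = chunk_meta.get(metadata["chunk_ids"][i], {})
    print(paths[i], info.get("start_line"), info.get("end_line"))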
@@ -1260,7 +1456,7 @@ def list_cache_entries() -> list[dict[str, object]]:
                 exclude_patterns,
                 extensions,
                 (
-                    SELECT COUNT(DISTINCT rel_path)
+                    SELECT COUNT(*)
                     FROM indexed_file
                     WHERE index_id = index_metadata.id
                 ) AS file_count