alvin-bot 4.19.2 → 4.20.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.js +11 -0
- package/dist/paths.js +4 -1
- package/dist/services/embeddings-migration.js +114 -0
- package/dist/services/embeddings.js +207 -166
- package/package.json +4 -2
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,20 @@
 
 All notable changes to Alvin Bot are documented here.
 
+## [4.20.0] — 2026-05-03
+
+### 🚀 Embeddings: JSON → SQLite
+
+**Why.** The vector index `~/.alvin-bot/memory/.embeddings.json` had grown to **146 MB**. Every bot start parsed the whole file (slow boot, large heap), and every reindex iteration rewrote the entire 146 MB blob to disk. With ~3 800 entries the corpus is still small enough that linear-scan cosine similarity is fine, but the JSON serialisation overhead and the per-write full-file rewrite were the real cost.
+
+**Change.** New SQLite-backed store at `~/.alvin-bot/memory/.embeddings.db` (table `entries(id, source, text, vector BLOB, indexed_at)` plus an index on `source`). Vectors live as raw `Float32Array` BLOBs (4 B × 3072 dims = 12 KB each) instead of JSON-encoded Float64 arrays (≈ 24 KB each). Reindexing is per-chunk INSERT/UPDATE inside a single transaction — no full-file rewrite. WAL mode + 256 MB mmap, `synchronous = NORMAL`.
+
+**Migration.** `src/services/embeddings-migration.ts` runs once on boot if `.embeddings.json` exists but `.embeddings.db` does not. The source JSON is renamed to `.embeddings.json.bak-pre-sqlite` after a successful entry-count match (idempotent, safe to re-run). On the maintainer's instance: 146 MB → 49 MB, 3 799 entries copied in 660 ms.
+
+**Files touched.** `src/paths.ts` (new `EMBEDDINGS_DB`), `src/services/embeddings.ts` (full rewrite, drop-in same public surface), `src/services/embeddings-migration.ts` (new), `src/index.ts` (boot hook), `package.json` (dep `better-sqlite3@^12`, dev dep `@types/better-sqlite3`). Public API unchanged: `searchMemory`, `reindexMemory`, `initEmbeddings`, and `getIndexStats` keep their signatures, so callers in `engine.ts`, `web-server.ts`, etc. don't change.
+
+**Wins.** ~66 % smaller on disk. Bot boot no longer parses a 146 MB JSON file. Reindexing a single file is a DELETE-by-source (O(log n) via the `source` index) plus transactional INSERTs, instead of `JSON.stringify` + `writeFileSync` of the whole index.
+
 ## [4.19.2] — 2026-04-24
 
 ### 🐛 Fix: workspace switch produced "(no response)" format cascade; added empty-stream diagnostics
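
**Sanity check.** A minimal Node sketch (standalone, not part of the package) that verifies the per-entry arithmetic quoted above; the `Float32Array` ↔ `Buffer` round-trip mirrors what the new store does:

```js
// 3072 Float32 components at 4 bytes each: 12,288 bytes, i.e. the ~12 KB per entry.
const dims = 3072;
const f32 = new Float32Array(dims);
console.log(f32.byteLength); // 12288

// Encode to a BLOB-ready Buffer, then view it as Float32 again: lossless.
const blob = Buffer.from(f32.buffer, f32.byteOffset, f32.byteLength);
const back = new Float32Array(blob.buffer, blob.byteOffset, dims);
console.log(back.length === dims); // true

// The legacy JSON stored each component as a decimal number at Float64
// precision, roughly twice the serialised bytes: the ~24 KB figure above.
```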
package/dist/index.js
CHANGED
@@ -20,6 +20,17 @@ if (hasLegacyData()) {
 }
 // 3. Seed defaults for any files that don't exist yet (fresh install)
 seedDefaults();
+// 3b. v4.20 — One-shot migration of legacy .embeddings.json → SQLite (.embeddings.db).
+// Idempotent and safe: source JSON is renamed to .bak-pre-sqlite after success.
+import { shouldMigrateEmbeddingsToSqlite, migrateEmbeddingsToSqlite } from "./services/embeddings-migration.js";
+if (shouldMigrateEmbeddingsToSqlite()) {
+    try {
+        migrateEmbeddingsToSqlite();
+    }
+    catch (err) {
+        console.error("❌ Embeddings migration failed — bot will continue with empty SQLite store, JSON kept:", err);
+    }
+}
 // 3a. v4.12.2 — Audit + repair permissions on sensitive files. On multi-user
 // systems, files written pre-v4.12.2 may have 0o644 / 0o666 mode — i.e.
 // readable by other users on the same machine. This routine chmod-repairs
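
**Expected boot output.** Combining the `console.log` templates in `embeddings-migration.js` (added below) with the figures quoted in the changelog, the one-shot migration on the maintainer's instance would have printed roughly:

```
📦 Migrating embeddings JSON (146 MB) → SQLite...
✅ Embeddings migrated: 3799 entries, 146 MB JSON → 49 MB SQLite in 660 ms
```

If the migration throws instead, the half-written `.embeddings.db` is unlinked, so `shouldMigrateEmbeddingsToSqlite()` fires again on the next boot.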
package/dist/paths.js
CHANGED
@@ -55,8 +55,11 @@ export const PROJECTS_MEMORY_DIR = resolve(DATA_DIR, "memory", "projects");
  * name, purpose, cwd, color, emoji, and an optional system prompt body.
  * See src/services/workspaces.ts for the loader and matcher. */
 export const WORKSPACES_DIR = resolve(DATA_DIR, "workspaces");
-/** memory/.embeddings.json —
+/** memory/.embeddings.json — Legacy JSON vector index. Read on first SQLite migration only;
+ * active code path is EMBEDDINGS_DB. */
 export const EMBEDDINGS_IDX = resolve(DATA_DIR, "memory", ".embeddings.json");
+/** memory/.embeddings.db — SQLite vector store (replaces .embeddings.json since v4.20). */
+export const EMBEDDINGS_DB = resolve(DATA_DIR, "memory", ".embeddings.db");
 /** users/ — User profiles and per-user memory */
 export const USERS_DIR = resolve(DATA_DIR, "users");
 /** data/ — Runtime control data */
package/dist/services/embeddings-migration.js
ADDED
@@ -0,0 +1,114 @@
+/**
+ * One-shot migration from legacy .embeddings.json → SQLite .embeddings.db.
+ *
+ * Triggered on startup if .embeddings.json exists but .embeddings.db does not.
+ * Idempotent: skips silently if the DB is already populated.
+ *
+ * Safety:
+ * - Source JSON is renamed to .embeddings.json.bak-pre-sqlite (kept on disk).
+ * - Entry counts are compared after import; mismatch → throw, leaving the bak
+ *   file in place for manual recovery.
+ */
+import fs from "fs";
+import path from "path";
+import Database from "better-sqlite3";
+import { EMBEDDINGS_IDX, EMBEDDINGS_DB } from "../paths.js";
+function vectorToBlob(v) {
+    const f32 = new Float32Array(v);
+    return Buffer.from(f32.buffer, f32.byteOffset, f32.byteLength);
+}
+export function shouldMigrateEmbeddingsToSqlite() {
+    return fs.existsSync(EMBEDDINGS_IDX) && !fs.existsSync(EMBEDDINGS_DB);
+}
+/**
+ * Run the migration. Returns { entries, sourceMb, targetMb }, or null if skipped.
+ */
+export function migrateEmbeddingsToSqlite() {
+    if (!shouldMigrateEmbeddingsToSqlite())
+        return null;
+    const t0 = Date.now();
+    const sourceSize = fs.statSync(EMBEDDINGS_IDX).size;
+    console.log(`📦 Migrating embeddings JSON (${(sourceSize / 1024 / 1024).toFixed(0)} MB) → SQLite...`);
+    const raw = fs.readFileSync(EMBEDDINGS_IDX, "utf-8");
+    let legacy;
+    try {
+        legacy = JSON.parse(raw);
+    }
+    catch (err) {
+        console.error("⚠️ Embeddings migration: source JSON is corrupt — skipping.", err);
+        return null;
+    }
+    fs.mkdirSync(path.dirname(EMBEDDINGS_DB), { recursive: true });
+    const db = new Database(EMBEDDINGS_DB);
+    try {
+        db.pragma("journal_mode = WAL");
+        db.pragma("synchronous = NORMAL");
+        db.exec(`
+      CREATE TABLE IF NOT EXISTS meta (
+        key TEXT PRIMARY KEY,
+        value TEXT NOT NULL
+      );
+      CREATE TABLE IF NOT EXISTS file_mtimes (
+        source TEXT PRIMARY KEY,
+        mtime_ms REAL NOT NULL
+      );
+      CREATE TABLE IF NOT EXISTS entries (
+        id TEXT PRIMARY KEY,
+        source TEXT NOT NULL,
+        text TEXT NOT NULL,
+        vector BLOB NOT NULL,
+        indexed_at INTEGER NOT NULL
+      );
+      CREATE INDEX IF NOT EXISTS idx_entries_source ON entries(source);
+    `);
+        const setMeta = db.prepare("INSERT INTO meta (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value");
+        setMeta.run("model", legacy.model);
+        setMeta.run("schemaVersion", "1");
+        setMeta.run("lastReindex", String(legacy.lastReindex));
+        setMeta.run("migratedFromJson", String(Date.now()));
+        const insMtime = db.prepare("INSERT INTO file_mtimes (source, mtime_ms) VALUES (?, ?) ON CONFLICT(source) DO UPDATE SET mtime_ms = excluded.mtime_ms");
+        const writeMtimes = db.transaction((rows) => {
+            for (const [s, m] of rows)
+                insMtime.run(s, m);
+        });
+        writeMtimes(Object.entries(legacy.fileMtimes ?? {}));
+        const insEntry = db.prepare("INSERT INTO entries (id, source, text, vector, indexed_at) VALUES (?, ?, ?, ?, ?)");
+        const writeEntries = db.transaction((rows) => {
+            for (const e of rows) {
+                if (!Array.isArray(e.vector) || e.vector.length === 0)
+                    continue;
+                insEntry.run(e.id, e.source, e.text, vectorToBlob(e.vector), e.indexedAt);
+            }
+        });
+        writeEntries(legacy.entries ?? []);
+        const written = db.prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+        const expected = (legacy.entries ?? []).filter(e => Array.isArray(e.vector) && e.vector.length > 0).length;
+        if (written !== expected) {
+            throw new Error(`Entry-count mismatch after migration: expected ${expected}, got ${written}`);
+        }
+        db.close();
+        // Move source JSON aside so we never re-migrate.
+        const bak = `${EMBEDDINGS_IDX}.bak-pre-sqlite`;
+        try {
+            fs.renameSync(EMBEDDINGS_IDX, bak);
+        }
+        catch (err) {
+            console.warn("⚠️ Could not rename source JSON:", err);
+        }
+        const targetSize = fs.statSync(EMBEDDINGS_DB).size;
+        const dt = Date.now() - t0;
+        console.log(`✅ Embeddings migrated: ${written} entries, ${(sourceSize / 1024 / 1024).toFixed(0)} MB JSON → ${(targetSize / 1024 / 1024).toFixed(0)} MB SQLite in ${dt} ms`);
+        return { entries: written, sourceMb: sourceSize / 1024 / 1024, targetMb: targetSize / 1024 / 1024 };
+    }
+    catch (err) {
+        db.close();
+        // Remove half-written DB so the next boot retries cleanly.
+        try {
+            fs.unlinkSync(EMBEDDINGS_DB);
+        }
+        catch {
+            /* nothing to clean */
+        }
+        throw err;
+    }
+}
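
**Legacy shape.** For orientation, a hypothetical minimal `.embeddings.json` covering exactly the fields the migration reads (`model`, `lastReindex`, `fileMtimes`, and `entries[]` with `id`, `source`, `text`, `vector`, `indexedAt`); the `id` scheme and all values shown are illustrative assumptions, not taken from the package:

```js
const legacyIndexExample = {
    model: "gemini-embedding-001",
    lastReindex: 1745000000000,                 // epoch ms
    fileMtimes: { "MEMORY.md": 1744999999000 }, // source → mtimeMs
    entries: [
        {
            id: "MEMORY.md#0",                  // hypothetical id scheme
            source: "MEMORY.md",
            text: "## Example section\nSome indexed memory text.",
            vector: [0.0123, -0.0456],          // 3072 JSON numbers in a real file
            indexedAt: 1744999999500,
        },
    ],
};
```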
package/dist/services/embeddings.js
CHANGED
@@ -1,31 +1,116 @@
 /**
  * Embeddings Service — Vector-based semantic memory search.
  *
- * Uses Google's
- * Stores embeddings in a
+ * Uses Google's gemini-embedding-001 model for generating embeddings.
+ * Stores embeddings in a SQLite database (.embeddings.db) — replaces the
+ * older .embeddings.json index since v4.20. The migration runs once
+ * automatically on startup (see src/services/embeddings-migration.ts).
 *
  * Architecture:
- * - Each memory entry (paragraph/section) gets
- * - Vectors are stored
- *
- * -
+ * - Each memory entry (paragraph/section) gets a 3072-dim Float32 vector.
+ * - Vectors are stored as raw BLOB (4 bytes × 3072 = 12 KB each) instead of
+ *   JSON-encoded Float64 arrays (~24 KB each) — halves disk footprint.
+ * - Cosine similarity runs in-memory: SQLite has no native vector ops, but
+ *   reading the BLOBs is mmap-cheap and JS does the dot product fast enough
+ *   for the current corpus (a few thousand entries).
+ * - Reindexing is per-chunk INSERT/UPDATE — no full-file rewrite.
  */
 import fs from "fs";
 import path from "path";
 import { resolve } from "path";
-import { config } from "../config.js";
 import os from "os";
-import
+import Database from "better-sqlite3";
+import { config } from "../config.js";
+import { MEMORY_DIR, MEMORY_FILE, EMBEDDINGS_DB } from "../paths.js";
 import { ASSETS_DIR, ASSETS_INDEX_MD } from "../paths.js";
 // Hub memory directory (Claude Hub — read-only, additional context)
 const HUB_MEMORY_DIR = resolve(os.homedir(), ".claude", "hub", "MEMORY");
-// ──
+// ── Constants ───────────────────────────────────────────
 const EMBEDDING_MODEL = "gemini-embedding-001";
 const EMBEDDING_DIMENSION = 3072;
-
-
-
-
+const SCHEMA_VERSION = "1";
+// ── Vector encoding (Float32Array ↔ Buffer) ─────────────
+function vectorToBlob(v) {
+    const f32 = new Float32Array(v);
+    // Buffer.from(arrayBuffer, byteOffset, length) preserves the underlying memory.
+    return Buffer.from(f32.buffer, f32.byteOffset, f32.byteLength);
+}
+function blobToVector(b) {
+    // Buffers from better-sqlite3 own their memory and may not be aligned to 4 bytes.
+    // Copying into a fresh Float32Array guarantees alignment.
+    const f32 = new Float32Array(b.byteLength / 4);
+    const dv = new DataView(b.buffer, b.byteOffset, b.byteLength);
+    for (let i = 0; i < f32.length; i++) {
+        f32[i] = dv.getFloat32(i * 4, true /* little-endian */);
+    }
+    return f32;
+}
+// ── DB lifecycle ────────────────────────────────────────
+let dbInstance = null;
+function db() {
+    if (dbInstance)
+        return dbInstance;
+    // Ensure directory exists (handles fresh installs).
+    fs.mkdirSync(path.dirname(EMBEDDINGS_DB), { recursive: true });
+    dbInstance = new Database(EMBEDDINGS_DB);
+    dbInstance.pragma("journal_mode = WAL");
+    dbInstance.pragma("synchronous = NORMAL");
+    dbInstance.pragma("temp_store = MEMORY");
+    dbInstance.pragma("mmap_size = 268435456"); // 256 MB
+    dbInstance.exec(`
+      CREATE TABLE IF NOT EXISTS meta (
+        key TEXT PRIMARY KEY,
+        value TEXT NOT NULL
+      );
+      CREATE TABLE IF NOT EXISTS file_mtimes (
+        source TEXT PRIMARY KEY,
+        mtime_ms REAL NOT NULL
+      );
+      CREATE TABLE IF NOT EXISTS entries (
+        id TEXT PRIMARY KEY,
+        source TEXT NOT NULL,
+        text TEXT NOT NULL,
+        vector BLOB NOT NULL,
+        indexed_at INTEGER NOT NULL
+      );
+      CREATE INDEX IF NOT EXISTS idx_entries_source ON entries(source);
+    `);
+    // Initialise meta if absent.
+    const set = dbInstance.prepare("INSERT INTO meta (key, value) VALUES (?, ?) ON CONFLICT(key) DO NOTHING");
+    set.run("model", EMBEDDING_MODEL);
+    set.run("schemaVersion", SCHEMA_VERSION);
+    return dbInstance;
+}
+/** Close handle (used by tests / shutdown). */
+export function closeEmbeddingsDb() {
+    if (dbInstance) {
+        dbInstance.close();
+        dbInstance = null;
+    }
+}
+// ── Meta helpers ────────────────────────────────────────
+function getMeta(key) {
+    const row = db().prepare("SELECT value FROM meta WHERE key = ?").get(key);
+    return row?.value ?? null;
+}
+function setMeta(key, value) {
+    db()
+        .prepare("INSERT INTO meta (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value")
+        .run(key, value);
+}
+function getFileMtimes() {
+    const rows = db().prepare("SELECT source, mtime_ms FROM file_mtimes").all();
+    const out = {};
+    for (const r of rows)
+        out[r.source] = r.mtime_ms;
+    return out;
+}
+function setFileMtime(source, mtimeMs) {
+    db()
+        .prepare("INSERT INTO file_mtimes (source, mtime_ms) VALUES (?, ?) ON CONFLICT(source) DO UPDATE SET mtime_ms = excluded.mtime_ms")
+        .run(source, mtimeMs);
+}
+// ── Google Embeddings API ───────────────────────────────
 async function getEmbeddings(texts) {
     const apiKey = config.apiKeys.google;
     if (!apiKey) {
@@ -50,16 +135,13 @@ async function getEmbeddings(texts) {
             const err = await response.text();
             throw new Error(`Embedding API error: ${response.status} — ${err}`);
         }
-        const data = await response.json();
+        const data = (await response.json());
         for (const emb of data.embeddings) {
            results.push(emb.values);
         }
    }
    return results;
 }
-/**
- * Get embedding for a single query text.
- */
 async function getQueryEmbedding(text) {
     const apiKey = config.apiKeys.google;
     if (!apiKey) {
@@ -78,11 +160,11 @@ async function getQueryEmbedding(text) {
         const err = await response.text();
         throw new Error(`Embedding API error: ${response.status} — ${err}`);
     }
-    const data = await response.json();
+    const data = (await response.json());
     return data.embedding.values;
 }
 // ── Vector Math ─────────────────────────────────────────
-function
+function cosineSimilarityF32(a, b) {
     if (a.length !== b.length)
         return 0;
     let dotProduct = 0;
@@ -97,20 +179,13 @@ function cosineSimilarity(a, b) {
     return denom === 0 ? 0 : dotProduct / denom;
 }
 // ── Text Chunking ───────────────────────────────────────
-/**
- * Split a markdown file into meaningful chunks.
- * Splits on ## headers, keeping each section as a chunk.
- * Falls back to paragraph splitting for files without headers.
- */
 function chunkMarkdown(content, source) {
     const chunks = [];
-    // Split on ## headers
     const sections = content.split(/^(?=## )/gm);
     for (let i = 0; i < sections.length; i++) {
         const section = sections[i].trim();
         if (!section || section.length < 20)
-            continue;
-        // If section is too long (>1000 chars), split into paragraphs
+            continue;
         if (section.length > 1000) {
             const paragraphs = section.split(/\n\n+/);
             let currentChunk = "";
@@ -142,51 +217,7 @@ function chunkMarkdown(content, source) {
     }
     return chunks;
 }
-// ──
-// In-memory cache for the embedding index. Without this, every query would
-// re-read and re-parse the on-disk index (can be 100+ MB, making searchMemory
-// the slowest step in a message turn). We keep the parsed object and invalidate
-// via mtime check — so external reindexers are still picked up.
-let indexCache = null;
-let indexCacheMtime = 0;
-function loadIndex() {
-    try {
-        const st = fs.statSync(INDEX_FILE);
-        if (indexCache && st.mtimeMs === indexCacheMtime) {
-            return indexCache;
-        }
-        const raw = fs.readFileSync(INDEX_FILE, "utf-8");
-        indexCache = JSON.parse(raw);
-        indexCacheMtime = st.mtimeMs;
-        return indexCache;
-    }
-    catch {
-        // File missing or unparseable — return an empty index and don't cache it
-        // (next call will retry, so a freshly-written index gets picked up).
-        return {
-            model: EMBEDDING_MODEL,
-            lastReindex: 0,
-            fileMtimes: {},
-            entries: [],
-        };
-    }
-}
-function saveIndex(index) {
-    fs.writeFileSync(INDEX_FILE, JSON.stringify(index));
-    // Refresh cache immediately so the next loadIndex() sees the new state
-    // without a disk round-trip.
-    indexCache = index;
-    try {
-        indexCacheMtime = fs.statSync(INDEX_FILE).mtimeMs;
-    }
-    catch {
-        indexCacheMtime = Date.now();
-    }
-}
-/**
- * Recursively walk a directory, returning file paths.
- * Skips INDEX.json and INDEX.md at the directory root.
- */
+// ── Indexable file discovery ────────────────────────────
 function walkAssetDir(dir) {
     const results = [];
     function walk(currentDir) {
@@ -213,17 +244,11 @@ function walkAssetDir(dir) {
     return results;
 }
 const TEXT_EXTENSIONS = new Set([".md", ".html", ".txt", ".css", ".ts"]);
-/**
- * Get all files that should be indexed — memories + text-based assets.
- */
 function getIndexableFiles() {
     const files = [];
-    // ── Memories (existing) ───────────────────────────────
-    // Alvin-Bot MEMORY.md
     if (fs.existsSync(MEMORY_FILE)) {
         files.push({ path: MEMORY_FILE, relativePath: "MEMORY.md" });
     }
-    // Alvin-Bot daily logs
     if (fs.existsSync(MEMORY_DIR)) {
         const entries = fs.readdirSync(MEMORY_DIR);
         for (const entry of entries) {
@@ -235,7 +260,6 @@ function getIndexableFiles() {
             }
         }
     }
-    // Hub memories (~/.claude/hub/MEMORY/) — Claude Hub knowledge base
     if (fs.existsSync(HUB_MEMORY_DIR)) {
         try {
             const entries = fs.readdirSync(HUB_MEMORY_DIR);
@@ -248,14 +272,13 @@ function getIndexableFiles() {
                }
            }
        }
-        catch {
+        catch {
+            /* Hub not available — skip */
+        }
    }
-    // ── Assets (new) ──────────────────────────────────────
-    // Asset INDEX.md — compact summary of all assets
     if (fs.existsSync(ASSETS_INDEX_MD)) {
         files.push({ path: ASSETS_INDEX_MD, relativePath: "assets/INDEX.md" });
     }
-    // Text-based asset files (HTML, MD, TXT, CSS, TS)
     if (fs.existsSync(ASSETS_DIR)) {
         for (const entry of walkAssetDir(ASSETS_DIR)) {
             if (TEXT_EXTENSIONS.has(path.extname(entry.name))) {
@@ -268,120 +291,133 @@ function getIndexableFiles() {
     }
     return files;
 }
-/**
- * Check which files need reindexing (new or modified).
- */
-function getStaleFiles(index) {
+function getStaleFiles() {
     const allFiles = getIndexableFiles();
+    const known = getFileMtimes();
     const stale = [];
     for (const file of allFiles) {
         try {
-            const
-
-            if (!index.fileMtimes[file.relativePath] || index.fileMtimes[file.relativePath] < mtime) {
+            const mtime = fs.statSync(file.path).mtimeMs;
+            if (!known[file.relativePath] || known[file.relativePath] < mtime) {
                 stale.push(file);
             }
         }
         catch {
-
+            /* file disappeared */
         }
     }
     return stale;
 }
 // ── Public API ──────────────────────────────────────────
-/**
- * Reindex all memory files (or just stale ones).
- * Returns number of chunks indexed.
- */
 export async function reindexMemory(force = false) {
-    const
-    const filesToIndex = force ? getIndexableFiles() : getStaleFiles(index);
+    const filesToIndex = force ? getIndexableFiles() : getStaleFiles();
     if (filesToIndex.length === 0) {
-
+        const total = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+        return { indexed: 0, total };
     }
-    //
-    const
-
-
+    // Drop existing entries for files being reindexed (per-source DELETE is O(log n) thanks to idx).
+    const delStmt = db().prepare("DELETE FROM entries WHERE source = ?");
+    const dropOld = db().transaction((sources) => {
+        for (const s of sources)
+            delStmt.run(s);
+    });
+    dropOld(filesToIndex.map(f => f.relativePath));
+    // Chunk all files.
     const allChunks = [];
     for (const file of filesToIndex) {
         try {
             const content = fs.readFileSync(file.path, "utf-8");
             const chunks = chunkMarkdown(content, file.relativePath);
+            const mtime = fs.statSync(file.path).mtimeMs;
             for (const chunk of chunks) {
-                allChunks.push({ ...chunk, source: file.relativePath });
+                allChunks.push({ ...chunk, source: file.relativePath, mtime });
             }
-            // Update mtime
-            const stat = fs.statSync(file.path);
-            index.fileMtimes[file.relativePath] = stat.mtimeMs;
         }
         catch (err) {
             console.error(`Failed to chunk ${file.relativePath}:`, err);
         }
     }
     if (allChunks.length === 0) {
-
-
+        // Even with zero chunks, keep mtimes in sync so we don't re-walk on next run.
+        const updMtime = db().transaction((files) => {
+            for (const f of files) {
+                try {
+                    setFileMtime(f.relativePath, fs.statSync(f.path).mtimeMs);
+                }
+                catch {
+                    /* file disappeared */
+                }
+            }
+        });
+        updMtime(filesToIndex);
+        const total = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+        return { indexed: 0, total };
     }
-    // Get embeddings for all chunks
+    // Get embeddings for all chunks (network).
     const texts = allChunks.map(c => c.text);
     const vectors = await getEmbeddings(texts);
-    //
-
-
-
-
-
-
-
-
-
-
-
+    // Single transaction for all writes.
+    const insertStmt = db().prepare("INSERT INTO entries (id, source, text, vector, indexed_at) VALUES (?, ?, ?, ?, ?) " +
+        "ON CONFLICT(id) DO UPDATE SET source=excluded.source, text=excluded.text, vector=excluded.vector, indexed_at=excluded.indexed_at");
+    const writeAll = db().transaction((rows) => {
+        for (const r of rows) {
+            insertStmt.run(r.id, r.source, r.text, r.vector, r.indexedAt);
+        }
+    });
+    const now = Date.now();
+    writeAll(allChunks.map((c, i) => ({
+        id: c.id,
+        source: c.source,
+        text: c.text,
+        vector: vectorToBlob(vectors[i]),
+        indexedAt: now,
+    })));
+    // Update mtimes for the files we just (re-)indexed.
+    const updMtime = db().transaction((files) => {
+        for (const f of files) {
+            try {
+                setFileMtime(f.relativePath, fs.statSync(f.path).mtimeMs);
+            }
+            catch {
+                /* file disappeared */
+            }
+        }
+    });
+    updMtime(filesToIndex);
+    setMeta("lastReindex", String(now));
+    const total = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+    return { indexed: allChunks.length, total };
 }
-/**
- * Semantic search across all indexed memory.
- * Returns top-K results sorted by similarity.
- */
 export async function searchMemory(query, topK = 5, minScore = 0.3) {
-
-
-
+    // Auto-index if empty.
+    const total = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+    if (total === 0) {
         await reindexMemory();
-
-
-        if (reloaded.entries.length === 0)
+        const after = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+        if (after === 0)
             return [];
     }
-
-    const
-
-    const
-
-
-
-
-
-
-
-        .slice(0, topK);
+    const queryVector = Float32Array.from(await getQueryEmbedding(query));
+    const rows = db().prepare("SELECT id, source, text, vector FROM entries").all();
+    const scored = [];
+    for (const row of rows) {
+        const v = blobToVector(row.vector);
+        const score = cosineSimilarityF32(queryVector, v);
+        if (score >= minScore) {
+            scored.push({ text: row.text, source: row.source, score });
+        }
+    }
+    scored.sort((a, b) => b.score - a.score);
+    return scored.slice(0, topK);
 }
-/**
- * Get index stats for /status.
- */
-/**
- * Auto-reindex on startup. Indexes only stale/new files (incremental).
- * Runs in background — does not block bot startup.
- */
 export async function initEmbeddings() {
     try {
-
+        db(); // Open & migrate schema.
+        const stale = getStaleFiles();
         if (stale.length === 0) {
-            const
-            if (
-                return;
+            const total = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+            if (total > 0)
+                return;
         }
         const result = await reindexMemory();
         if (result.indexed > 0) {
@@ -389,21 +425,26 @@ export async function initEmbeddings() {
         }
     }
     catch (err) {
-        // Non-fatal — bot works without embeddings
        console.warn("⚠️ Embeddings init failed:", err instanceof Error ? err.message : err);
    }
 }
 export function getIndexStats() {
-
+    let entries = 0;
+    let files = 0;
+    let lastReindex = 0;
     let sizeBytes = 0;
     try {
-
+        entries = db().prepare("SELECT COUNT(*) AS c FROM entries").get().c;
+        files = db().prepare("SELECT COUNT(*) AS c FROM file_mtimes").get().c;
+        const meta = getMeta("lastReindex");
+        if (meta)
+            lastReindex = Number(meta);
+        sizeBytes = fs.statSync(EMBEDDINGS_DB).size;
+    }
+    catch {
+        /* DB not yet initialised */
    }
-
-    return {
-        entries: index.entries.length,
-        files: Object.keys(index.fileMtimes).length,
-        lastReindex: index.lastReindex,
-        sizeBytes,
-    };
+    return { entries, files, lastReindex, sizeBytes };
 }
+// ── Re-export embedding dim for tests / debugging ──────
+export { EMBEDDING_DIMENSION, EMBEDDING_MODEL };
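
**Usage.** A consumer sketch against the unchanged public surface (signatures and return shapes as they appear in the diff above; the query string is illustrative):

```js
import { initEmbeddings, searchMemory, reindexMemory, getIndexStats, closeEmbeddingsDb } from "./services/embeddings.js";

await initEmbeddings();                           // opens the DB, reindexes only stale files
const hits = await searchMemory("deployment checklist", 5, 0.3);
for (const h of hits) {                           // hits: { text, source, score }, best first
    console.log(h.score.toFixed(3), h.source);
}
console.log(getIndexStats());                     // { entries, files, lastReindex, sizeBytes }
await reindexMemory(true);                        // force = true: re-chunk and re-embed everything
closeEmbeddingsDb();                              // release the handle on shutdown
```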
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "alvin-bot",
-  "version": "4.19.2",
-  "description": "Alvin Bot
+  "version": "4.20.0",
+  "description": "Alvin Bot — Your personal AI agent on Telegram, WhatsApp, Discord, Signal, and Web.",
   "type": "module",
   "main": "dist/index.js",
   "bin": {
@@ -170,6 +170,7 @@
     "@types/node": "^22.0.0",
     "@types/ws": "^8.18.1",
     "@whiskeysockets/baileys": "^6.7.21",
+    "better-sqlite3": "^12.9.0",
     "dotenv": "^16.4.0",
     "electron-updater": "^6.8.3",
     "grammy": "^1.30.0",
@@ -181,6 +182,7 @@
     "ws": "^8.19.0"
   },
   "devDependencies": {
+    "@types/better-sqlite3": "^7.6.13",
     "@vitest/ui": "^4.1.4",
     "electron": "^35.7.5",
     "electron-builder": "^26.8.1",