@geekbeer/minion 3.36.0 → 3.40.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/core/db/helpers.js +18 -0
- package/core/db/index.js +146 -0
- package/core/db/migrations/000_initial_schema.js +157 -0
- package/core/db/migrations/001_fts_trigram.js +78 -0
- package/core/db/migrations/002_emails_fts.js +41 -0
- package/core/db/migrations/003_memories_project_id.js +17 -0
- package/core/db/migrations/004_chat_sessions_workspace.js +18 -0
- package/core/db/migrations/005_todos_session_injection.js +19 -0
- package/core/db/migrations/006_daily_logs_workspace.js +69 -0
- package/core/db/migrations/007_workspace_scoping.js +29 -0
- package/core/db/migrations/008_todos_workspace.js +22 -0
- package/core/db/migrations/index.js +41 -0
- package/core/lib/config-warnings.js +16 -8
- package/core/lib/end-of-day.js +30 -14
- package/core/lib/reflection-scheduler.js +23 -9
- package/core/lib/thread-watcher.js +3 -0
- package/core/routes/daily-logs.js +64 -27
- package/core/routes/routines.js +6 -2
- package/core/routes/skills.js +4 -0
- package/core/routes/todos.js +20 -7
- package/core/routes/workflows.js +17 -7
- package/core/stores/daily-log-store.js +61 -30
- package/core/stores/execution-store.js +40 -18
- package/core/stores/routine-store.js +32 -14
- package/core/stores/todo-store.js +37 -10
- package/core/stores/workflow-store.js +34 -13
- package/docs/api-reference.md +66 -25
- package/linux/routes/chat.js +14 -9
- package/linux/routes/directives.js +4 -0
- package/linux/routine-runner.js +2 -0
- package/linux/workflow-runner.js +2 -0
- package/package.json +4 -2
- package/rules/core.md +1 -0
- package/scripts/new-migration.js +53 -0
- package/win/routes/chat.js +14 -9
- package/win/routes/directives.js +4 -0
- package/win/routine-runner.js +2 -0
- package/win/workflow-runner.js +2 -0
- package/core/db.js +0 -583
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared helpers for migration files.
|
|
3
|
+
* These are passed as the second argument to each migration's up(db, helpers).
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
 * True when `table` has a column named `column`.
 * PRAGMA statements cannot take bound parameters, so the (trusted,
 * migration-supplied) table name is interpolated directly.
 */
function hasColumn(db, table, column) {
  return db
    .prepare(`PRAGMA table_info(${table})`)
    .all()
    .some(info => info.name === column)
}
|
|
10
|
+
|
|
11
|
+
/**
 * True when a table named `table` exists in the database.
 * Virtual tables (e.g. FTS5) also appear in sqlite_master with type='table'.
 */
function tableExists(db, table) {
  const found = db.prepare(
    "SELECT name FROM sqlite_master WHERE type='table' AND name = ?"
  ).get(table)
  return Boolean(found)
}
|
|
17
|
+
|
|
18
|
+
// Handed to every migration as the second argument of up(db, helpers).
module.exports = { hasColumn, tableExists }
|
package/core/db/index.js
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SQLite Database Module
|
|
3
|
+
* Provides a singleton SQLite database connection for all minion stores.
|
|
4
|
+
* Uses better-sqlite3 (synchronous API) with WAL mode for performance.
|
|
5
|
+
* Falls back to Node.js built-in node:sqlite when better-sqlite3 is unavailable
|
|
6
|
+
* (e.g., no prebuilt binaries for the current Node.js version on Windows).
|
|
7
|
+
*
|
|
8
|
+
* Database file: $DATA_DIR/minion.db
|
|
9
|
+
*
|
|
10
|
+
* Schema changes are applied via migration files in ./migrations/.
|
|
11
|
+
* Any minion, regardless of the version it was last updated from, reaches the
|
|
12
|
+
* latest schema on boot because unapplied migrations run in sequence.
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
const path = require('path')
|
|
16
|
+
const fs = require('fs')
|
|
17
|
+
|
|
18
|
+
const { DATA_DIR } = require('../lib/platform')
|
|
19
|
+
const { config } = require('../config')
|
|
20
|
+
const helpers = require('./helpers')
|
|
21
|
+
const migrations = require('./migrations')
|
|
22
|
+
|
|
23
|
+
let _db = null
|
|
24
|
+
|
|
25
|
+
/**
 * Choose where minion.db lives: DATA_DIR when it is writable, otherwise the
 * user's home directory as a fallback.
 * @returns {string} absolute path to the database file
 */
function resolveDbPath() {
  let baseDir = config.HOME_DIR
  try {
    // accessSync throws when DATA_DIR is missing or not writable.
    fs.accessSync(DATA_DIR, fs.constants.W_OK)
    baseDir = DATA_DIR
  } catch {
    // Keep the home-directory fallback.
  }
  return path.join(baseDir, 'minion.db')
}
|
|
33
|
+
|
|
34
|
+
/**
 * Open a SQLite connection, preferring better-sqlite3 and falling back to the
 * Node.js built-in node:sqlite (Node 22.5.0+) when the native module is
 * unavailable (e.g. no prebuilt binary for this platform/Node version).
 *
 * The node:sqlite handle is shimmed with `pragma()` and `transaction()` so the
 * rest of the codebase can use the better-sqlite3 API shape.
 *
 * Fix: the original final error message printed generic build advice under the
 * label "better-sqlite3 error:" but discarded the actual exception from the
 * better-sqlite3 attempt; it is now captured and included in the diagnostics.
 *
 * @param {string} dbPath - path to the database file
 * @returns {object} an open database handle
 * @throws {Error} when neither driver can be loaded/opened
 */
function openDatabase(dbPath) {
  let betterSqliteError
  try {
    const Database = require('better-sqlite3')
    const db = new Database(dbPath)
    db.pragma('journal_mode = WAL')
    db.pragma('foreign_keys = ON')
    console.log('[DB] Using better-sqlite3')
    return db
  } catch (e) {
    betterSqliteError = e
    console.log(`[DB] better-sqlite3 unavailable (${e.message}), trying node:sqlite...`)
  }

  try {
    const { DatabaseSync } = require('node:sqlite')
    const db = new DatabaseSync(dbPath)
    db.exec('PRAGMA journal_mode = WAL')
    db.exec('PRAGMA foreign_keys = ON')

    // Shim better-sqlite3's pragma() API. NOTE(review): better-sqlite3's
    // pragma() returns an array of rows by default; this shim returns the
    // first row only — confirm callers only use single-row pragmas.
    db.pragma = function (str) {
      const stmt = db.prepare(`PRAGMA ${str}`)
      return stmt.get()
    }

    // Shim better-sqlite3's transaction() API: wrap fn in BEGIN/COMMIT with
    // ROLLBACK on any throw. Does not support nested transactions.
    db.transaction = function (fn) {
      return function (...args) {
        db.exec('BEGIN')
        try {
          const result = fn(...args)
          db.exec('COMMIT')
          return result
        } catch (e) {
          db.exec('ROLLBACK')
          throw e
        }
      }
    }

    console.log('[DB] Using node:sqlite (built-in)')
    return db
  } catch (e) {
    throw new Error(
      `[DB] No SQLite driver available. Install better-sqlite3 or use Node.js 22.5.0+ for built-in sqlite support. ` +
      `better-sqlite3 error: ${betterSqliteError ? betterSqliteError.message : 'unknown'} ` +
      `(check that Python and build tools are installed, or use Node.js 22 LTS which has prebuilt binaries). ` +
      `node:sqlite error: ${e.message}`
    )
  }
}
|
|
81
|
+
|
|
82
|
+
/**
 * Return the singleton database connection, opening it on first use.
 * The first call creates the containing directory, opens a driver, and runs
 * any pending migrations; subsequent calls return the cached handle.
 */
function getDb() {
  if (_db !== null) return _db

  const file = resolveDbPath()
  // The driver cannot create intermediate directories itself.
  fs.mkdirSync(path.dirname(file), { recursive: true })

  _db = openDatabase(file)
  runMigrations(_db)

  console.log(`[DB] SQLite database initialized: ${file}`)
  return _db
}
|
|
95
|
+
|
|
96
|
+
/**
 * Apply pending migrations in version order.
 *
 * Each migration runs in a transaction; failures roll back cleanly and prevent
 * the schema_version row from being inserted, so the migration retries on the
 * next boot. This is a behavior change from the pre-refactor code, which used
 * try/catch + INSERT OR IGNORE that could leave migrations marked as applied
 * even when ALTER TABLE silently failed.
 */
function runMigrations(db) {
  // Bookkeeping table: one row per applied migration version.
  db.exec(`
    CREATE TABLE IF NOT EXISTS schema_version (
      version INTEGER PRIMARY KEY,
      applied_at TEXT NOT NULL DEFAULT (datetime('now'))
    );
  `)

  const appliedVersions = new Set(
    db.prepare('SELECT version FROM schema_version').all().map(row => row.version)
  )
  const recordVersion = db.prepare('INSERT INTO schema_version (version) VALUES (?)')

  for (const migration of migrations) {
    if (appliedVersions.has(migration.version)) continue

    // A migration may declare the DB is already in its target state
    // (pre-existing installs); record it as applied without running up().
    if (typeof migration.shouldAutoApply === 'function' && migration.shouldAutoApply(db)) {
      console.log(`[DB] Migration ${migration.version} (${migration.name}): auto-apply (pre-existing state)`)
      recordVersion.run(migration.version)
      continue
    }

    console.log(`[DB] Applying migration ${migration.version}: ${migration.name}...`)
    // up() and the version bookkeeping commit (or roll back) atomically.
    const applyInTx = db.transaction(() => {
      migration.up(db, helpers)
      recordVersion.run(migration.version)
    })
    applyInTx()
    console.log(`[DB] Migration ${migration.version} (${migration.name}) complete`)
  }
}
|
|
137
|
+
|
|
138
|
+
/**
 * Close the singleton connection (if open) and clear the cache so a later
 * getDb() call reopens the database.
 */
function closeDb() {
  if (!_db) return
  _db.close()
  _db = null
  console.log('[DB] SQLite database closed')
}

module.exports = { getDb, closeDb, runMigrations }
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
/**
 * Initial schema — the earliest known state of the DB, before any subsequent
 * migrations. Each of migrations 001-008 applies one change additively on top.
 *
 * For existing DBs created by the pre-refactor code, `memories` already exists,
 * so this migration auto-applies (marks applied without running) and lets the
 * subsequent migrations catch the DB up to the latest schema. This guarantees
 * that a minion updated from any past version reaches the latest schema, since
 * unapplied migrations still run individually after the auto-apply.
 */

module.exports = {
  version: 0,
  name: 'initial_schema',

  // `memories` is present in every pre-refactor DB, so its existence means the
  // initial schema must not be re-created (CREATE TABLE would fail).
  shouldAutoApply(db) {
    // Required lazily — presumably to avoid a module load-order dependency;
    // TODO(review): confirm a top-level require would also be safe.
    const { tableExists } = require('../helpers')
    return tableExists(db, 'memories')
  },

  // Creates every table and index of the original schema in one script.
  // Runs inside the transaction opened by runMigrations().
  up(db) {
    db.exec(`
      -- ==================== memories ====================
      CREATE TABLE memories (
        id TEXT PRIMARY KEY,
        title TEXT NOT NULL DEFAULT 'Untitled',
        category TEXT NOT NULL DEFAULT 'reference'
          CHECK (category IN ('user', 'feedback', 'project', 'reference')),
        content TEXT NOT NULL DEFAULT '',
        created_at TEXT NOT NULL,
        updated_at TEXT NOT NULL
      );

      CREATE INDEX idx_memories_category ON memories(category);
      CREATE INDEX idx_memories_updated_at ON memories(updated_at DESC);

      -- ==================== daily_logs ====================
      CREATE TABLE daily_logs (
        date TEXT PRIMARY KEY,
        content TEXT NOT NULL DEFAULT '',
        created_at TEXT NOT NULL DEFAULT (datetime('now')),
        updated_at TEXT NOT NULL DEFAULT (datetime('now'))
      );

      -- ==================== workspaces (cache from HQ) ====================
      CREATE TABLE workspaces (
        id TEXT PRIMARY KEY,
        name TEXT NOT NULL,
        slug TEXT NOT NULL,
        updated_at INTEGER NOT NULL
      );

      -- ==================== chat_sessions ====================
      CREATE TABLE chat_sessions (
        session_id TEXT PRIMARY KEY,
        turn_count INTEGER NOT NULL DEFAULT 0,
        created_at INTEGER NOT NULL,
        updated_at INTEGER NOT NULL
      );

      CREATE TABLE chat_messages (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        session_id TEXT NOT NULL REFERENCES chat_sessions(session_id) ON DELETE CASCADE,
        role TEXT NOT NULL,
        content TEXT NOT NULL,
        timestamp INTEGER NOT NULL
      );

      CREATE INDEX idx_chat_messages_session ON chat_messages(session_id, id);

      -- ==================== executions ====================
      CREATE TABLE executions (
        id TEXT PRIMARY KEY,
        skill_name TEXT,
        workflow_id TEXT,
        status TEXT,
        created_at TEXT NOT NULL DEFAULT (datetime('now')),
        data TEXT NOT NULL
      );

      CREATE INDEX idx_executions_workflow ON executions(workflow_id);
      CREATE INDEX idx_executions_created ON executions(created_at DESC);

      -- ==================== routines ====================
      CREATE TABLE routines (
        id TEXT PRIMARY KEY,
        name TEXT NOT NULL,
        data TEXT NOT NULL
      );

      CREATE INDEX idx_routines_name ON routines(name);

      -- ==================== workflows ====================
      CREATE TABLE workflows (
        id TEXT PRIMARY KEY,
        name TEXT NOT NULL,
        data TEXT NOT NULL
      );

      CREATE INDEX idx_workflows_name ON workflows(name);

      -- ==================== todos ====================
      CREATE TABLE todos (
        id TEXT PRIMARY KEY,
        title TEXT NOT NULL,
        description TEXT,
        status TEXT NOT NULL DEFAULT 'pending'
          CHECK (status IN ('pending', 'in_progress', 'done', 'cancelled')),
        priority TEXT NOT NULL DEFAULT 'normal'
          CHECK (priority IN ('low', 'normal', 'high', 'urgent')),
        source_type TEXT
          CHECK (source_type IS NULL OR source_type IN ('thread', 'workflow', 'directive', 'user', 'self')),
        source_id TEXT,
        project_id TEXT,
        due_at TEXT,
        created_at TEXT NOT NULL DEFAULT (datetime('now')),
        updated_at TEXT NOT NULL DEFAULT (datetime('now')),
        completed_at TEXT,
        data TEXT
      );

      CREATE INDEX idx_todos_status ON todos(status);
      CREATE INDEX idx_todos_project ON todos(project_id);
      CREATE INDEX idx_todos_priority ON todos(priority);

      -- ==================== emails ====================
      CREATE TABLE emails (
        id TEXT PRIMARY KEY,
        from_address TEXT NOT NULL,
        to_address TEXT NOT NULL,
        subject TEXT DEFAULT '',
        body_text TEXT DEFAULT '',
        body_html TEXT DEFAULT '',
        received_at TEXT NOT NULL,
        is_read INTEGER DEFAULT 0,
        labels TEXT DEFAULT '[]'
      );

      CREATE INDEX idx_emails_received_at ON emails(received_at DESC);
      CREATE INDEX idx_emails_from ON emails(from_address);
      CREATE INDEX idx_emails_is_read ON emails(is_read);

      -- ==================== email_attachments ====================
      CREATE TABLE email_attachments (
        id TEXT PRIMARY KEY,
        email_id TEXT NOT NULL REFERENCES emails(id) ON DELETE CASCADE,
        filename TEXT NOT NULL,
        content_type TEXT,
        size_bytes INTEGER DEFAULT 0,
        approved INTEGER DEFAULT 0,
        data BLOB
      );

      CREATE INDEX idx_email_attachments_email ON email_attachments(email_id);
    `)
  },
}
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
/**
 * Create FTS5 virtual tables for memories and daily_logs with the trigram
 * tokenizer (CJK/Japanese substring search support).
 *
 * Uses DROP IF EXISTS + CREATE so it doubles as a repair for DBs that were
 * originally created with the default unicode61 tokenizer.
 */

module.exports = {
  version: 1,
  name: 'fts_trigram',

  // These are external-content FTS tables (content=<table>), so the index must
  // be maintained by triggers: inserts feed rows in, while deletes/updates must
  // issue the special ('delete', ...) command before re-inserting. The final
  // 'rebuild' commands repopulate both indexes from existing rows.
  up(db) {
    db.exec(`
      DROP TRIGGER IF EXISTS memories_ai;
      DROP TRIGGER IF EXISTS memories_ad;
      DROP TRIGGER IF EXISTS memories_au;
      DROP TABLE IF EXISTS memories_fts;

      DROP TRIGGER IF EXISTS daily_logs_ai;
      DROP TRIGGER IF EXISTS daily_logs_ad;
      DROP TRIGGER IF EXISTS daily_logs_au;
      DROP TABLE IF EXISTS daily_logs_fts;

      CREATE VIRTUAL TABLE memories_fts USING fts5(
        title,
        content,
        content=memories,
        content_rowid=rowid,
        tokenize='trigram'
      );

      CREATE TRIGGER memories_ai AFTER INSERT ON memories BEGIN
        INSERT INTO memories_fts(rowid, title, content)
        VALUES (new.rowid, new.title, new.content);
      END;

      CREATE TRIGGER memories_ad AFTER DELETE ON memories BEGIN
        INSERT INTO memories_fts(memories_fts, rowid, title, content)
        VALUES ('delete', old.rowid, old.title, old.content);
      END;

      CREATE TRIGGER memories_au AFTER UPDATE ON memories BEGIN
        INSERT INTO memories_fts(memories_fts, rowid, title, content)
        VALUES ('delete', old.rowid, old.title, old.content);
        INSERT INTO memories_fts(rowid, title, content)
        VALUES (new.rowid, new.title, new.content);
      END;

      CREATE VIRTUAL TABLE daily_logs_fts USING fts5(
        content,
        content=daily_logs,
        content_rowid=rowid,
        tokenize='trigram'
      );

      CREATE TRIGGER daily_logs_ai AFTER INSERT ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(rowid, content)
        VALUES (new.rowid, new.content);
      END;

      CREATE TRIGGER daily_logs_ad AFTER DELETE ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(daily_logs_fts, rowid, content)
        VALUES ('delete', old.rowid, old.content);
      END;

      CREATE TRIGGER daily_logs_au AFTER UPDATE ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(daily_logs_fts, rowid, content)
        VALUES ('delete', old.rowid, old.content);
        INSERT INTO daily_logs_fts(rowid, content)
        VALUES (new.rowid, new.content);
      END;

      INSERT INTO memories_fts(memories_fts) VALUES ('rebuild');
      INSERT INTO daily_logs_fts(daily_logs_fts) VALUES ('rebuild');
    `)
  },
}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
 * Add FTS5 virtual table for emails.subject and emails.body_text (trigram).
 */

module.exports = {
  version: 2,
  name: 'emails_fts',

  // Idempotent: FTS5 virtual tables show up in sqlite_master as type='table',
  // so tableExists() doubles as an already-applied guard.
  up(db, { tableExists }) {
    if (tableExists(db, 'emails_fts')) return

    // External-content FTS table, maintained by triggers (same pattern as
    // migration 001); the final 'rebuild' indexes pre-existing emails.
    db.exec(`
      CREATE VIRTUAL TABLE emails_fts USING fts5(
        subject,
        body_text,
        content=emails,
        content_rowid=rowid,
        tokenize='trigram'
      );

      CREATE TRIGGER emails_ai AFTER INSERT ON emails BEGIN
        INSERT INTO emails_fts(rowid, subject, body_text)
        VALUES (new.rowid, new.subject, new.body_text);
      END;

      CREATE TRIGGER emails_ad AFTER DELETE ON emails BEGIN
        INSERT INTO emails_fts(emails_fts, rowid, subject, body_text)
        VALUES ('delete', old.rowid, old.subject, old.body_text);
      END;

      CREATE TRIGGER emails_au AFTER UPDATE ON emails BEGIN
        INSERT INTO emails_fts(emails_fts, rowid, subject, body_text)
        VALUES ('delete', old.rowid, old.subject, old.body_text);
        INSERT INTO emails_fts(rowid, subject, body_text)
        VALUES (new.rowid, new.subject, new.body_text);
      END;

      INSERT INTO emails_fts(emails_fts) VALUES ('rebuild');
    `)
  },
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
 * Add memories.project_id for linking memories to projects.
 */

module.exports = {
  version: 3,
  name: 'memories_project_id',

  // Idempotent: skip when the column already exists (e.g. manually patched DB).
  up(db, { hasColumn }) {
    if (hasColumn(db, 'memories', 'project_id')) return

    db.exec(`
      ALTER TABLE memories ADD COLUMN project_id TEXT DEFAULT NULL;
      CREATE INDEX IF NOT EXISTS idx_memories_project_id ON memories(project_id);
    `)
  },
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
 * Add chat_sessions.workspace_id so chat history can be scoped per workspace.
 */

module.exports = {
  version: 4,
  name: 'chat_sessions_workspace',

  // Idempotent: skip when the column already exists.
  up(db, { hasColumn }) {
    if (hasColumn(db, 'chat_sessions', 'workspace_id')) return

    // Composite index matches the expected "sessions of a workspace, newest
    // first" query shape.
    db.exec(`
      ALTER TABLE chat_sessions ADD COLUMN workspace_id TEXT DEFAULT NULL;
      CREATE INDEX IF NOT EXISTS idx_chat_sessions_workspace
        ON chat_sessions(workspace_id, updated_at DESC);
    `)
  },
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Add todos.session_id (link to chat session that created the todo) and
 * todos.injection_count (how many times the todo has been injected into chat).
 */

module.exports = {
  version: 5,
  name: 'todos_session_injection',

  // Each column is guarded separately so a partially-patched DB still
  // converges on the full schema.
  up(db, { hasColumn }) {
    if (!hasColumn(db, 'todos', 'session_id')) {
      db.exec(`ALTER TABLE todos ADD COLUMN session_id TEXT`)
    }
    if (!hasColumn(db, 'todos', 'injection_count')) {
      db.exec(`ALTER TABLE todos ADD COLUMN injection_count INTEGER NOT NULL DEFAULT 0`)
    }
    db.exec(`CREATE INDEX IF NOT EXISTS idx_todos_session ON todos(session_id)`)
  },
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
 * Scope daily_logs by workspace_id. SQLite cannot alter a PRIMARY KEY in place,
 * so the table is rebuilt with a composite (workspace_id, date) PK.
 *
 * Existing rows are backfilled to the first workspace by name — or '' (legacy
 * bucket) if the workspaces cache is empty (minion hasn't synced yet).
 */

module.exports = {
  version: 6,
  name: 'daily_logs_workspace',

  // Idempotent: skip when the column already exists.
  up(db, { hasColumn }) {
    if (hasColumn(db, 'daily_logs', 'workspace_id')) return

    // Order matters: the FTS index and its triggers reference daily_logs, so
    // they are dropped before the rename/copy/drop rebuild, then recreated and
    // repopulated ('rebuild') at the end.
    db.exec(`
      DROP TRIGGER IF EXISTS daily_logs_ai;
      DROP TRIGGER IF EXISTS daily_logs_ad;
      DROP TRIGGER IF EXISTS daily_logs_au;
      DROP TABLE IF EXISTS daily_logs_fts;

      ALTER TABLE daily_logs RENAME TO daily_logs_old;

      CREATE TABLE daily_logs (
        workspace_id TEXT NOT NULL DEFAULT '',
        date TEXT NOT NULL,
        content TEXT NOT NULL DEFAULT '',
        created_at TEXT NOT NULL DEFAULT (datetime('now')),
        updated_at TEXT NOT NULL DEFAULT (datetime('now')),
        PRIMARY KEY (workspace_id, date)
      );

      CREATE INDEX IF NOT EXISTS idx_daily_logs_date ON daily_logs(date DESC);

      INSERT INTO daily_logs (workspace_id, date, content, created_at, updated_at)
      SELECT COALESCE((SELECT id FROM workspaces ORDER BY name LIMIT 1), ''),
             date, content, created_at, updated_at
      FROM daily_logs_old;

      DROP TABLE daily_logs_old;

      CREATE VIRTUAL TABLE daily_logs_fts USING fts5(
        content,
        content=daily_logs,
        content_rowid=rowid,
        tokenize='trigram'
      );

      CREATE TRIGGER daily_logs_ai AFTER INSERT ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(rowid, content)
        VALUES (new.rowid, new.content);
      END;

      CREATE TRIGGER daily_logs_ad AFTER DELETE ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(daily_logs_fts, rowid, content)
        VALUES ('delete', old.rowid, old.content);
      END;

      CREATE TRIGGER daily_logs_au AFTER UPDATE ON daily_logs BEGIN
        INSERT INTO daily_logs_fts(daily_logs_fts, rowid, content)
        VALUES ('delete', old.rowid, old.content);
        INSERT INTO daily_logs_fts(rowid, content)
        VALUES (new.rowid, new.content);
      END;

      INSERT INTO daily_logs_fts(daily_logs_fts) VALUES ('rebuild');
    `)
  },
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
 * Scope workflows, routines, and executions by workspace_id.
 *
 * Existing rows keep workspace_id = '' (legacy bucket); they'll be naturally
 * overwritten as HQ re-syncs workflows and routines after the upgrade.
 */

module.exports = {
  version: 7,
  name: 'workspace_scoping',

  // Each column is guarded separately so a partially-patched DB still
  // converges on the full schema.
  up(db, { hasColumn }) {
    if (!hasColumn(db, 'workflows', 'workspace_id')) {
      db.exec(`ALTER TABLE workflows ADD COLUMN workspace_id TEXT NOT NULL DEFAULT ''`)
    }
    if (!hasColumn(db, 'routines', 'workspace_id')) {
      db.exec(`ALTER TABLE routines ADD COLUMN workspace_id TEXT NOT NULL DEFAULT ''`)
    }
    if (!hasColumn(db, 'executions', 'workspace_id')) {
      db.exec(`ALTER TABLE executions ADD COLUMN workspace_id TEXT NOT NULL DEFAULT ''`)
    }

    db.exec(`
      CREATE INDEX IF NOT EXISTS idx_workflows_workspace ON workflows(workspace_id, name);
      CREATE INDEX IF NOT EXISTS idx_routines_workspace ON routines(workspace_id, name);
      CREATE INDEX IF NOT EXISTS idx_executions_workspace ON executions(workspace_id, created_at DESC);
    `)
  },
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
 * Scope todos by workspace_id.
 *
 * Existing rows stay at '' (legacy bucket) until the caller (chat /
 * thread-watcher / directive) starts passing workspace_id explicitly.
 */

module.exports = {
  version: 8,
  name: 'todos_workspace',

  // Idempotent: skip the ALTER when the column already exists; the indexes use
  // IF NOT EXISTS so they are always safe to (re-)issue.
  up(db, { hasColumn }) {
    if (!hasColumn(db, 'todos', 'workspace_id')) {
      db.exec(`ALTER TABLE todos ADD COLUMN workspace_id TEXT NOT NULL DEFAULT ''`)
    }

    db.exec(`
      CREATE INDEX IF NOT EXISTS idx_todos_workspace ON todos(workspace_id, status);
      CREATE INDEX IF NOT EXISTS idx_todos_workspace_session ON todos(workspace_id, session_id);
    `)
  },
}
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
 * Auto-loads all migration files in this directory.
 *
 * Naming conventions:
 * - Legacy (frozen, 000-008): NNN_<name>.js
 * - New migrations: YYYYMMDDHHMMSS_<name>.js
 *
 * Both sort correctly in lexicographic order because "008_..." < "20260101000000_...".
 *
 * Each migration module must export:
 * - version: number (unique integer; used as schema_version primary key)
 * - name: string (human-readable identifier)
 * - up(db, helpers) (applies the change; runs in a transaction)
 * - shouldAutoApply?(db) (optional; return true to mark applied without running up())
 */

const fs = require('fs')
const path = require('path')

// Every .js file here except this loader is a migration, in filename order.
const migrationFiles = fs.readdirSync(__dirname)
  .filter(name => name !== 'index.js' && name.endsWith('.js'))
  .sort()

const migrations = []
for (const name of migrationFiles) {
  const mod = require(path.join(__dirname, name))
  const shapeOk =
    typeof mod.version === 'number' && mod.name && typeof mod.up === 'function'
  if (!shapeOk) {
    throw new Error(`[DB] Invalid migration file: ${name} (must export { version, name, up })`)
  }
  migrations.push(mod)
}

// Guard against duplicate version numbers
const versions = new Set()
for (const migration of migrations) {
  if (versions.has(migration.version)) {
    throw new Error(`[DB] Duplicate migration version: ${migration.version}`)
  }
  versions.add(migration.version)
}

module.exports = migrations
|