or3-provider-sqlite 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +109 -0
- package/dist/module.d.mts +5 -0
- package/dist/module.json +9 -0
- package/dist/module.mjs +11 -0
- package/dist/runtime/server/admin/adapters/sync-sqlite.d.ts +2 -0
- package/dist/runtime/server/admin/adapters/sync-sqlite.js +72 -0
- package/dist/runtime/server/admin/stores/sqlite-store.d.ts +4 -0
- package/dist/runtime/server/admin/stores/sqlite-store.js +336 -0
- package/dist/runtime/server/auth/sqlite-auth-workspace-store.d.ts +111 -0
- package/dist/runtime/server/auth/sqlite-auth-workspace-store.js +349 -0
- package/dist/runtime/server/db/kysely.d.ts +32 -0
- package/dist/runtime/server/db/kysely.js +62 -0
- package/dist/runtime/server/db/migrate.d.ts +10 -0
- package/dist/runtime/server/db/migrate.js +38 -0
- package/dist/runtime/server/db/migrations/001_init.d.ts +6 -0
- package/dist/runtime/server/db/migrations/001_init.js +31 -0
- package/dist/runtime/server/db/migrations/002_sync_tables.d.ts +6 -0
- package/dist/runtime/server/db/migrations/002_sync_tables.js +55 -0
- package/dist/runtime/server/db/migrations/003_sync_hardening.d.ts +9 -0
- package/dist/runtime/server/db/migrations/003_sync_hardening.js +67 -0
- package/dist/runtime/server/db/migrations/004_auth_invites.d.ts +3 -0
- package/dist/runtime/server/db/migrations/004_auth_invites.js +18 -0
- package/dist/runtime/server/db/migrations/005_admin_stores.d.ts +7 -0
- package/dist/runtime/server/db/migrations/005_admin_stores.js +12 -0
- package/dist/runtime/server/db/schema.d.ts +138 -0
- package/dist/runtime/server/db/schema.js +10 -0
- package/dist/runtime/server/plugins/register.d.ts +2 -0
- package/dist/runtime/server/plugins/register.js +48 -0
- package/dist/runtime/server/sync/sqlite-sync-gateway-adapter.d.ts +36 -0
- package/dist/runtime/server/sync/sqlite-sync-gateway-adapter.js +366 -0
- package/dist/types.d.mts +7 -0
- package/package.json +54 -0
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
import { getRawDb, getSqliteDb } from "../db/kysely.js";
|
|
2
|
+
import { randomUUID } from "node:crypto";
|
|
3
|
+
/** Generate a fresh opaque identifier (random UUID v4). */
function uid() {
  const id = randomUUID();
  return id;
}
|
|
6
|
+
/** Current Unix time in whole seconds. */
function nowEpoch() {
  const millis = Date.now();
  return Math.trunc(millis / 1000);
}
|
|
9
|
+
/** Canonicalize an email address for storage and lookup: trim surrounding whitespace, then lowercase. */
function normalizeEmail(email) {
  const trimmed = email.trim();
  return trimmed.toLowerCase();
}
|
|
12
|
+
/**
 * Auth + workspace persistence backed by SQLite.
 *
 * Read/update paths use the Kysely query builder; the racy get-or-create
 * paths (getOrCreateUser, consumeInvite) drop down to the raw
 * better-sqlite3 handle so they can run inside a synchronous IMMEDIATE
 * transaction (better-sqlite3 transactions cannot await).
 */
export class SqliteAuthWorkspaceStore {
  // Lazily resolves the shared Kysely singleton on every access.
  get db() {
    return getSqliteDb();
  }
  /**
   * Resolve (or create) the user owning the given provider account.
   *
   * Uses INSERT OR IGNORE followed by a re-select so that two concurrent
   * logins for the same (provider, provider_user_id) converge on a single
   * winner row; the loser's speculative user row is deleted if no auth
   * account ended up referencing it.
   *
   * @param input - { provider, providerUserId, email?, displayName? }
   * @returns { userId } of the winning user row
   * @throws if the auth account cannot be resolved after the upsert attempt
   */
  async getOrCreateUser(input) {
    this.db; // force singleton initialization so getRawDb() below succeeds
    const raw = getRawDb();
    const userId = raw.transaction(() => {
      // Fast path: account already linked to a user.
      const existing = raw.prepare(
        `SELECT user_id
         FROM auth_accounts
         WHERE provider = ? AND provider_user_id = ?`
      ).get(input.provider, input.providerUserId);
      if (existing) {
        return existing.user_id;
      }
      const candidateUserId = uid();
      const now = nowEpoch();
      // Speculatively create the user; IGNORE tolerates id collisions.
      raw.prepare(
        `INSERT OR IGNORE INTO users (id, email, display_name, active_workspace_id, created_at)
         VALUES (?, ?, ?, NULL, ?)`
      ).run(
        candidateUserId,
        input.email ?? null,
        input.displayName ?? null,
        now
      );
      // Link the provider account; the unique (provider, provider_user_id)
      // index makes concurrent inserts collapse to one row via IGNORE.
      raw.prepare(
        `INSERT OR IGNORE INTO auth_accounts (id, user_id, provider, provider_user_id, created_at)
         VALUES (?, ?, ?, ?, ?)`
      ).run(
        uid(),
        candidateUserId,
        input.provider,
        input.providerUserId,
        now
      );
      // Re-select to learn which insert actually won.
      const winner = raw.prepare(
        `SELECT user_id
         FROM auth_accounts
         WHERE provider = ? AND provider_user_id = ?`
      ).get(input.provider, input.providerUserId);
      if (!winner) {
        throw new Error("Failed to resolve auth account after get-or-create attempt");
      }
      if (winner.user_id !== candidateUserId) {
        // We lost the race: remove our speculative user row, but only if
        // no auth account references it.
        raw.prepare(
          `DELETE FROM users
           WHERE id = ?
           AND NOT EXISTS (
             SELECT 1 FROM auth_accounts WHERE user_id = ?
           )`
        ).run(candidateUserId, candidateUserId);
      }
      return winner.user_id;
    }).immediate();
    return { userId };
  }
  /**
   * Look up the user linked to a provider account.
   * @returns { userId, email?, displayName? } or null when unlinked.
   */
  async getUser(input) {
    const db = this.db;
    const row = await db.selectFrom("auth_accounts").innerJoin("users", "users.id", "auth_accounts.user_id").select([
      "users.id as user_id",
      "users.email as email",
      "users.display_name as display_name"
    ]).where("auth_accounts.provider", "=", input.provider).where("auth_accounts.provider_user_id", "=", input.providerUserId).executeTakeFirst();
    if (!row) return null;
    return {
      userId: row.user_id,
      email: row.email ?? void 0,
      displayName: row.display_name ?? void 0
    };
  }
  /**
   * Return the user's active workspace, repairing or creating one as needed:
   * 1. active_workspace_id, when the user is still a member and it is live;
   * 2. otherwise the user's oldest membership (and re-point the active id);
   * 3. otherwise create a fresh "My Workspace" with the user as owner.
   */
  async getOrCreateDefaultWorkspace(userId) {
    const db = this.db;
    // Case 1: active workspace is valid (membership intact, not deleted).
    const activeMembership = await db.selectFrom("users").innerJoin("workspaces", "workspaces.id", "users.active_workspace_id").innerJoin(
      "workspace_members",
      (join) => join.onRef(
        "workspace_members.workspace_id",
        "=",
        "workspaces.id"
      ).onRef("workspace_members.user_id", "=", "users.id")
    ).select(["workspaces.id", "workspaces.name"]).where("users.id", "=", userId).where("workspaces.deleted", "=", 0).executeTakeFirst();
    if (activeMembership) {
      return {
        workspaceId: activeMembership.id,
        workspaceName: activeMembership.name
      };
    }
    // Case 2: fall back to the oldest surviving membership.
    const membership = await db.selectFrom("workspace_members").innerJoin("workspaces", "workspaces.id", "workspace_members.workspace_id").select([
      "workspaces.id",
      "workspaces.name"
    ]).where("workspace_members.user_id", "=", userId).where("workspaces.deleted", "=", 0).orderBy("workspace_members.created_at", "asc").executeTakeFirst();
    if (membership) {
      await db.updateTable("users").set({ active_workspace_id: membership.id }).where("id", "=", userId).execute();
      return { workspaceId: membership.id, workspaceName: membership.name };
    }
    // Case 3: bootstrap a brand-new default workspace.
    const workspaceId = uid();
    const memberId = uid();
    const now = nowEpoch();
    const name = "My Workspace";
    await db.transaction().execute(async (tx) => {
      await tx.insertInto("workspaces").values({
        id: workspaceId,
        name,
        description: null,
        owner_user_id: userId,
        created_at: now,
        deleted: 0,
        deleted_at: null
      }).execute();
      await tx.insertInto("workspace_members").values({
        id: memberId,
        workspace_id: workspaceId,
        user_id: userId,
        role: "owner",
        created_at: now
      }).execute();
      await tx.updateTable("users").set({ active_workspace_id: workspaceId }).where("id", "=", userId).execute();
    });
    return { workspaceId, workspaceName: name };
  }
  /** Return the user's role in the workspace, or null when not a member. */
  async getWorkspaceRole(input) {
    const db = this.db;
    const member = await db.selectFrom("workspace_members").select("role").where("workspace_id", "=", input.workspaceId).where("user_id", "=", input.userId).executeTakeFirst();
    if (!member) return null;
    return member.role;
  }
  /**
   * List every live workspace the user belongs to, flagging which one is
   * currently active (matches users.active_workspace_id).
   */
  async listUserWorkspaces(userId) {
    const db = this.db;
    const user = await db.selectFrom("users").select("active_workspace_id").where("id", "=", userId).executeTakeFirst();
    const activeId = user?.active_workspace_id ?? null;
    const rows = await db.selectFrom("workspace_members").innerJoin("workspaces", "workspaces.id", "workspace_members.workspace_id").select([
      "workspaces.id",
      "workspaces.name",
      "workspaces.description",
      "workspaces.created_at",
      "workspace_members.role"
    ]).where("workspace_members.user_id", "=", userId).where("workspaces.deleted", "=", 0).execute();
    return rows.map((r) => ({
      id: r.id,
      name: r.name,
      description: r.description,
      role: r.role,
      createdAt: r.created_at,
      isActive: r.id === activeId
    }));
  }
  /**
   * Create a workspace owned by input.userId, inserting the workspace row
   * and the owner membership atomically.
   * Note: unlike getOrCreateDefaultWorkspace, this does NOT change the
   * user's active workspace.
   */
  async createWorkspace(input) {
    const db = this.db;
    const workspaceId = uid();
    const memberId = uid();
    const now = nowEpoch();
    await db.transaction().execute(async (tx) => {
      await tx.insertInto("workspaces").values({
        id: workspaceId,
        name: input.name,
        description: input.description ?? null,
        owner_user_id: input.userId,
        created_at: now,
        deleted: 0,
        deleted_at: null
      }).execute();
      await tx.insertInto("workspace_members").values({
        id: memberId,
        workspace_id: workspaceId,
        user_id: input.userId,
        role: "owner",
        created_at: now
      }).execute();
    });
    return { workspaceId };
  }
  /**
   * Rename / re-describe a workspace. Any non-viewer member may edit.
   * @throws Error("Forbidden...") when caller is not a member or is a viewer.
   */
  async updateWorkspace(input) {
    const db = this.db;
    const member = await db.selectFrom("workspace_members").select("role").where("workspace_id", "=", input.workspaceId).where("user_id", "=", input.userId).executeTakeFirst();
    if (!member || member.role === "viewer") {
      throw new Error("Forbidden: insufficient workspace role");
    }
    // description is overwritten with null when omitted (full replace).
    await db.updateTable("workspaces").set({
      name: input.name,
      description: input.description ?? null
    }).where("id", "=", input.workspaceId).where("deleted", "=", 0).execute();
  }
  /**
   * Soft-delete a workspace (owner only) and re-point every user whose
   * active workspace was this one to some other live workspace they belong
   * to (or null if none).
   * NOTE(review): the replacement workspace query has no ORDER BY, so the
   * new active workspace choice is driver-dependent — confirm intended.
   */
  async removeWorkspace(input) {
    const db = this.db;
    const member = await db.selectFrom("workspace_members").select("role").where("workspace_id", "=", input.workspaceId).where("user_id", "=", input.userId).executeTakeFirst();
    if (!member || member.role !== "owner") {
      throw new Error("Forbidden: only owner can remove workspace");
    }
    const now = nowEpoch();
    await db.transaction().execute(async (tx) => {
      await tx.updateTable("workspaces").set({ deleted: 1, deleted_at: now }).where("id", "=", input.workspaceId).execute();
      const affectedUsers = await tx.selectFrom("users").select("id").where("active_workspace_id", "=", input.workspaceId).execute();
      for (const affectedUser of affectedUsers) {
        const next = await tx.selectFrom("workspace_members").innerJoin("workspaces", "workspaces.id", "workspace_members.workspace_id").select("workspaces.id").where("workspace_members.user_id", "=", affectedUser.id).where("workspaces.deleted", "=", 0).where("workspaces.id", "!=", input.workspaceId).executeTakeFirst();
        await tx.updateTable("users").set({ active_workspace_id: next?.id ?? null }).where("id", "=", affectedUser.id).execute();
      }
    });
  }
  /**
   * Switch the user's active workspace after verifying live membership.
   * @throws Error("Forbidden...") when the user is not a member.
   */
  async setActiveWorkspace(input) {
    const db = this.db;
    const member = await db.selectFrom("workspace_members").innerJoin("workspaces", "workspaces.id", "workspace_members.workspace_id").select("workspace_members.id").where("workspace_members.workspace_id", "=", input.workspaceId).where("workspace_members.user_id", "=", input.userId).where("workspaces.deleted", "=", 0).executeTakeFirst();
    if (!member) {
      throw new Error("Forbidden: not a member of this workspace");
    }
    await db.updateTable("users").set({ active_workspace_id: input.workspaceId }).where("id", "=", input.userId).execute();
  }
  /**
   * Record a pending invite. The caller supplies the token hash and expiry;
   * no permission check happens here — callers are expected to authorize.
   */
  async createInvite(input) {
    const db = this.db;
    const inviteId = uid();
    const now = nowEpoch();
    await db.insertInto("auth_invites").values({
      id: inviteId,
      workspace_id: input.workspaceId,
      email: normalizeEmail(input.email),
      role: input.role,
      status: "pending",
      invited_by_user_id: input.invitedByUserId,
      token_hash: input.tokenHash,
      expires_at: input.expiresAt,
      accepted_at: null,
      accepted_user_id: null,
      revoked_at: null,
      created_at: now,
      updated_at: now
    }).execute();
    return { inviteId };
  }
  /**
   * List invites for a workspace, newest first, optionally filtered by
   * status. Lazily flips overdue pending invites to "expired" first, so
   * results reflect the current wall clock. Limit is clamped to [1, 500]
   * (default 100).
   */
  async listInvites(input) {
    const db = this.db;
    const now = nowEpoch();
    await db.updateTable("auth_invites").set({ status: "expired", updated_at: now }).where("workspace_id", "=", input.workspaceId).where("status", "=", "pending").where("expires_at", "<=", now).execute();
    let query = db.selectFrom("auth_invites").selectAll().where("workspace_id", "=", input.workspaceId).orderBy("created_at", "desc");
    if (input.status) {
      query = query.where("status", "=", input.status);
    }
    const rows = await query.limit(Math.max(1, Math.min(input.limit ?? 100, 500))).execute();
    return rows.map((row) => ({
      id: row.id,
      workspaceId: row.workspace_id,
      email: row.email,
      role: row.role,
      status: row.status,
      invitedByUserId: row.invited_by_user_id,
      expiresAt: row.expires_at,
      tokenHash: row.token_hash,
      acceptedAt: row.accepted_at,
      revokedAt: row.revoked_at,
      acceptedUserId: row.accepted_user_id,
      createdAt: row.created_at,
      updatedAt: row.updated_at
    }));
  }
  /**
   * Revoke a pending invite. Silently no-ops when the invite is already in
   * a terminal state.
   * @throws Error("Invite not found") when the id/workspace pair is unknown.
   */
  async revokeInvite(input) {
    const db = this.db;
    const now = nowEpoch();
    const row = await db.selectFrom("auth_invites").select(["id", "status"]).where("workspace_id", "=", input.workspaceId).where("id", "=", input.inviteId).executeTakeFirst();
    if (!row) {
      throw new Error("Invite not found");
    }
    if (row.status !== "pending") return;
    await db.updateTable("auth_invites").set({
      status: "revoked",
      revoked_at: now,
      updated_at: now
    }).where("workspace_id", "=", input.workspaceId).where("id", "=", input.inviteId).execute();
    // Accepted for interface parity; the revoking user is not persisted.
    void input.revokedByUserId;
  }
  /**
   * Atomically consume an invite: validate status/expiry/token, mark it
   * accepted, upsert the membership with the invited role, and make the
   * workspace the accepting user's active one. Runs as a single raw
   * IMMEDIATE transaction so concurrent consumers cannot double-accept.
   *
   * @returns { ok: true, role } on success, or { ok: false, reason } with
   *   reason in: not_found | revoked | already_used | expired | token_mismatch
   *
   * NOTE(review): the lookup selects the OLDEST invite for this email in the
   * workspace regardless of status, so a newer pending invite behind an old
   * revoked/expired/accepted one is unreachable — confirm intended.
   * NOTE(review): token_hash comparison is a plain !== (not constant-time);
   * presumably acceptable for hash-vs-hash — confirm threat model.
   */
  async consumeInvite(input) {
    this.db; // force singleton initialization so getRawDb() below succeeds
    const raw = getRawDb();
    const now = nowEpoch();
    const normalized = normalizeEmail(input.email);
    return raw.transaction(() => {
      // Lazily expire overdue pending invites for this workspace first.
      raw.prepare(
        `UPDATE auth_invites
         SET status = 'expired', updated_at = ?
         WHERE workspace_id = ?
         AND status = 'pending'
         AND expires_at <= ?`
      ).run(now, input.workspaceId, now);
      const invite = raw.prepare(
        `SELECT *
         FROM auth_invites
         WHERE workspace_id = ?
         AND email = ?
         ORDER BY created_at ASC
         LIMIT 1`
      ).get(input.workspaceId, normalized);
      if (!invite) {
        return { ok: false, reason: "not_found" };
      }
      if (invite.status === "revoked") {
        return { ok: false, reason: "revoked" };
      }
      if (invite.status === "accepted") {
        return { ok: false, reason: "already_used" };
      }
      if (invite.status === "expired" || invite.expires_at <= now) {
        return { ok: false, reason: "expired" };
      }
      if (invite.token_hash !== input.tokenHash) {
        return { ok: false, reason: "token_mismatch" };
      }
      raw.prepare(
        `UPDATE auth_invites
         SET status = 'accepted', accepted_at = ?, accepted_user_id = ?, updated_at = ?
         WHERE id = ?`
      ).run(now, input.acceptedUserId, now, invite.id);
      // Upsert membership: update role when already a member, else insert.
      const existingMember = raw.prepare(
        `SELECT id FROM workspace_members
         WHERE workspace_id = ? AND user_id = ?
         LIMIT 1`
      ).get(input.workspaceId, input.acceptedUserId);
      if (existingMember) {
        raw.prepare(
          `UPDATE workspace_members
           SET role = ?
           WHERE id = ?`
        ).run(invite.role, existingMember.id);
      } else {
        raw.prepare(
          `INSERT INTO workspace_members (id, workspace_id, user_id, role, created_at)
           VALUES (?, ?, ?, ?, ?)`
        ).run(uid(), input.workspaceId, input.acceptedUserId, invite.role, now);
      }
      // Joining a workspace makes it the user's active one.
      raw.prepare(
        `UPDATE users
         SET active_workspace_id = ?
         WHERE id = ?`
      ).run(input.workspaceId, input.acceptedUserId);
      return { ok: true, role: invite.role };
    }).immediate();
  }
}
|
|
347
|
+
/** Factory helper so consumers can obtain a store without importing the class directly. */
export function createSqliteAuthWorkspaceStore() {
  const store = new SqliteAuthWorkspaceStore();
  return store;
}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
/**
 * Kysely singleton for the SQLite provider.
 *
 * Creates a single Kysely instance backed by better-sqlite3.
 * Applies WAL + NORMAL pragmas for safe concurrent reads.
 */
import { Kysely } from 'kysely';
import Database from 'better-sqlite3';
import type { Or3SqliteDb } from './schema.js';
export interface SqliteDbOptions {
    /** Filesystem path to the database file (":memory:" for ephemeral storage). */
    path: string;
    /** PRAGMA journal_mode value; the implementation defaults to "WAL". */
    journalMode?: string;
    /** PRAGMA synchronous value; the implementation defaults to "NORMAL". */
    synchronous?: string;
}
/**
 * Get or create the singleton Kysely DB.
 * First call initializes the connection and sets pragmas.
 * NOTE(review): options are only honored on the first call; subsequent
 * calls return the existing instance unchanged — confirm callers expect this.
 */
export declare function getSqliteDb(options?: SqliteDbOptions): Kysely<Or3SqliteDb>;
/**
 * Get the underlying better-sqlite3 instance for raw transactions.
 * Only available after getSqliteDb() has been called.
 * @throws when the DB has not been initialized yet
 */
export declare function getRawDb(): InstanceType<typeof Database>;
/**
 * Destroy the connection (for tests/cleanup).
 */
export declare function destroySqliteDb(): Promise<void>;
/**
 * Reset the module-level singleton (for tests only).
 */
export declare function _resetForTest(): void;
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import { Kysely, SqliteDialect } from "kysely";
|
|
2
|
+
import Database from "better-sqlite3";
|
|
3
|
+
// Module-level singleton state: the Kysely wrapper and the raw
// better-sqlite3 handle it wraps. Both are set together in getSqliteDb()
// and cleared together in destroySqliteDb() / _resetForTest().
let instance = null;
let rawDb = null;
|
|
5
|
+
/**
 * Interpret an environment variable as a boolean flag.
 * Accepts "1", "true", or "yes" (case-insensitive, surrounding whitespace
 * ignored); anything else — including undefined/empty — is false.
 */
function envFlag(value) {
  if (!value) {
    return false;
  }
  return ["1", "true", "yes"].includes(value.trim().toLowerCase());
}
|
|
10
|
+
/**
 * Get or create the singleton Kysely DB backed by better-sqlite3.
 *
 * Path resolution order: options.path, then OR3_SQLITE_DB_PATH, then
 * ":memory:". Guard rails (checked in this order, which determines which
 * error a misconfiguration produces):
 * 1. non-test env with no configured path requires OR3_SQLITE_ALLOW_IN_MEMORY;
 * 2. OR3_SQLITE_STRICT=true forbids ":memory:" entirely;
 * 3. an explicitly configured ":memory:" outside tests still needs the
 *    allow flag, and logs a data-loss warning when permitted.
 *
 * NOTE(review): options are ignored once the singleton exists — a second
 * call with a different path silently returns the first connection.
 */
export function getSqliteDb(options) {
  if (instance) return instance;
  const isTestEnv = process.env.NODE_ENV === "test" || envFlag(process.env.VITEST);
  const allowInMemory = envFlag(process.env.OR3_SQLITE_ALLOW_IN_MEMORY);
  const strictMode = envFlag(process.env.OR3_SQLITE_STRICT);
  const configuredPath = options?.path ?? process.env.OR3_SQLITE_DB_PATH;
  const path = configuredPath ?? ":memory:";
  if (!configuredPath && !isTestEnv && !allowInMemory) {
    throw new Error(
      "OR3_SQLITE_DB_PATH is required in non-test environments. Set OR3_SQLITE_ALLOW_IN_MEMORY=true only if you intentionally want ephemeral storage."
    );
  }
  if (strictMode && path === ":memory:") {
    throw new Error(
      "OR3_SQLITE_STRICT=true forbids in-memory SQLite. Set OR3_SQLITE_DB_PATH to a persistent file path."
    );
  }
  if (!isTestEnv && path === ":memory:" && !allowInMemory) {
    throw new Error(
      "Using :memory: in non-test environments requires OR3_SQLITE_ALLOW_IN_MEMORY=true."
    );
  }
  if (!isTestEnv && path === ":memory:" && allowInMemory) {
    console.warn(
      "[or3-sqlite] OR3_SQLITE_ALLOW_IN_MEMORY=true enabled. Data will be lost on process restart."
    );
  }
  // Pragma values come from options/env (trusted deployment config) and
  // are interpolated into the PRAGMA statement as-is.
  const journalMode = options?.journalMode ?? process.env.OR3_SQLITE_PRAGMA_JOURNAL_MODE ?? "WAL";
  const synchronous = options?.synchronous ?? process.env.OR3_SQLITE_PRAGMA_SYNCHRONOUS ?? "NORMAL";
  rawDb = new Database(path);
  rawDb.pragma(`journal_mode = ${journalMode}`);
  rawDb.pragma(`synchronous = ${synchronous}`);
  rawDb.pragma("foreign_keys = ON");
  instance = new Kysely({
    dialect: new SqliteDialect({ database: rawDb })
  });
  return instance;
}
|
|
48
|
+
/**
 * Access the raw better-sqlite3 handle (needed for synchronous
 * transactions). Throws until getSqliteDb() has initialized it.
 */
export function getRawDb() {
  if (rawDb === null) {
    throw new Error("SQLite DB not initialized \u2014 call getSqliteDb() first");
  }
  return rawDb;
}
|
|
52
|
+
/**
 * Tear down the singleton connection (for tests/cleanup).
 * No-op when the connection was never opened.
 */
export async function destroySqliteDb() {
  if (!instance) {
    return;
  }
  await instance.destroy();
  instance = null;
  rawDb = null;
}
|
|
59
|
+
/**
 * Drop the module-level singleton references without closing the
 * connection (tests only — prefer destroySqliteDb() for real cleanup).
 */
export function _resetForTest() {
  rawDb = null;
  instance = null;
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/**
 * Migration runner for the SQLite provider.
 * Runs all migrations in order on module init.
 */
import { type Kysely } from 'kysely';
import type { Or3SqliteDb } from './schema.js';
/**
 * Run all pending migrations. Safe to call repeatedly.
 * @param db - the shared Kysely connection (see kysely.d.ts)
 * @throws the underlying migration error when any migration fails
 */
export declare function runMigrations(db: Kysely<Or3SqliteDb>): Promise<void>;
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { Migrator } from "kysely";
|
|
2
|
+
import * as m001 from "./migrations/001_init.js";
|
|
3
|
+
import * as m002 from "./migrations/002_sync_tables.js";
|
|
4
|
+
import * as m003 from "./migrations/003_sync_hardening.js";
|
|
5
|
+
import * as m004 from "./migrations/004_auth_invites.js";
|
|
6
|
+
import * as m005 from "./migrations/005_admin_stores.js";
|
|
7
|
+
// Static registry of migrations. Keys double as migration names; the
// numeric prefixes keep lexicographic order equal to execution order.
const migrations = {
  "001_init": m001,
  "002_sync_tables": m002,
  "003_sync_hardening": m003,
  "004_auth_invites": m004,
  "005_admin_stores": m005
};
|
|
14
|
+
/** Kysely MigrationProvider that serves the static in-module registry. */
class StaticMigrationProvider {
  getMigrations() {
    return Promise.resolve(migrations);
  }
}
|
|
19
|
+
/**
 * Run all pending migrations to the latest version.
 * Logs per-migration outcomes, then rethrows the migrator's error (if any)
 * so callers can abort startup.
 */
export async function runMigrations(db) {
  const migrator = new Migrator({
    db,
    provider: new StaticMigrationProvider()
  });
  const { error, results } = await migrator.migrateToLatest();
  for (const result of results ?? []) {
    if (result.status === "Success") {
      console.log(`[or3-sqlite] migration "${result.migrationName}" applied`);
    } else if (result.status === "Error") {
      console.error(`[or3-sqlite] migration "${result.migrationName}" failed`);
    }
  }
  if (error) {
    console.error("[or3-sqlite] migration error:", error);
    throw error;
  }
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { sql } from "kysely";
|
|
2
|
+
/**
 * Migration 001: core identity tables — users, auth_accounts, workspaces,
 * workspace_members — plus the unique lookup indexes each needs.
 */
export async function up(db) {
  // Shared column builder: NOT NULL epoch-seconds creation timestamp.
  const createdAt = (col) => col.notNull().defaultTo(sql`(unixepoch())`);
  await db.schema
    .createTable("users")
    .ifNotExists()
    .addColumn("id", "text", (col) => col.primaryKey())
    .addColumn("email", "text")
    .addColumn("display_name", "text")
    .addColumn("active_workspace_id", "text")
    .addColumn("created_at", "integer", createdAt)
    .execute();
  await db.schema
    .createTable("auth_accounts")
    .ifNotExists()
    .addColumn("id", "text", (col) => col.primaryKey())
    .addColumn("user_id", "text", (col) => col.notNull())
    .addColumn("provider", "text", (col) => col.notNull())
    .addColumn("provider_user_id", "text", (col) => col.notNull())
    .addColumn("created_at", "integer", createdAt)
    .execute();
  // One auth account row per external identity.
  await db.schema
    .createIndex("idx_auth_accounts_provider_uid")
    .ifNotExists()
    .on("auth_accounts")
    .columns(["provider", "provider_user_id"])
    .unique()
    .execute();
  await db.schema
    .createTable("workspaces")
    .ifNotExists()
    .addColumn("id", "text", (col) => col.primaryKey())
    .addColumn("name", "text", (col) => col.notNull())
    .addColumn("description", "text")
    .addColumn("owner_user_id", "text", (col) => col.notNull())
    .addColumn("created_at", "integer", createdAt)
    .addColumn("deleted", "integer", (col) => col.notNull().defaultTo(0))
    .addColumn("deleted_at", "integer")
    .execute();
  await db.schema
    .createTable("workspace_members")
    .ifNotExists()
    .addColumn("id", "text", (col) => col.primaryKey())
    .addColumn("workspace_id", "text", (col) => col.notNull())
    .addColumn("user_id", "text", (col) => col.notNull())
    .addColumn("role", "text", (col) => col.notNull().defaultTo("editor"))
    .addColumn("created_at", "integer", createdAt)
    .execute();
  // One membership row per (workspace, user) pair.
  await db.schema
    .createIndex("idx_workspace_members_ws_user")
    .ifNotExists()
    .on("workspace_members")
    .columns(["workspace_id", "user_id"])
    .unique()
    .execute();
}
|
|
26
|
+
/** Revert 001: drop the identity tables in reverse creation order. */
export async function down(db) {
  const tables = ["workspace_members", "workspaces", "auth_accounts", "users"];
  for (const table of tables) {
    await db.schema.dropTable(table).ifExists().execute();
  }
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { sql } from "kysely";
|
|
2
|
+
// Materialized per-workspace sync tables. Each gets the common layout
// (composite (workspace_id, id) key, data_json payload, clock/hlc/device
// metadata) created in the loop inside up().
const SYNCED_TABLES = [
  "s_threads",
  "s_messages",
  "s_projects",
  "s_posts",
  "s_kv",
  "s_file_meta",
  "s_notifications"
];
|
|
11
|
+
/**
 * Migration 002: sync infrastructure.
 *
 * Creates the per-workspace server version counter, the append-only
 * change_log (with idempotency via a unique op_id index), per-device
 * cursors, tombstones for deletions, and the materialized SYNCED_TABLES.
 */
export async function up(db) {
  // Monotonic per-workspace version counter; one row per workspace.
  await db.schema.createTable("server_version_counter").ifNotExists().addColumn("workspace_id", "text", (col) => col.primaryKey()).addColumn("value", "integer", (col) => col.notNull().defaultTo(0)).execute();
  // Append-only change feed: each row records one op against a synced row.
  await db.schema.createTable("change_log").ifNotExists().addColumn("id", "text", (col) => col.primaryKey()).addColumn("workspace_id", "text", (col) => col.notNull()).addColumn("server_version", "integer", (col) => col.notNull()).addColumn("table_name", "text", (col) => col.notNull()).addColumn("pk", "text", (col) => col.notNull()).addColumn("op", "text", (col) => col.notNull()).addColumn("payload_json", "text").addColumn("clock", "integer", (col) => col.notNull()).addColumn("hlc", "text", (col) => col.notNull()).addColumn("device_id", "text", (col) => col.notNull()).addColumn("op_id", "text", (col) => col.notNull()).addColumn(
    "created_at",
    "integer",
    (col) => col.notNull().defaultTo(sql`(unixepoch())`)
  ).execute();
  // Range scans by (workspace, version) for incremental pulls.
  await db.schema.createIndex("idx_change_log_ws_sv").ifNotExists().on("change_log").columns(["workspace_id", "server_version"]).execute();
  // Unique op_id gives push idempotency (duplicate ops rejected).
  await db.schema.createIndex("idx_change_log_op_id").ifNotExists().on("change_log").columns(["op_id"]).unique().execute();
  // Per-device pull cursor: last server_version each device has seen.
  await db.schema.createTable("device_cursors").ifNotExists().addColumn("id", "text", (col) => col.primaryKey()).addColumn("workspace_id", "text", (col) => col.notNull()).addColumn("device_id", "text", (col) => col.notNull()).addColumn("last_seen_version", "integer", (col) => col.notNull().defaultTo(0)).addColumn(
    "updated_at",
    "integer",
    (col) => col.notNull().defaultTo(sql`(unixepoch())`)
  ).execute();
  await db.schema.createIndex("idx_device_cursors_ws_device").ifNotExists().on("device_cursors").columns(["workspace_id", "device_id"]).unique().execute();
  await db.schema.createIndex("idx_device_cursors_ws_version").ifNotExists().on("device_cursors").columns(["workspace_id", "last_seen_version"]).execute();
  // Tombstones: record deletions so late-syncing devices can apply them.
  await db.schema.createTable("tombstones").ifNotExists().addColumn("id", "text", (col) => col.primaryKey()).addColumn("workspace_id", "text", (col) => col.notNull()).addColumn("table_name", "text", (col) => col.notNull()).addColumn("pk", "text", (col) => col.notNull()).addColumn("deleted_at", "integer", (col) => col.notNull()).addColumn("clock", "integer", (col) => col.notNull()).addColumn("server_version", "integer", (col) => col.notNull()).addColumn(
    "created_at",
    "integer",
    (col) => col.notNull().defaultTo(sql`(unixepoch())`)
  ).execute();
  await db.schema.createIndex("idx_tombstones_ws_sv").ifNotExists().on("tombstones").columns(["workspace_id", "server_version"]).execute();
  // At most one tombstone per (workspace, table, pk).
  await db.schema.createIndex("idx_tombstones_ws_table_pk").ifNotExists().on("tombstones").columns(["workspace_id", "table_name", "pk"]).unique().execute();
  // Materialized state tables share a common layout with a composite
  // (workspace_id, id) primary key.
  for (const tableName of SYNCED_TABLES) {
    await db.schema.createTable(tableName).ifNotExists().addColumn("id", "text", (col) => col.notNull()).addColumn("workspace_id", "text", (col) => col.notNull()).addColumn("data_json", "text", (col) => col.notNull()).addColumn("clock", "integer", (col) => col.notNull().defaultTo(0)).addColumn("hlc", "text", (col) => col.notNull().defaultTo("")).addColumn("device_id", "text", (col) => col.notNull().defaultTo("")).addColumn("deleted", "integer", (col) => col.notNull().defaultTo(0)).addColumn(
      "created_at",
      "integer",
      (col) => col.notNull().defaultTo(sql`(unixepoch())`)
    ).addColumn(
      "updated_at",
      "integer",
      (col) => col.notNull().defaultTo(sql`(unixepoch())`)
    ).addPrimaryKeyConstraint(`${tableName}_pk`, ["workspace_id", "id"]).execute();
    await db.schema.createIndex(`idx_${tableName}_ws`).ifNotExists().on(tableName).columns(["workspace_id"]).execute();
  }
}
|
|
47
|
+
/** Revert 002: drop synced tables (reverse order), then the sync bookkeeping tables. */
export async function down(db) {
  const toDrop = [...SYNCED_TABLES].reverse();
  toDrop.push("tombstones", "device_cursors", "change_log", "server_version_counter");
  for (const table of toDrop) {
    await db.schema.dropTable(table).ifExists().execute();
  }
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
/**
 * Migration 003: Harden sync storage keys and tombstone uniqueness.
 *
 * - Materialized sync tables move to composite PK (workspace_id, id)
 * - Tombstones enforce one row per (workspace_id, table_name, pk)
 */
import type { Kysely } from 'kysely';
/** Apply the hardening migration (run via the shared Migrator). */
export declare function up(db: Kysely<unknown>): Promise<void>;
/** Revert hook; the `_db` name suggests the handle may go unused — implementation not shown here. */
export declare function down(_db: Kysely<unknown>): Promise<void>;
|