@enbox/dwn-server 0.0.7 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. package/dist/esm/src/admin/admin-api.d.ts +5 -1
  2. package/dist/esm/src/admin/admin-api.d.ts.map +1 -1
  3. package/dist/esm/src/admin/admin-api.js +327 -7
  4. package/dist/esm/src/admin/admin-api.js.map +1 -1
  5. package/dist/esm/src/admin/admin-auth.d.ts +21 -3
  6. package/dist/esm/src/admin/admin-auth.d.ts.map +1 -1
  7. package/dist/esm/src/admin/admin-auth.js +17 -9
  8. package/dist/esm/src/admin/admin-auth.js.map +1 -1
  9. package/dist/esm/src/admin/admin-passkey-store.d.ts +68 -0
  10. package/dist/esm/src/admin/admin-passkey-store.d.ts.map +1 -0
  11. package/dist/esm/src/admin/admin-passkey-store.js +132 -0
  12. package/dist/esm/src/admin/admin-passkey-store.js.map +1 -0
  13. package/dist/esm/src/admin/admin-session.d.ts +35 -0
  14. package/dist/esm/src/admin/admin-session.d.ts.map +1 -0
  15. package/dist/esm/src/admin/admin-session.js +91 -0
  16. package/dist/esm/src/admin/admin-session.js.map +1 -0
  17. package/dist/esm/src/admin/audit-log.d.ts.map +1 -1
  18. package/dist/esm/src/admin/audit-log.js +5 -43
  19. package/dist/esm/src/admin/audit-log.js.map +1 -1
  20. package/dist/esm/src/admin/index.d.ts +5 -1
  21. package/dist/esm/src/admin/index.d.ts.map +1 -1
  22. package/dist/esm/src/admin/index.js +2 -0
  23. package/dist/esm/src/admin/index.js.map +1 -1
  24. package/dist/esm/src/admin/types.d.ts +22 -0
  25. package/dist/esm/src/admin/types.d.ts.map +1 -1
  26. package/dist/esm/src/admin/webhook-manager.d.ts.map +1 -1
  27. package/dist/esm/src/admin/webhook-manager.js +11 -10
  28. package/dist/esm/src/admin/webhook-manager.js.map +1 -1
  29. package/dist/esm/src/config.d.ts +18 -0
  30. package/dist/esm/src/config.d.ts.map +1 -1
  31. package/dist/esm/src/config.js +18 -0
  32. package/dist/esm/src/config.js.map +1 -1
  33. package/dist/esm/src/dwn-server.d.ts.map +1 -1
  34. package/dist/esm/src/dwn-server.js +46 -11
  35. package/dist/esm/src/dwn-server.js.map +1 -1
  36. package/dist/esm/src/http-api.d.ts +4 -0
  37. package/dist/esm/src/http-api.d.ts.map +1 -1
  38. package/dist/esm/src/http-api.js +14 -4
  39. package/dist/esm/src/http-api.js.map +1 -1
  40. package/dist/esm/src/migrations/001-initial-server-schema.d.ts +21 -0
  41. package/dist/esm/src/migrations/001-initial-server-schema.d.ts.map +1 -0
  42. package/dist/esm/src/migrations/001-initial-server-schema.js +97 -0
  43. package/dist/esm/src/migrations/001-initial-server-schema.js.map +1 -0
  44. package/dist/esm/src/migrations/index.d.ts +13 -0
  45. package/dist/esm/src/migrations/index.d.ts.map +1 -0
  46. package/dist/esm/src/migrations/index.js +5 -0
  47. package/dist/esm/src/migrations/index.js.map +1 -0
  48. package/dist/esm/src/registration/registration-store.d.ts +4 -0
  49. package/dist/esm/src/registration/registration-store.d.ts.map +1 -1
  50. package/dist/esm/src/registration/registration-store.js +11 -34
  51. package/dist/esm/src/registration/registration-store.js.map +1 -1
  52. package/dist/esm/src/server-migration-runner.d.ts +23 -0
  53. package/dist/esm/src/server-migration-runner.d.ts.map +1 -0
  54. package/dist/esm/src/server-migration-runner.js +57 -0
  55. package/dist/esm/src/server-migration-runner.js.map +1 -0
  56. package/dist/esm/src/storage.d.ts +15 -0
  57. package/dist/esm/src/storage.d.ts.map +1 -1
  58. package/dist/esm/src/storage.js +135 -17
  59. package/dist/esm/src/storage.js.map +1 -1
  60. package/dist/esm/src/web5-connect/sql-ttl-cache.d.ts +11 -1
  61. package/dist/esm/src/web5-connect/sql-ttl-cache.d.ts.map +1 -1
  62. package/dist/esm/src/web5-connect/sql-ttl-cache.js +19 -20
  63. package/dist/esm/src/web5-connect/sql-ttl-cache.js.map +1 -1
  64. package/dist/esm/src/web5-connect/web5-connect-server.d.ts +10 -3
  65. package/dist/esm/src/web5-connect/web5-connect-server.d.ts.map +1 -1
  66. package/dist/esm/src/web5-connect/web5-connect-server.js +10 -4
  67. package/dist/esm/src/web5-connect/web5-connect-server.js.map +1 -1
  68. package/package.json +3 -2
  69. package/src/admin/admin-api.ts +403 -10
  70. package/src/admin/admin-auth.ts +38 -9
  71. package/src/admin/admin-passkey-store.ts +190 -0
  72. package/src/admin/admin-session.ts +116 -0
  73. package/src/admin/audit-log.ts +7 -44
  74. package/src/admin/index.ts +5 -0
  75. package/src/admin/types.ts +28 -0
  76. package/src/admin/webhook-manager.ts +12 -10
  77. package/src/config.ts +21 -0
  78. package/src/dwn-server.ts +49 -11
  79. package/src/http-api.ts +20 -5
  80. package/src/migrations/001-initial-server-schema.ts +114 -0
  81. package/src/migrations/index.ts +18 -0
  82. package/src/registration/registration-store.ts +13 -36
  83. package/src/server-migration-runner.ts +74 -0
  84. package/src/storage.ts +145 -17
  85. package/src/web5-connect/sql-ttl-cache.ts +21 -22
  86. package/src/web5-connect/web5-connect-server.ts +14 -5
package/src/http-api.ts CHANGED
@@ -3,8 +3,11 @@ import type { RecordsReadReply } from '@enbox/dwn-sdk-js';
3
3
  import type { ServerInfo } from '@enbox/dwn-clients';
4
4
  import type { Server, ServerWebSocket } from 'bun';
5
5
 
6
+ import type { Dialect } from '@enbox/dwn-sql-store';
7
+
6
8
  import type { ActivityLog } from './admin/activity-log.js';
7
9
  import type { AdminApi } from './admin/admin-api.js';
10
+ import type { AdminSessionManager } from './admin/admin-session.js';
8
11
  import type { AdminStore } from './admin/admin-store.js';
9
12
  import type { DwnServerConfig } from './config.js';
10
13
  import type { DwnServerError } from './dwn-error.js';
@@ -27,6 +30,7 @@ import { existsSync, readFileSync } from 'fs';
27
30
  import { join, resolve } from 'path';
28
31
 
29
32
  import { config } from './config.js';
33
+ import { getDialectFromUrl } from './storage.js';
30
34
  import { jsonRpcRouter } from './json-rpc-api.js';
31
35
  import { validateAdminAuth } from './admin/admin-auth.js';
32
36
  import { Web5ConnectServer } from './web5-connect/web5-connect-server.js';
@@ -62,6 +66,7 @@ export class HttpApi {
62
66
  #tenantRateLimiter: RateLimiter | undefined;
63
67
  #messageProcessedHooks: MessageProcessedHook[];
64
68
  #openAuthHandler: OpenAuthHandler | undefined;
69
+ #sessionManager: AdminSessionManager | undefined;
65
70
  #adminUiPath: string | undefined;
66
71
  web5ConnectServer: Web5ConnectServer;
67
72
  registrationManager: RegistrationManager;
@@ -82,6 +87,8 @@ export class HttpApi {
82
87
  tenantRateLimiter? : RateLimiter;
83
88
  messageProcessedHooks? : MessageProcessedHook[];
84
89
  openAuthHandler? : OpenAuthHandler;
90
+ sessionManager? : AdminSessionManager;
91
+ ttlCacheDialect? : Dialect;
85
92
  },
86
93
  ): Promise<HttpApi> {
87
94
  const httpApi = new HttpApi();
@@ -119,15 +126,20 @@ export class HttpApi {
119
126
  httpApi.#tenantRateLimiter = options?.tenantRateLimiter;
120
127
  httpApi.#messageProcessedHooks = options?.messageProcessedHooks ?? [];
121
128
  httpApi.#openAuthHandler = options?.openAuthHandler;
129
+ httpApi.#sessionManager = options?.sessionManager;
122
130
  httpApi.#adminUiPath = resolvedAdminUiPath;
123
131
 
124
132
  if (registrationManager !== undefined) {
125
133
  httpApi.registrationManager = registrationManager;
126
134
  }
127
135
 
136
+ // Use an externally provided dialect when available (required for
137
+ // in-memory SQLite so that migrations and the TTL cache share the same
138
+ // database instance). Falls back to creating a dialect from the URL.
139
+ const ttlDialect = options?.ttlCacheDialect ?? getDialectFromUrl(new URL(config.ttlCacheUrl));
128
140
  httpApi.web5ConnectServer = await Web5ConnectServer.create({
129
- baseUrl : config.baseUrl,
130
- sqlTtlCacheUrl : config.ttlCacheUrl,
141
+ baseUrl : config.baseUrl,
142
+ sqlDialect : ttlDialect,
131
143
  });
132
144
 
133
145
  return httpApi;
@@ -258,6 +270,9 @@ export class HttpApi {
258
270
  if (this.#openAuthHandler) {
259
271
  this.#openAuthHandler.destroy();
260
272
  }
273
+ if (this.web5ConnectServer) {
274
+ this.web5ConnectServer.close();
275
+ }
261
276
  if (this.#server) {
262
277
  this.#server.stop(true); // close all connections immediately
263
278
  }
@@ -321,9 +336,9 @@ export class HttpApi {
321
336
  if (method === 'GET' && path === '/metrics') {
322
337
  // Metrics require admin authentication when an admin token is configured.
323
338
  if (this.#config.adminToken) {
324
- const authError = validateAdminAuth(req, this.#config);
325
- if (authError) {
326
- return authError;
339
+ const authResult = validateAdminAuth(req, this.#config, this.#sessionManager);
340
+ if (authResult.error) {
341
+ return authResult.error;
327
342
  }
328
343
  }
329
344
  try {
@@ -0,0 +1,114 @@
1
+ import type { Dialect } from '@enbox/dwn-sql-store';
2
+ import type { Kysely, Migration } from 'kysely';
3
+
4
+ /**
5
+ * Factory type for server migrations. Mirrors the `DwnMigrationFactory` from
6
+ * `@enbox/dwn-sql-store` — receives the {@link Dialect} so migrations can use
7
+ * dialect-specific helpers like `addAutoIncrementingColumn()`.
8
+ */
9
+ export type ServerMigrationFactory = (dialect: Dialect) => Migration;
10
+
11
+ /**
12
+ * Baseline server migration: creates all DWN server admin and cache tables.
13
+ *
14
+ * Tables:
15
+ * - `registeredTenants`: tenant registration data
16
+ * - `tenantQuotas`: per-tenant storage quotas
17
+ * - `adminAuditLog`: append-only admin event log
18
+ * - `adminWebhooks`: webhook registrations
19
+ * - `adminPasskeys`: WebAuthn admin passkeys
20
+ * - `cacheEntries`: TTL-based key/value cache (Web5 Connect state, etc.)
21
+ */
22
+ export const migration001InitialServerSchema: ServerMigrationFactory = (dialect): Migration => ({
23
+
24
+ async up(db: Kysely<any>): Promise<void> {
25
+
26
+ // ─── registeredTenants ────────────────────────────────────────────
27
+ await db.schema
28
+ .createTable('registeredTenants')
29
+ .ifNotExists()
30
+ .addColumn('did', 'text', (col) => col.primaryKey())
31
+ .addColumn('termsOfServiceHash', 'text')
32
+ .addColumn('suspended', 'integer', (col) => col.defaultTo(0))
33
+ .addColumn('accountId', 'text')
34
+ .addColumn('registrationType', 'text')
35
+ .addColumn('registeredAt', 'text')
36
+ .addColumn('metadata', 'text')
37
+ .execute();
38
+
39
+ // ─── tenantQuotas ─────────────────────────────────────────────────
40
+ await db.schema
41
+ .createTable('tenantQuotas')
42
+ .ifNotExists()
43
+ .addColumn('did', 'text', (col) => col.primaryKey())
44
+ .addColumn('maxMessages', 'integer', (col) => col.defaultTo(0))
45
+ .addColumn('maxStorageBytes', 'bigint', (col) => col.defaultTo(0))
46
+ .execute();
47
+
48
+ // ─── adminAuditLog ────────────────────────────────────────────────
49
+ let auditTable = db.schema
50
+ .createTable('adminAuditLog')
51
+ .ifNotExists()
52
+ .addColumn('timestamp', 'text', (col) => col.notNull())
53
+ .addColumn('actor', 'text', (col) => col.notNull())
54
+ .addColumn('action', 'text', (col) => col.notNull())
55
+ .addColumn('target', 'text')
56
+ .addColumn('detail', 'text');
57
+
58
+ auditTable = dialect.addAutoIncrementingColumn(auditTable, 'id', (col) => col.primaryKey());
59
+ await auditTable.execute();
60
+
61
+ try {
62
+ await db.schema.createIndex('index_audit_timestamp')
63
+ .ifNotExists().on('adminAuditLog').column('timestamp').execute();
64
+ } catch { /* index already exists */ }
65
+
66
+ try {
67
+ await db.schema.createIndex('index_audit_target')
68
+ .ifNotExists().on('adminAuditLog').column('target').execute();
69
+ } catch { /* index already exists */ }
70
+
71
+ try {
72
+ await db.schema.createIndex('index_audit_action')
73
+ .ifNotExists().on('adminAuditLog').column('action').execute();
74
+ } catch { /* index already exists */ }
75
+
76
+ // ─── adminWebhooks ────────────────────────────────────────────────
77
+ await db.schema
78
+ .createTable('adminWebhooks')
79
+ .ifNotExists()
80
+ .addColumn('id', 'text', (col) => col.primaryKey())
81
+ .addColumn('url', 'text', (col) => col.notNull())
82
+ .addColumn('events', 'text', (col) => col.notNull())
83
+ .addColumn('secret', 'text')
84
+ .addColumn('createdAt', 'text', (col) => col.notNull())
85
+ .execute();
86
+
87
+ // ─── adminPasskeys ────────────────────────────────────────────────
88
+ await db.schema
89
+ .createTable('adminPasskeys')
90
+ .ifNotExists()
91
+ .addColumn('id', 'text', (col) => col.primaryKey())
92
+ .addColumn('name', 'text', (col) => col.notNull())
93
+ .addColumn('publicKey', 'text', (col) => col.notNull())
94
+ .addColumn('counter', 'integer', (col) => col.notNull().defaultTo(0))
95
+ .addColumn('transports', 'text', (col) => col.notNull().defaultTo('[]'))
96
+ .addColumn('createdAt', 'text', (col) => col.notNull())
97
+ .addColumn('lastUsedAt', 'text')
98
+ .execute();
99
+
100
+ // ─── cacheEntries (TTL cache) ─────────────────────────────────────
101
+ await db.schema
102
+ .createTable('cacheEntries')
103
+ .ifNotExists()
104
+ .addColumn('key', 'varchar(512)', (col) => col.primaryKey())
105
+ .addColumn('value', 'text', (col) => col.notNull())
106
+ .addColumn('expiry', 'bigint', (col) => col.notNull())
107
+ .execute();
108
+
109
+ try {
110
+ await db.schema.createIndex('index_expiry')
111
+ .ifNotExists().on('cacheEntries').column('expiry').execute();
112
+ } catch { /* index already exists */ }
113
+ },
114
+ });
@@ -0,0 +1,18 @@
1
+ import type { ServerMigrationFactory } from './001-initial-server-schema.js';
2
+
3
+ import { migration001InitialServerSchema } from './001-initial-server-schema.js';
4
+
5
+ /**
6
+ * All DWN server migrations in sequential order.
7
+ *
8
+ * Each entry is a `[name, factory]` tuple where the factory receives the
9
+ * `Dialect` and returns a standard Kysely `Migration`. This mirrors the
10
+ * pattern used by DWN store migrations in `@enbox/dwn-sql-store`.
11
+ *
12
+ * **Ordering contract:** Entries MUST be sorted by name (lexicographic).
13
+ */
14
+ export type { ServerMigrationFactory };
15
+
16
+ export const allServerMigrations: ReadonlyArray<readonly [name: string, factory: ServerMigrationFactory]> = [
17
+ ['001-initial-server-schema', migration001InitialServerSchema],
18
+ ];
@@ -29,47 +29,24 @@ export class RegistrationStore {
29
29
  return store;
30
30
  }
31
31
 
32
+ /**
33
+ * Verifies that the required tables exist. Throws a clear error directing
34
+ * the caller to run server migrations first.
35
+ */
32
36
  private async initialize(): Promise<void> {
33
- await this.db.schema
34
- .createTable(RegistrationStore.registeredTenantTableName)
35
- .ifNotExists()
36
- .addColumn('did', 'text', (column) => column.primaryKey())
37
- .addColumn('termsOfServiceHash', 'text')
38
- .addColumn('suspended', 'integer', (column) => column.defaultTo(0))
39
- .execute();
40
-
41
- // Add the `suspended` column to existing tables that don't have it yet.
42
- // Kysely doesn't support `ADD COLUMN IF NOT EXISTS` across all dialects, so we
43
- // catch and ignore the "column already exists" error.
44
- try {
45
- await this.db.schema
46
- .alterTable(RegistrationStore.registeredTenantTableName)
47
- .addColumn('suspended', 'integer', (column) => column.defaultTo(0))
48
- .execute();
49
- } catch {
50
- // Column already exists — expected for new installations.
51
- }
52
-
53
- // Add provider-auth columns (idempotent migration). https://github.com/enboxorg/enbox/issues/404
54
- for (const col of ['accountId', 'registrationType', 'registeredAt', 'metadata']) {
37
+ const tables = [
38
+ RegistrationStore.registeredTenantTableName,
39
+ RegistrationStore.tenantQuotasTableName,
40
+ ];
41
+ for (const table of tables) {
55
42
  try {
56
- await this.db.schema
57
- .alterTable(RegistrationStore.registeredTenantTableName)
58
- .addColumn(col, 'text')
59
- .execute();
43
+ await sql`SELECT 1 FROM ${sql.table(table)} LIMIT 0`.execute(this.db);
60
44
  } catch {
61
- // Column already exists — expected.
45
+ throw new Error(
46
+ `RegistrationStore: table '${table}' does not exist. Run server migrations before starting.`
47
+ );
62
48
  }
63
49
  }
64
-
65
- // Per-tenant storage quotas table.
66
- await this.db.schema
67
- .createTable(RegistrationStore.tenantQuotasTableName)
68
- .ifNotExists()
69
- .addColumn('did', 'text', (column) => column.primaryKey())
70
- .addColumn('maxMessages', 'integer', (column) => column.defaultTo(0))
71
- .addColumn('maxStorageBytes', 'bigint', (column) => column.defaultTo(0))
72
- .execute();
73
50
  }
74
51
 
75
52
  /**
@@ -0,0 +1,74 @@
1
+ import type { Dialect } from '@enbox/dwn-sql-store';
2
+ import type { ServerMigrationFactory } from './migrations/index.js';
3
+ import type { Kysely, Migration, MigrationProvider, MigrationResultSet } from 'kysely';
4
+
5
+ import { allServerMigrations } from './migrations/index.js';
6
+ import { Migrator } from 'kysely';
7
+
8
+ /**
9
+ * {@link MigrationProvider} for server migrations. Wraps an ordered list of
10
+ * `(name, factory)` pairs. At resolution time each factory is called with the
11
+ * dialect, producing the concrete Kysely {@link Migration} objects.
12
+ */
13
+ class ServerMigrationProvider implements MigrationProvider {
14
+ #dialect: Dialect;
15
+ #factories: ReadonlyArray<readonly [name: string, factory: ServerMigrationFactory]>;
16
+
17
+ constructor(
18
+ dialect: Dialect,
19
+ factories: ReadonlyArray<readonly [name: string, factory: ServerMigrationFactory]>,
20
+ ) {
21
+ this.#dialect = dialect;
22
+ this.#factories = factories;
23
+ }
24
+
25
+ public async getMigrations(): Promise<Record<string, Migration>> {
26
+ const migrations: Record<string, Migration> = {};
27
+ for (const [name, factory] of this.#factories) {
28
+ migrations[name] = factory(this.#dialect);
29
+ }
30
+ return migrations;
31
+ }
32
+ }
33
+
34
+ /**
35
+ * Runs all pending DWN server migrations against the given database.
36
+ *
37
+ * Uses Kysely's native {@link Migrator} with a separate migration table
38
+ * (`dwn_server_migration`) to avoid collisions with the DWN store
39
+ * migrations that use the default `kysely_migration` table.
40
+ *
41
+ * Call this once during server startup, before creating admin stores,
42
+ * registration stores, or the TTL cache.
43
+ *
44
+ * @param db - An open Kysely instance connected to the target database.
45
+ * @param dialect - The dialect for the target database. Passed to each
46
+ * migration factory so it can use dialect-specific DDL helpers.
47
+ * @param migrations - Optional custom migration list; defaults to the
48
+ * built-in {@link allServerMigrations}.
49
+ * @returns The names of newly applied migrations (empty if already up-to-date).
50
+ * @throws If any migration fails.
51
+ */
52
+ export async function runServerMigrations(
53
+ db: Kysely<any>,
54
+ dialect: Dialect,
55
+ migrations?: ReadonlyArray<readonly [name: string, factory: ServerMigrationFactory]>,
56
+ ): Promise<string[]> {
57
+ const provider = new ServerMigrationProvider(dialect, migrations ?? allServerMigrations);
58
+ const migrator = new Migrator({
59
+ db,
60
+ provider,
61
+ migrationTableName : 'dwn_server_migration',
62
+ migrationLockTableName : 'dwn_server_migration_lock',
63
+ });
64
+
65
+ const resultSet: MigrationResultSet = await migrator.migrateToLatest();
66
+
67
+ if (resultSet.error) {
68
+ throw resultSet.error;
69
+ }
70
+
71
+ return (resultSet.results ?? [])
72
+ .filter((r) => r.status === 'Success')
73
+ .map((r) => r.migrationName);
74
+ }
package/src/storage.ts CHANGED
@@ -20,6 +20,7 @@ import { Kysely } from 'kysely';
20
20
 
21
21
  import { createBunSqliteDatabase } from '@enbox/dwn-sql-store';
22
22
  import { PluginLoader } from './plugin-loader.js';
23
+ import { runServerMigrations } from './server-migration-runner.js';
23
24
 
24
25
  import {
25
26
  DataStoreLevel,
@@ -55,26 +56,27 @@ export enum BackendTypes {
55
56
  export type DwnStore = DataStore | StateIndex | MessageStore | ResumableTaskStore;
56
57
 
57
58
  /**
58
- * Cache of shared PostgreSQL dialects keyed by connection URL. When multiple
59
- * DWN stores share the same Postgres URL, they reuse a single dialect (and
60
- * thus a single `pg.Pool`) instead of each creating their own. This reduces
61
- * connection count from 4 × pool_max to 1 × pool_max per DWN process.
59
+ * Returns a (potentially cached) dialect for the given connection URL. For
60
+ * Postgres, creates a pool with configurable sizing from the server config.
61
+ * For other backends, delegates to `getDialectFromUrl()` which handles its
62
+ * own caching (critical for in-memory SQLite).
63
+ *
64
+ * All Postgres dialects are cached in a separate map keyed by URL so that
65
+ * multiple DWN stores sharing the same Postgres URL reuse a single
66
+ * `pg.Pool`, reducing connection count from 4 × pool_max to 1 × pool_max.
62
67
  */
63
- const sharedDialectCache: Map<string, Dialect> = new Map();
68
+ const postgresDialectCache: Map<string, Dialect> = new Map();
64
69
 
65
- /**
66
- * Returns a (potentially cached) dialect for the given Postgres connection URL.
67
- * Non-Postgres URLs always return a fresh dialect (no caching).
68
- */
69
70
  function getOrCreateDialect(connectionUrl: URL, config: DwnServerConfig): Dialect {
70
71
  const protocol = connectionUrl.protocol.slice(0, -1);
71
72
 
72
73
  if (protocol !== BackendTypes.POSTGRES) {
74
+ // getDialectFromUrl handles its own caching for SQLite/MySQL.
73
75
  return getDialectFromUrl(connectionUrl);
74
76
  }
75
77
 
76
78
  const key = connectionUrl.toString();
77
- const cached = sharedDialectCache.get(key);
79
+ const cached = postgresDialectCache.get(key);
78
80
  if (cached !== undefined) {
79
81
  return cached;
80
82
  }
@@ -92,7 +94,7 @@ function getOrCreateDialect(connectionUrl: URL, config: DwnServerConfig): Dialec
92
94
  cursor : Cursor,
93
95
  });
94
96
 
95
- sharedDialectCache.set(key, dialect);
97
+ postgresDialectCache.set(key, dialect);
96
98
  return dialect;
97
99
  }
98
100
 
@@ -152,13 +154,99 @@ async function runSqlMigrationsIfNeeded(config: DwnServerConfig): Promise<void>
152
154
  console.log(`DWN migrations applied: ${applied.join(', ')}`);
153
155
  }
154
156
  } finally {
155
- // Don't destroy the Kysely instance if using a shared Postgres pool
156
- // the pool is cached in sharedDialectCache and will be reused by stores.
157
- // Only destroy for non-cached dialects (SQLite, MySQL).
158
- if (protocol !== BackendTypes.POSTGRES) {
159
- await db.destroy();
157
+ // Do NOT destroy the Kysely instance — the dialect is cached and will be
158
+ // reused by stores. For in-memory SQLite, destroying would close the
159
+ // database and lose all migrated schema. For Postgres, the pool is shared.
160
+ }
161
+ }
162
+
163
+ /**
164
+ * Runs DWN server schema migrations (admin stores, registration, TTL cache)
165
+ * if the given URL points to a SQL backend. Uses the `registrationStoreUrl`
166
+ * (or the TTL cache URL) as the target database.
167
+ *
168
+ * Server migrations use a separate tracking table (`dwn_server_migration`)
169
+ * so they do not conflict with the DWN store migrations.
170
+ *
171
+ * Call this once during server startup, before creating admin stores.
172
+ *
173
+ * @returns The dialect used for the target database (so the caller can reuse
174
+ * it for the TTL cache and admin stores), or `undefined` if no SQL
175
+ * backend was configured or needed.
176
+ */
177
+ export async function runServerMigrationsIfNeeded(config: DwnServerConfig): Promise<Dialect | undefined> {
178
+ const sqlBackends: string[] = [BackendTypes.SQLITE, BackendTypes.MYSQL, BackendTypes.POSTGRES];
179
+
180
+ // Determine the target URL for server migrations. Prefer registrationStoreUrl
181
+ // since admin stores and the TTL cache share that database. Fall back to
182
+ // ttlCacheUrl when no registration store is configured (the cacheEntries
183
+ // table still needs a schema).
184
+ const targetUrl = config.registrationStoreUrl ?? config.ttlCacheUrl;
185
+ if (!targetUrl) {
186
+ return undefined;
187
+ }
188
+
189
+ if (isFilePath(targetUrl)) {
190
+ return undefined;
191
+ }
192
+
193
+ let parsedUrl: URL;
194
+ try {
195
+ parsedUrl = new URL(targetUrl);
196
+ } catch {
197
+ return undefined;
198
+ }
199
+
200
+ const protocol = parsedUrl.protocol.slice(0, -1);
201
+ if (!sqlBackends.includes(protocol)) {
202
+ return undefined;
203
+ }
204
+
205
+ // When both registrationStoreUrl and ttlCacheUrl are set and differ,
206
+ // validate they point at the same database — the cacheEntries table is
207
+ // included in the server migration so it must live alongside the other
208
+ // server tables.
209
+ if (config.registrationStoreUrl && config.ttlCacheUrl
210
+ && config.ttlCacheUrl !== config.registrationStoreUrl) {
211
+ let ttlUrl: URL | undefined;
212
+ try {
213
+ ttlUrl = new URL(config.ttlCacheUrl);
214
+ } catch { /* not a URL */ }
215
+
216
+ if (ttlUrl) {
217
+ const ttlProtocol = ttlUrl.protocol.slice(0, -1);
218
+ if (sqlBackends.includes(ttlProtocol)) {
219
+ throw new Error(
220
+ 'DWN server misconfiguration: DWN_TTL_CACHE_URL must point to the same database as ' +
221
+ 'DWN_REGISTRATION_STORE_URL (or DWN_STORAGE) because the cacheEntries table is managed ' +
222
+ 'by the server migration system. ' +
223
+ `Got registrationStoreUrl="${config.registrationStoreUrl}", ttlCacheUrl="${config.ttlCacheUrl}".`
224
+ );
225
+ }
226
+ }
227
+ }
228
+
229
+ const dialect = getOrCreateDialect(parsedUrl, config);
230
+ const db = new Kysely<Record<string, unknown>>({ dialect });
231
+ try {
232
+ const applied = await runServerMigrations(db, dialect);
233
+ if (applied.length > 0) {
234
+ console.log(`Server migrations applied: ${applied.join(', ')}`);
235
+ }
236
+ } finally {
237
+ // For Postgres, don't destroy — the pool is cached in postgresDialectCache.
238
+ // For SQLite/MySQL, we also keep the Kysely instance alive so the caller
239
+ // can reuse the same dialect (critical for in-memory SQLite).
240
+ if (protocol === BackendTypes.POSTGRES) {
241
+ // Pool stays alive via sharedDialectCache.
160
242
  }
243
+ // NOTE: We intentionally do NOT destroy the Kysely instance for any
244
+ // backend. The dialect is returned to the caller for reuse (e.g. by the
245
+ // TTL cache and admin stores). For in-memory SQLite, destroying would
246
+ // lose the database.
161
247
  }
248
+
249
+ return dialect;
162
250
  }
163
251
 
164
252
  function getLevelStore(
@@ -264,6 +352,21 @@ async function loadStoreFromFilePath(
264
352
  }
265
353
  }
266
354
 
355
+ /**
356
+ * Cache for the in-memory SQLite dialect. Since every call to
357
+ * `createBunSqliteDatabase(':memory:')` creates a separate, empty database,
358
+ * we must ensure that `getDialectFromUrl(new URL('sqlite://'))` always
359
+ * returns the same dialect (and thus the same underlying database) within a
360
+ * process. This is critical for the DWN server startup flow where migrations,
361
+ * the registration store, and the TTL cache all need to share the same
362
+ * in-memory database.
363
+ *
364
+ * File-based SQLite and other backends are NOT cached here — file-based SQLite
365
+ * connections naturally share state through the filesystem, and caching would
366
+ * break test isolation when multiple test files run in the same process.
367
+ */
368
+ let inMemorySqliteDialect: Dialect | undefined;
369
+
267
370
  export function getDialectFromUrl(connectionUrl: URL): Dialect {
268
371
  switch (connectionUrl.protocol.slice(0, -1)) {
269
372
  case BackendTypes.SQLITE: {
@@ -275,9 +378,32 @@ export function getDialectFromUrl(connectionUrl: URL): Dialect {
275
378
  fs.mkdirSync(connectionUrl.host, { recursive: true });
276
379
  }
277
380
 
278
- // Use in-memory database if no path is provided (for tests)
381
+ // Use in-memory database if no path is provided (for tests).
279
382
  const dbPath = path || ':memory:';
280
383
 
384
+ // For in-memory SQLite, return a cached dialect so that all callers
385
+ // (migrations, registration store, TTL cache) share the same database.
386
+ // The wrapper makes close() a no-op so that individual consumers (e.g.
387
+ // DwnServer.stop() → Dwn.close() → store.close()) cannot destroy the
388
+ // shared database out from under other consumers.
389
+ if (dbPath === ':memory:') {
390
+ if (inMemorySqliteDialect === undefined) {
391
+ const sharedDb = createBunSqliteDatabase(':memory:');
392
+ const nonCloseableDb = {
393
+ close(): void {
394
+ // no-op — shared instance must survive the process
395
+ },
396
+ prepare(sql: string): ReturnType<typeof sharedDb.prepare> {
397
+ return sharedDb.prepare(sql);
398
+ },
399
+ };
400
+ inMemorySqliteDialect = new SqliteDialect({
401
+ database: async (): Promise<typeof nonCloseableDb> => nonCloseableDb,
402
+ });
403
+ }
404
+ return inMemorySqliteDialect;
405
+ }
406
+
281
407
  return new SqliteDialect({
282
408
  database: async () => createBunSqliteDatabase(dbPath),
283
409
  });
@@ -291,6 +417,8 @@ export function getDialectFromUrl(connectionUrl: URL): Dialect {
291
417
  pool : async () => new pg.Pool({ connectionString: connectionUrl.toString() }),
292
418
  cursor : Cursor,
293
419
  });
420
+ default:
421
+ throw new Error(`Unsupported database protocol: ${connectionUrl.protocol}`);
294
422
  }
295
423
  }
296
424
 
@@ -1,5 +1,5 @@
1
1
  import type { Dialect } from '@enbox/dwn-sql-store';
2
- import { Kysely } from 'kysely';
2
+ import { Kysely, sql } from 'kysely';
3
3
 
4
4
  /**
5
5
  * The SqlTtlCache is responsible for storing and retrieving cache data with TTL (Time-to-Live).
@@ -8,13 +8,11 @@ export class SqlTtlCache {
8
8
  private static readonly cacheTableName = 'cacheEntries';
9
9
  private static readonly cleanupIntervalInSeconds = 60;
10
10
 
11
- private sqlDialect: Dialect;
12
11
  private db: Kysely<CacheDatabase>;
13
12
  private cleanupTimer: NodeJS.Timeout;
14
13
 
15
14
  private constructor(sqlDialect: Dialect) {
16
15
  this.db = new Kysely<CacheDatabase>({ dialect: sqlDialect });
17
- this.sqlDialect = sqlDialect;
18
16
  }
19
17
 
20
18
  /**
@@ -28,26 +26,17 @@ export class SqlTtlCache {
28
26
  return cacheManager;
29
27
  }
30
28
 
29
+ /**
30
+ * Verifies that the required table exists and starts the cleanup timer.
31
+ * Throws a clear error directing the caller to run server migrations first.
32
+ */
31
33
  private async initialize(): Promise<void> {
32
-
33
- // create table if it doesn't exist
34
- const tableExists = await this.sqlDialect.hasTable(this.db, SqlTtlCache.cacheTableName);
35
- if (!tableExists) {
36
- await this.db.schema
37
- .createTable(SqlTtlCache.cacheTableName)
38
- .ifNotExists() // kept to show supported by all dialects in contrast to ifNotExists() below, though not needed due to tableExists check above
39
- // 512 chars to accommodate potentially large `state` in Web5 Connect flow
40
- .addColumn('key', 'varchar(512)', (column) => column.primaryKey())
41
- .addColumn('value', 'text', (column) => column.notNull())
42
- .addColumn('expiry', 'bigint', (column) => column.notNull())
43
- .execute();
44
-
45
- await this.db.schema
46
- .createIndex('index_expiry')
47
- // .ifNotExists() // intentionally kept commented out code to show that it is not supported by all dialects (ie. MySQL)
48
- .on(SqlTtlCache.cacheTableName)
49
- .column('expiry')
50
- .execute();
34
+ try {
35
+ await sql`SELECT 1 FROM ${sql.table(SqlTtlCache.cacheTableName)} LIMIT 0`.execute(this.db);
36
+ } catch {
37
+ throw new Error(
38
+ `SqlTtlCache: table '${SqlTtlCache.cacheTableName}' does not exist. Run server migrations before starting.`
39
+ );
51
40
  }
52
41
 
53
42
  // Start the cleanup timer
@@ -131,6 +120,16 @@ export class SqlTtlCache {
131
120
  .where('expiry', '<', Date.now())
132
121
  .execute();
133
122
  }
123
+
124
+ /**
125
+ * Stops the background cleanup timer. The underlying database connection is
126
+ * NOT destroyed here because the dialect is shared with other components
127
+ * (e.g. DWN stores, registration store) and its lifecycle is managed by the
128
+ * dialect owner.
129
+ */
130
+ public close(): void {
131
+ clearInterval(this.cleanupTimer);
132
+ }
134
133
  }
135
134
 
136
135
  interface CacheEntry {