@elizaos/plugin-sql 1.6.5 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -73,6 +73,14 @@ var init_server = __esm(() => {
  });
  });
 
+ // src/types.ts
+ function getDb(adapter) {
+ return adapter.db;
+ }
+ function getRow(result, index5 = 0) {
+ return result.rows[index5];
+ }
+
  // src/runtime-migrator/storage/migration-tracker.ts
  import { sql as sql17 } from "drizzle-orm";
 
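Note: the new getDb/getRow helpers centralize the adapter and result access that the 1.6.5 bundle performed inline via adapter.db and result.rows[0]. A minimal TypeScript sketch of what these helpers express; the generic shapes are assumptions inferred from the call sites in this bundle, not the package's actual type declarations:

    interface AdapterWithDb<TDb> {
      db: TDb;
    }
    interface QueryResult<TRow> {
      rows: TRow[];
    }
    function getDb<TDb>(adapter: AdapterWithDb<TDb>): TDb {
      return adapter.db;
    }
    // Returns the row at `index` (default 0) or undefined for empty results,
    // which is why call sites below write `getRow(result) || null`.
    function getRow<TRow>(result: QueryResult<TRow>, index = 0): TRow | undefined {
      return result.rows[index];
    }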
@@ -114,12 +122,12 @@ class MigrationTracker {
  `);
  }
  async getLastMigration(pluginName) {
- const result = await this.db.execute(sql17`SELECT id, hash, created_at
- FROM migrations._migrations
- WHERE plugin_name = ${pluginName}
- ORDER BY created_at DESC
+ const result = await this.db.execute(sql17`SELECT id, hash, created_at
+ FROM migrations._migrations
+ WHERE plugin_name = ${pluginName}
+ ORDER BY created_at DESC
  LIMIT 1`);
- return result.rows[0] || null;
+ return getRow(result) || null;
  }
  async recordMigration(pluginName, hash, createdAt) {
  await this.db.execute(sql17`INSERT INTO migrations._migrations (plugin_name, hash, created_at)
@@ -143,7 +151,7 @@ class JournalStorage {
  if (result.rows.length === 0) {
  return null;
  }
- const row = result.rows[0];
+ const row = getRow(result);
  return {
  version: row.version,
  dialect: row.dialect,
@@ -19881,48 +19889,15 @@ class RuntimeMigrator {
  return value >= MIN_BIGINT && value <= MAX_BIGINT;
  }
  isRealPostgresDatabase(connectionUrl) {
- if (!connectionUrl || connectionUrl.trim() === "") {
+ if (!connectionUrl?.trim())
  return false;
- }
- const trimmedUrl = connectionUrl.trim();
- const url = trimmedUrl.toLowerCase();
- const originalUrl = trimmedUrl;
- const nonPostgresSchemes = [
- "mysql://",
- "mysqli://",
- "mariadb://",
- "mongodb://",
- "mongodb+srv://"
- ];
- for (const scheme of nonPostgresSchemes) {
- if (url.startsWith(scheme)) {
- return false;
- }
- }
- const excludePatterns = [
- ":memory:",
- "pglite://",
- "/pglite",
- "sqlite://",
- "sqlite3://",
- ".sqlite",
- ".sqlite3",
- "file::memory:",
- "file:"
- ];
- const urlWithoutQuery = url.split("?")[0];
- if (urlWithoutQuery.endsWith(".db") || urlWithoutQuery.endsWith(".sqlite") || urlWithoutQuery.endsWith(".sqlite3")) {
+ const url = connectionUrl.trim().toLowerCase();
+ const nonPgSchemes = ["mysql://", "mysqli://", "mariadb://", "mongodb://", "mongodb+srv://"];
+ if (nonPgSchemes.some((s) => url.startsWith(s)))
  return false;
- }
- for (const pattern of excludePatterns) {
- if (url.includes(pattern)) {
- if (pattern === "file:" && url.includes("postgres")) {
- continue;
- }
- return false;
- }
- }
- const postgresSchemes = [
+ if (url.includes(":memory:"))
+ return false;
+ const pgSchemes = [
  "postgres://",
  "postgresql://",
  "postgis://",
@@ -19934,12 +19909,17 @@ class RuntimeMigrator {
  "timescaledb://",
  "yugabyte://"
  ];
- for (const scheme of postgresSchemes) {
- if (url.startsWith(scheme)) {
- return true;
- }
- }
- const connectionParams = [
+ if (pgSchemes.some((s) => url.startsWith(s)))
+ return true;
+ const excludePatterns = ["pglite", "sqlite"];
+ const urlBase = url.split("?")[0];
+ if (excludePatterns.some((p) => url.includes(p)))
+ return false;
+ if (/\.(db|sqlite|sqlite3)$/.test(urlBase))
+ return false;
+ if (url.includes("localhost") || url.includes("127.0.0.1"))
+ return true;
+ const connParams = [
  "host=",
  "dbname=",
  "sslmode=",
@@ -19951,126 +19931,60 @@ class RuntimeMigrator {
  "options=",
  "sslcert=",
  "sslkey=",
- "sslrootcert="
+ "sslrootcert=",
+ "fallback_application_name=",
+ "keepalives=",
+ "target_session_attrs="
  ];
- for (const param of connectionParams) {
- if (url.includes(param)) {
- return true;
- }
- }
- if (url.includes("@") && (url.includes("postgres") || /:\d{4,5}/.test(url))) {
+ if (connParams.some((p) => url.includes(p)))
  return true;
- }
- const postgresPorts = [
- ":5432",
- ":5433",
- ":5434",
- ":25060",
- ":26257",
- ":6432",
- ":9999",
- ":8432"
- ];
- for (const port of postgresPorts) {
- if (url.includes(port)) {
- return true;
- }
- }
- const cloudProviderPatterns = [
+ if (url.includes("@") && (url.includes("postgres") || /:\d{4,5}/.test(url)))
+ return true;
+ if (/:(5432|5433|5434|6432|8432|9999|25060|26257)\b/.test(url))
+ return true;
+ const cloudPatterns = [
  "amazonaws.com",
- "rds.amazonaws.com",
  ".rds.",
- "redshift.amazonaws.com",
  "azure.com",
  "database.azure.com",
- "postgres.database.azure.com",
  "googleusercontent",
  "cloudsql",
- "cloud.google.com",
  "supabase",
- ".supabase.co",
- ".supabase.com",
- "pooler.supabase",
  "neon.tech",
- ".neon.tech",
  "neon.build",
  "railway.app",
- ".railway.app",
  "railway.internal",
  "render.com",
- ".render.com",
  "onrender.com",
- "heroku.com",
- "herokuapp.com",
- ".heroku.com",
+ "heroku",
  "timescale",
- "timescaledb",
  ".tsdb.cloud",
  "cockroachlabs",
  "cockroachdb.cloud",
  ".crdb.io",
- "digitalocean.com",
+ "digitalocean",
  "db.ondigitalocean",
  "do-user-",
- ".db.ondigitalocean.com",
  "aiven",
- "aivencloud",
- ".aiven.io",
- ".aivencloud.com",
  "crunchydata",
- ".crunchydata.com",
  "elephantsql",
- ".elephantsql.com",
  "yugabyte",
- ".yugabyte.cloud",
  "scaleway",
  ".rdb.fr-par.scw.cloud",
  "vercel-storage",
- ".postgres.vercel-storage.com",
  "psdb.cloud",
- ".psdb.cloud",
  "xata.sh",
- ".xata.sh",
  "fly.dev",
- ".fly.dev",
  "fly.io"
  ];
- for (const pattern of cloudProviderPatterns) {
- if (url.includes(pattern)) {
- return true;
- }
- }
- const ipv4PortPattern = /\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}\b/;
- if (ipv4PortPattern.test(originalUrl)) {
+ if (cloudPatterns.some((p) => url.includes(p)))
  return true;
- }
- const ipv6Pattern = /\[[0-9a-f:]+\](:\d{1,5})?/i;
- if (ipv6Pattern.test(originalUrl)) {
+ if (/\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/.test(url))
  return true;
- }
- const hostPortDbPattern = /^[a-z0-9.-]+:\d{1,5}\/[a-z0-9_-]+/i;
- if (hostPortDbPattern.test(originalUrl)) {
+ if (/\[[0-9a-f:]+\](:\d{1,5})?/i.test(connectionUrl))
+ return true;
+ if (/^[a-z0-9_.-]+:\d{1,5}\/[a-z0-9_-]+/i.test(connectionUrl))
  return true;
- }
- if (url.includes("?") || url.includes("&")) {
- const postgresQueryParams = [
- "sslmode=",
- "sslcert=",
- "sslkey=",
- "sslrootcert=",
- "connect_timeout=",
- "application_name=",
- "options=",
- "fallback_application_name=",
- "keepalives=",
- "target_session_attrs="
- ];
- for (const param of postgresQueryParams) {
- if (url.includes(param)) {
- return true;
- }
- }
- }
  logger5.debug({ src: "plugin:sql", urlPreview: url.substring(0, 50) }, "Connection string did not match any PostgreSQL patterns");
  return false;
  }
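The rewritten isRealPostgresDatabase replaces the old scheme/port/cloud loops with ordered short-circuit checks: reject known non-Postgres schemes and :memory:, accept Postgres schemes outright, reject pglite/sqlite, then fall back to localhost, connection-parameter, credential, port, cloud-host, and host:port/db heuristics. A standalone condensation of the new ordering, for illustration only (it omits the cloud-host, IPv6, and host:port/db checks the real method keeps):

    function looksLikePostgres(connectionUrl: string | null | undefined): boolean {
      if (!connectionUrl?.trim()) return false;
      const url = connectionUrl.trim().toLowerCase();
      // 1. Known non-Postgres schemes and in-memory URLs lose immediately.
      if (["mysql://", "mariadb://", "mongodb://"].some((s) => url.startsWith(s))) return false;
      if (url.includes(":memory:")) return false;
      // 2. Postgres schemes win immediately.
      if (["postgres://", "postgresql://"].some((s) => url.startsWith(s))) return true;
      // 3. Embedded/file databases are excluded before the fuzzy heuristics.
      if (["pglite", "sqlite"].some((p) => url.includes(p))) return false;
      // 4. Fuzzy fallbacks: typical Postgres ports and key=value connection parameters.
      if (/:(5432|5433|6432)\b/.test(url)) return true;
      return ["host=", "dbname=", "sslmode="].some((p) => url.includes(p));
    }

    console.assert(looksLikePostgres("postgres://u:p@db:5432/app") === true);
    console.assert(looksLikePostgres("mysql://u:p@db:3306/app") === false);
    console.assert(looksLikePostgres("host=10.0.0.5 dbname=app") === true);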
@@ -20095,7 +20009,7 @@ class RuntimeMigrator {
  logger5.debug({ src: "plugin:sql", pluginName }, "Using PostgreSQL advisory locks");
  const lockIdStr = lockId.toString();
  const lockResult = await this.db.execute(sql22`SELECT pg_try_advisory_lock(CAST(${lockIdStr} AS bigint)) as acquired`);
- lockAcquired = lockResult.rows[0]?.acquired === true;
+ lockAcquired = getRow(lockResult)?.acquired === true;
  if (!lockAcquired) {
  logger5.info({ src: "plugin:sql", pluginName }, "Migration already in progress, waiting for lock");
  await this.db.execute(sql22`SELECT pg_advisory_lock(CAST(${lockIdStr} AS bigint))`);
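For context: pg_try_advisory_lock returns a boolean immediately, while pg_advisory_lock blocks until the lock is free, so the code above tries first and only blocks when another migrator already holds the lock. A minimal sketch of that pattern with node-postgres (the lock id is an arbitrary example, not the plugin's real derivation):

    import { Client } from "pg";

    async function withAdvisoryLock(client: Client, lockId: number, fn: () => Promise<void>) {
      const res = await client.query("SELECT pg_try_advisory_lock($1) AS acquired", [lockId]);
      if (!res.rows[0].acquired) {
        // Another session is migrating; block until it releases the lock.
        await client.query("SELECT pg_advisory_lock($1)", [lockId]);
      }
      try {
        await fn();
      } finally {
        await client.query("SELECT pg_advisory_unlock($1)", [lockId]);
      }
    }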
@@ -20136,13 +20050,33 @@ class RuntimeMigrator {
  logger5.info({ src: "plugin:sql", pluginName }, "No snapshot found but tables exist in database, introspecting");
  const schemaName = this.getExpectedSchemaName(pluginName);
  const introspectedSnapshot = await this.introspector.introspectSchema(schemaName);
- if (Object.keys(introspectedSnapshot.tables).length > 0) {
- await this.snapshotStorage.saveSnapshot(pluginName, 0, introspectedSnapshot);
+ const expectedTableNames = new Set;
+ for (const tableKey of Object.keys(currentSnapshot.tables)) {
+ const tableData = currentSnapshot.tables[tableKey];
+ const tableName = tableData.name || tableKey.split(".").pop();
+ expectedTableNames.add(tableName);
+ }
+ const filteredTables = {};
+ for (const tableKey of Object.keys(introspectedSnapshot.tables)) {
+ const tableData = introspectedSnapshot.tables[tableKey];
+ const tableName = tableData.name || tableKey.split(".").pop();
+ if (expectedTableNames.has(tableName)) {
+ filteredTables[tableKey] = tableData;
+ } else {
+ logger5.debug({ src: "plugin:sql", pluginName, tableName }, "Ignoring table from introspection (not in current schema)");
+ }
+ }
+ const filteredSnapshot = {
+ ...introspectedSnapshot,
+ tables: filteredTables
+ };
+ if (Object.keys(filteredSnapshot.tables).length > 0) {
+ await this.snapshotStorage.saveSnapshot(pluginName, 0, filteredSnapshot);
  await this.journalStorage.updateJournal(pluginName, 0, `introspected_${Date.now()}`, true);
- const introspectedHash = hashSnapshot(introspectedSnapshot);
- await this.migrationTracker.recordMigration(pluginName, introspectedHash, Date.now());
+ const filteredHash = hashSnapshot(filteredSnapshot);
+ await this.migrationTracker.recordMigration(pluginName, filteredHash, Date.now());
  logger5.info({ src: "plugin:sql", pluginName }, "Created initial snapshot from existing database");
- previousSnapshot = introspectedSnapshot;
+ previousSnapshot = filteredSnapshot;
  }
  }
  }
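The new filtering step means the bootstrap snapshot records only tables the plugin's current schema actually declares, so unrelated tables sharing the database schema no longer pollute the baseline and trigger spurious diffs. A reduced sketch of the set-intersection logic, with the snapshot shape simplified to just table names:

    type Snapshot = { tables: Record<string, { name?: string }> };

    function filterToExpectedTables(introspected: Snapshot, current: Snapshot): Snapshot {
      const expected = new Set(
        Object.entries(current.tables).map(([key, t]) => t.name ?? key.split(".").pop()!)
      );
      const tables: Snapshot["tables"] = {};
      for (const [key, t] of Object.entries(introspected.tables)) {
        const name = t.name ?? key.split(".").pop()!;
        if (expected.has(name)) tables[key] = t; // keep only tables the schema declares
      }
      return { ...introspected, tables };
    }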
@@ -20326,14 +20260,63 @@ var init_runtime_migrator2 = __esm(() => {
  import { logger as logger6 } from "@elizaos/core";
  import { sql as sql23 } from "drizzle-orm";
  async function migrateToEntityRLS(adapter) {
- const db = adapter.db;
+ const db = getDb(adapter);
  try {
  await db.execute(sql23`SELECT 1 FROM pg_tables LIMIT 1`);
  } catch {
  logger6.debug("[Migration] ⊘ Not PostgreSQL, skipping PostgreSQL-specific migrations");
  return;
  }
- logger6.info("[Migration] Starting develop → feat/entity-rls migration...");
+ let schemaAlreadyMigrated = false;
+ try {
+ const migrationCheck = await db.execute(sql23`
+ SELECT column_name FROM information_schema.columns
+ WHERE table_schema = 'public'
+ AND table_name = 'rooms'
+ AND column_name = 'agent_id'
+ `);
+ if (migrationCheck.rows && migrationCheck.rows.length > 0) {
+ schemaAlreadyMigrated = true;
+ logger6.debug("[Migration] ⊘ Schema already migrated (snake_case columns exist)");
+ }
+ } catch {
+ logger6.debug("[Migration] → rooms table not found, will be created by RuntimeMigrator");
+ return;
+ }
+ if (schemaAlreadyMigrated) {
+ const dataIsolationEnabled = process.env.ENABLE_DATA_ISOLATION === "true";
+ if (dataIsolationEnabled) {
+ logger6.debug("[Migration] ⊘ Schema already migrated, RLS enabled - nothing to do");
+ return;
+ }
+ logger6.debug("[Migration] → Schema migrated but RLS disabled, cleaning up...");
+ try {
+ const tablesWithRls = await db.execute(sql23`
+ SELECT c.relname as tablename
+ FROM pg_class c
+ JOIN pg_namespace n ON n.oid = c.relnamespace
+ WHERE n.nspname = 'public'
+ AND c.relkind = 'r'
+ AND c.relrowsecurity = true
+ ORDER BY c.relname
+ `);
+ if (tablesWithRls.rows && tablesWithRls.rows.length > 0) {
+ for (const row of tablesWithRls.rows) {
+ const tableName = row.tablename;
+ try {
+ await db.execute(sql23.raw(`ALTER TABLE "${tableName}" DISABLE ROW LEVEL SECURITY`));
+ } catch {}
+ }
+ logger6.debug(`[Migration] ✓ RLS cleanup completed (${tablesWithRls.rows.length} tables)`);
+ } else {
+ logger6.debug("[Migration] ⊘ No tables with RLS to clean up");
+ }
+ } catch {
+ logger6.debug("[Migration] ⊘ Could not perform RLS cleanup");
+ }
+ return;
+ }
+ logger6.info("[Migration] Starting pre-1.6.5 → 1.6.5+ schema migration...");
  try {
  logger6.debug("[Migration] → Clearing RuntimeMigrator snapshot cache...");
  try {
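Two behavioral changes land in this hunk: the migration now short-circuits when rooms.agent_id already exists (the schema is already on the new snake_case naming), and RLS cleanup consults pg_class.relrowsecurity instead of blindly iterating every table in pg_tables. A hedged sketch of the catalog query on its own, using node-postgres:

    import { Client } from "pg";

    // List ordinary tables in `public` that actually have row-level security
    // enabled, via the same pg_class/pg_namespace join as the migration above.
    async function tablesWithRls(client: Client): Promise<string[]> {
      const res = await client.query(`
        SELECT c.relname AS tablename
        FROM pg_class c
        JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE n.nspname = 'public'
          AND c.relkind = 'r'
          AND c.relrowsecurity = true
        ORDER BY c.relname
      `);
      return res.rows.map((r) => r.tablename);
    }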
@@ -20342,27 +20325,34 @@ async function migrateToEntityRLS(adapter) {
  } catch (error) {
  logger6.debug("[Migration] ⊘ No snapshot cache to clear (migrations schema not yet created)");
  }
- logger6.debug("[Migration] → Disabling Row Level Security on all tables...");
+ logger6.debug("[Migration] → Checking for Row Level Security to disable...");
  try {
- const tablesResult = await db.execute(sql23`
- SELECT tablename
- FROM pg_tables
- WHERE schemaname = 'public'
- ORDER BY tablename
+ const tablesWithRls = await db.execute(sql23`
+ SELECT c.relname as tablename
+ FROM pg_class c
+ JOIN pg_namespace n ON n.oid = c.relnamespace
+ WHERE n.nspname = 'public'
+ AND c.relkind = 'r'
+ AND c.relrowsecurity = true
+ ORDER BY c.relname
  `);
- for (const row of tablesResult.rows || []) {
- const tableName = row.tablename;
- try {
- await db.execute(sql23.raw(`ALTER TABLE "${tableName}" DISABLE ROW LEVEL SECURITY`));
- logger6.debug(`[Migration] Disabled RLS on ${tableName}`);
- } catch (error) {
- logger6.debug(`[Migration] Could not disable RLS on ${tableName}`);
+ if (tablesWithRls.rows && tablesWithRls.rows.length > 0) {
+ for (const row of tablesWithRls.rows) {
+ const tableName = row.tablename;
+ try {
+ await db.execute(sql23.raw(`ALTER TABLE "${tableName}" DISABLE ROW LEVEL SECURITY`));
+ logger6.debug(`[Migration] Disabled RLS on ${tableName}`);
+ } catch (error) {
+ logger6.debug(`[Migration] ⊘ Could not disable RLS on ${tableName}`);
+ }
  }
+ } else {
+ logger6.debug("[Migration] ⊘ No tables with RLS enabled");
  }
  } catch (error) {
- logger6.debug("[Migration] ⊘ Could not disable RLS (may not have permissions)");
+ logger6.debug("[Migration] ⊘ Could not check RLS (may not have permissions)");
  }
- logger6.debug("[Migration] → Handling server_id → message_server_id migrations...");
+ logger6.debug("[Migration] → Handling serverId/server_id → message_server_id migrations...");
  const tablesToMigrate = ["channels", "worlds", "rooms"];
  for (const tableName of tablesToMigrate) {
  try {
@@ -20371,34 +20361,49 @@ async function migrateToEntityRLS(adapter) {
  FROM information_schema.columns
  WHERE table_schema = 'public'
  AND table_name = ${tableName}
- AND column_name IN ('server_id', 'message_server_id')
+ AND column_name IN ('server_id', 'serverId', 'message_server_id')
  ORDER BY column_name
  `);
  const columns = columnsResult.rows || [];
- const serverId = columns.find((c) => c.column_name === "server_id");
+ const serverIdSnake = columns.find((c) => c.column_name === "server_id");
+ const serverIdCamel = columns.find((c) => c.column_name === "serverId");
  const messageServerId = columns.find((c) => c.column_name === "message_server_id");
+ const serverId = serverIdSnake || serverIdCamel;
+ const oldColumnName = serverIdSnake ? "server_id" : "serverId";
  if (serverId && !messageServerId) {
- logger6.debug(`[Migration] → Renaming ${tableName}.server_id to message_server_id...`);
- await db.execute(sql23.raw(`ALTER TABLE "${tableName}" RENAME COLUMN "server_id" TO "message_server_id"`));
- logger6.debug(`[Migration] ✓ Renamed ${tableName}.server_id → message_server_id`);
+ logger6.debug(`[Migration] → Renaming ${tableName}.${oldColumnName} to message_server_id...`);
+ await db.execute(sql23.raw(`ALTER TABLE "${tableName}" RENAME COLUMN "${oldColumnName}" TO "message_server_id"`));
+ logger6.debug(`[Migration] ✓ Renamed ${tableName}.${oldColumnName} → message_server_id`);
  if (serverId.data_type === "text") {
  try {
  logger6.debug(`[Migration] → Dropping DEFAULT constraint on ${tableName}.message_server_id...`);
  await db.execute(sql23.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" DROP DEFAULT`));
  logger6.debug(`[Migration] ✓ Dropped DEFAULT constraint`);
+ } catch {
+ logger6.debug(`[Migration] ⊘ No DEFAULT constraint to drop on ${tableName}.message_server_id`);
+ }
+ try {
  logger6.debug(`[Migration] → Converting ${tableName}.message_server_id from text to uuid...`);
- await db.execute(sql23.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING "message_server_id"::uuid`));
+ await db.execute(sql23.raw(`
+ ALTER TABLE "${tableName}"
+ ALTER COLUMN "message_server_id" TYPE uuid
+ USING CASE
+ WHEN "message_server_id" IS NULL THEN NULL
+ WHEN "message_server_id" = '' THEN NULL
+ WHEN "message_server_id" ~ '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
+ THEN "message_server_id"::uuid
+ ELSE md5("message_server_id")::uuid
+ END
+ `));
  logger6.debug(`[Migration] ✓ Converted ${tableName}.message_server_id to uuid`);
  } catch (convertError) {
- logger6.warn(`[Migration] ⚠️ Could not convert ${tableName}.message_server_id to uuid - data may not be valid UUIDs`);
- logger6.debug(`[Migration] → Setting invalid UUIDs to NULL in ${tableName}.message_server_id...`);
- await db.execute(sql23.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING CASE WHEN "message_server_id" ~ '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$' THEN "message_server_id"::uuid ELSE NULL END`));
+ logger6.warn(`[Migration] ⚠️ Could not convert ${tableName}.message_server_id to uuid: ${convertError}`);
  }
  }
  if (tableName === "channels") {
  const nullCountResult = await db.execute(sql23.raw(`SELECT COUNT(*) as count FROM "${tableName}" WHERE "message_server_id" IS NULL`));
  const nullCount = nullCountResult.rows?.[0]?.count;
- if (nullCount && parseInt(nullCount) > 0) {
+ if (nullCount && parseInt(nullCount, 10) > 0) {
  logger6.warn(`[Migration] ⚠️ ${tableName} has ${nullCount} rows with NULL message_server_id - these will be deleted`);
  await db.execute(sql23.raw(`DELETE FROM "${tableName}" WHERE "message_server_id" IS NULL`));
  logger6.debug(`[Migration] ✓ Deleted ${nullCount} rows with NULL message_server_id from ${tableName}`);
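The substantive data-safety change here: a bare ::uuid cast aborts the whole ALTER on the first malformed value, whereas the new USING CASE maps NULL and empty strings to NULL, casts valid UUID literals directly, and derives a deterministic UUID from anything else via md5() (an md5 hex digest has exactly the 32 hex digits a UUID needs). Legacy non-UUID server ids are therefore preserved as stable UUIDs instead of being nulled, as the 1.6.5 fallback did. The statement in isolation, shown on an example table:

    // Example table name; the migration runs this for channels, worlds, and rooms.
    const convertToUuid = `
      ALTER TABLE "channels"
      ALTER COLUMN "message_server_id" TYPE uuid
      USING CASE
        WHEN "message_server_id" IS NULL THEN NULL
        WHEN "message_server_id" = '' THEN NULL
        WHEN "message_server_id" ~ '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
          THEN "message_server_id"::uuid
        ELSE md5("message_server_id")::uuid
      END`;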
@@ -20408,9 +20413,9 @@ async function migrateToEntityRLS(adapter) {
  logger6.debug(`[Migration] ✓ Set ${tableName}.message_server_id NOT NULL`);
  }
  } else if (serverId && messageServerId) {
- logger6.debug(`[Migration] → ${tableName} has both columns, dropping server_id...`);
- await db.execute(sql23.raw(`ALTER TABLE "${tableName}" DROP COLUMN "server_id" CASCADE`));
- logger6.debug(`[Migration] ✓ Dropped ${tableName}.server_id (will be re-added by RuntimeMigrator for RLS)`);
+ logger6.debug(`[Migration] → ${tableName} has both columns, dropping ${oldColumnName}...`);
+ await db.execute(sql23.raw(`ALTER TABLE "${tableName}" DROP COLUMN "${oldColumnName}" CASCADE`));
+ logger6.debug(`[Migration] ✓ Dropped ${tableName}.${oldColumnName}`);
  } else if (!serverId && messageServerId) {
  if (messageServerId.data_type === "text") {
  logger6.debug(`[Migration] → ${tableName}.message_server_id exists but is TEXT, needs UUID conversion...`);
@@ -20471,6 +20476,67 @@ async function migrateToEntityRLS(adapter) {
  } catch (error) {
  logger6.debug("[Migration] ⊘ Could not drop server_id columns (may not have permissions)");
  }
+ logger6.debug("[Migration] → Checking agents.owner_id → server_id rename...");
+ try {
+ const agentsColumnsResult = await db.execute(sql23`
+ SELECT column_name
+ FROM information_schema.columns
+ WHERE table_schema = 'public'
+ AND table_name = 'agents'
+ AND column_name IN ('owner_id', 'server_id')
+ ORDER BY column_name
+ `);
+ const agentsColumns = agentsColumnsResult.rows || [];
+ const hasOwnerId = agentsColumns.some((c) => c.column_name === "owner_id");
+ const hasServerId = agentsColumns.some((c) => c.column_name === "server_id");
+ if (hasOwnerId && !hasServerId) {
+ logger6.debug("[Migration] → Renaming agents.owner_id to server_id...");
+ await db.execute(sql23.raw(`ALTER TABLE "agents" RENAME COLUMN "owner_id" TO "server_id"`));
+ logger6.debug("[Migration] ✓ Renamed agents.owner_id → server_id");
+ } else if (hasOwnerId && hasServerId) {
+ logger6.debug("[Migration] → Both owner_id and server_id exist, dropping owner_id...");
+ await db.execute(sql23.raw(`ALTER TABLE "agents" DROP COLUMN "owner_id" CASCADE`));
+ logger6.debug("[Migration] ✓ Dropped agents.owner_id");
+ } else {
+ logger6.debug("[Migration] ⊘ agents table already has server_id (or no owner_id), skipping");
+ }
+ } catch (error) {
+ logger6.debug("[Migration] ⊘ Could not check/migrate agents.owner_id");
+ }
+ logger6.debug("[Migration] → Checking for owners → servers data migration...");
+ try {
+ const ownersTableResult = await db.execute(sql23`
+ SELECT table_name
+ FROM information_schema.tables
+ WHERE table_schema = 'public'
+ AND table_name = 'owners'
+ `);
+ if (ownersTableResult.rows && ownersTableResult.rows.length > 0) {
+ logger6.debug("[Migration] → Ensuring servers table exists...");
+ await db.execute(sql23.raw(`
+ CREATE TABLE IF NOT EXISTS "servers" (
+ "id" uuid PRIMARY KEY,
+ "created_at" timestamp with time zone DEFAULT now() NOT NULL,
+ "updated_at" timestamp with time zone DEFAULT now() NOT NULL
+ )
+ `));
+ logger6.debug("[Migration] → Migrating owners data to servers...");
+ await db.execute(sql23.raw(`
+ INSERT INTO "servers" ("id", "created_at", "updated_at")
+ SELECT "id", COALESCE("created_at", now()), COALESCE("updated_at", now())
+ FROM "owners"
+ ON CONFLICT ("id") DO NOTHING
+ `));
+ logger6.debug("[Migration] ✓ Migrated owners data to servers");
+ logger6.debug("[Migration] → Dropping obsolete owners table...");
+ await db.execute(sql23.raw(`DROP TABLE IF EXISTS "owners" CASCADE`));
+ logger6.debug("[Migration] ✓ Dropped obsolete owners table");
+ } else {
+ logger6.debug("[Migration] ⊘ owners table not found, skipping");
+ }
+ } catch (error) {
+ logger6.warn(`[Migration] ⚠️ Could not migrate owners → servers: ${error}`);
+ }
  logger6.debug("[Migration] → Checking server_agents table rename...");
  try {
  const tablesResult = await db.execute(sql23`
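The owners → servers block is deliberately re-runnable: CREATE TABLE IF NOT EXISTS, INSERT ... ON CONFLICT ("id") DO NOTHING, then DROP TABLE IF EXISTS, so a crash between steps can be retried without duplicating rows. A compressed sketch of the same copy-then-drop pattern (node-postgres assumed):

    import { Client } from "pg";

    async function copyOwnersToServers(client: Client) {
      await client.query(`CREATE TABLE IF NOT EXISTS "servers" (
        "id" uuid PRIMARY KEY,
        "created_at" timestamp with time zone DEFAULT now() NOT NULL,
        "updated_at" timestamp with time zone DEFAULT now() NOT NULL
      )`);
      // Re-running the INSERT is harmless: ids already copied are skipped.
      await client.query(`INSERT INTO "servers" ("id", "created_at", "updated_at")
        SELECT "id", COALESCE("created_at", now()), COALESCE("updated_at", now())
        FROM "owners" ON CONFLICT ("id") DO NOTHING`);
      await client.query(`DROP TABLE IF EXISTS "owners" CASCADE`);
    }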
@@ -20575,7 +20641,87 @@ async function migrateToEntityRLS(adapter) {
  } catch (error) {
  logger6.debug("[Migration] ⊘ Could not drop indexes (may not have permissions)");
  }
- logger6.info("[Migration] Migration complete - develop to feat/entity-rls migration finished");
+ logger6.debug("[Migration] Starting camelCase snake_case column renames...");
+ const columnRenames = [
+ { table: "rooms", from: "agentId", to: "agent_id" },
+ { table: "rooms", from: "worldId", to: "world_id" },
+ { table: "rooms", from: "channelId", to: "channel_id" },
+ { table: "rooms", from: "createdAt", to: "created_at" },
+ { table: "worlds", from: "agentId", to: "agent_id" },
+ { table: "worlds", from: "createdAt", to: "created_at" },
+ { table: "memories", from: "createdAt", to: "created_at" },
+ { table: "memories", from: "entityId", to: "entity_id" },
+ { table: "memories", from: "agentId", to: "agent_id" },
+ { table: "memories", from: "roomId", to: "room_id" },
+ { table: "memories", from: "worldId", to: "world_id" },
+ { table: "components", from: "entityId", to: "entity_id" },
+ { table: "components", from: "agentId", to: "agent_id" },
+ { table: "components", from: "roomId", to: "room_id" },
+ { table: "components", from: "worldId", to: "world_id" },
+ { table: "components", from: "sourceEntityId", to: "source_entity_id" },
+ { table: "components", from: "createdAt", to: "created_at" },
+ { table: "participants", from: "entityId", to: "entity_id" },
+ { table: "participants", from: "roomId", to: "room_id" },
+ { table: "participants", from: "agentId", to: "agent_id" },
+ { table: "participants", from: "roomState", to: "room_state" },
+ { table: "participants", from: "createdAt", to: "created_at" },
+ { table: "relationships", from: "sourceEntityId", to: "source_entity_id" },
+ { table: "relationships", from: "targetEntityId", to: "target_entity_id" },
+ { table: "relationships", from: "agentId", to: "agent_id" },
+ { table: "relationships", from: "createdAt", to: "created_at" },
+ { table: "logs", from: "entityId", to: "entity_id" },
+ { table: "logs", from: "roomId", to: "room_id" },
+ { table: "logs", from: "createdAt", to: "created_at" },
+ { table: "tasks", from: "roomId", to: "room_id" },
+ { table: "tasks", from: "worldId", to: "world_id" },
+ { table: "tasks", from: "entityId", to: "entity_id" },
+ { table: "tasks", from: "createdAt", to: "created_at" },
+ { table: "tasks", from: "updatedAt", to: "updated_at" },
+ { table: "agents", from: "createdAt", to: "created_at" },
+ { table: "agents", from: "updatedAt", to: "updated_at" },
+ { table: "entities", from: "agentId", to: "agent_id" },
+ { table: "entities", from: "createdAt", to: "created_at" },
+ { table: "embeddings", from: "memoryId", to: "memory_id" },
+ { table: "embeddings", from: "createdAt", to: "created_at" },
+ { table: "cache", from: "agentId", to: "agent_id" },
+ { table: "cache", from: "createdAt", to: "created_at" },
+ { table: "cache", from: "expiresAt", to: "expires_at" }
+ ];
+ for (const rename of columnRenames) {
+ try {
+ const tableExistsResult = await db.execute(sql23`
+ SELECT 1 FROM information_schema.tables
+ WHERE table_schema = 'public' AND table_name = ${rename.table}
+ `);
+ if (!tableExistsResult.rows || tableExistsResult.rows.length === 0) {
+ continue;
+ }
+ const columnsResult = await db.execute(sql23`
+ SELECT column_name
+ FROM information_schema.columns
+ WHERE table_schema = 'public'
+ AND table_name = ${rename.table}
+ AND column_name IN (${rename.from}, ${rename.to})
+ ORDER BY column_name
+ `);
+ const columns = columnsResult.rows || [];
+ const hasOldColumn = columns.some((c) => c.column_name === rename.from);
+ const hasNewColumn = columns.some((c) => c.column_name === rename.to);
+ if (hasOldColumn && !hasNewColumn) {
+ logger6.debug(`[Migration] → Renaming ${rename.table}.${rename.from} to ${rename.to}...`);
+ await db.execute(sql23.raw(`ALTER TABLE "${rename.table}" RENAME COLUMN "${rename.from}" TO "${rename.to}"`));
+ logger6.debug(`[Migration] ✓ Renamed ${rename.table}.${rename.from} → ${rename.to}`);
+ } else if (hasOldColumn && hasNewColumn) {
+ logger6.debug(`[Migration] → Both columns exist, dropping ${rename.table}.${rename.from}...`);
+ await db.execute(sql23.raw(`ALTER TABLE "${rename.table}" DROP COLUMN "${rename.from}" CASCADE`));
+ logger6.debug(`[Migration] ✓ Dropped ${rename.table}.${rename.from}`);
+ }
+ } catch (error) {
+ logger6.debug(`[Migration] ⊘ Could not process ${rename.table}.${rename.from}: ${error}`);
+ }
+ }
+ logger6.debug("[Migration] ✓ Completed camelCase → snake_case column renames");
+ logger6.info("[Migration] ✓ Migration complete - pre-1.6.5 → 1.6.5+ schema migration finished");
  } catch (error) {
  logger6.error("[Migration] Migration failed:", String(error));
  throw error;
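Each rename in the loop above is guarded by an information_schema probe, making the whole pass idempotent: old column only → RENAME, both present → DROP the stale old one, new column only → skip. A distilled sketch of that guard as a hypothetical standalone helper:

    import { Client } from "pg";

    async function renameColumnIfNeeded(client: Client, table: string, from: string, to: string) {
      const res = await client.query(
        `SELECT column_name FROM information_schema.columns
         WHERE table_schema = 'public' AND table_name = $1 AND column_name IN ($2, $3)`,
        [table, from, to]
      );
      const names = res.rows.map((r) => r.column_name);
      if (names.includes(from) && !names.includes(to)) {
        await client.query(`ALTER TABLE "${table}" RENAME COLUMN "${from}" TO "${to}"`);
      } else if (names.includes(from) && names.includes(to)) {
        // Data already lives under the new name; drop the stale duplicate.
        await client.query(`ALTER TABLE "${table}" DROP COLUMN "${from}" CASCADE`);
      }
    }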
@@ -20587,7 +20733,7 @@ var init_migrations = () => {};
  import { logger as logger7, validateUuid } from "@elizaos/core";
  import { sql as sql24, eq } from "drizzle-orm";
  async function installRLSFunctions(adapter) {
- const db = adapter.db;
+ const db = getDb(adapter);
  await db.execute(sql24`
  CREATE TABLE IF NOT EXISTS servers (
  id UUID PRIMARY KEY,
@@ -20703,7 +20849,7 @@ async function installRLSFunctions(adapter) {
  await installEntityRLS(adapter);
  }
  async function applyRLSToNewTables(adapter) {
- const db = adapter.db;
+ const db = getDb(adapter);
  try {
  await db.execute(sql24`SELECT apply_rls_to_all_tables()`);
  logger7.info({ src: "plugin:sql" }, "RLS applied to all tables");
@@ -20712,7 +20858,7 @@ async function applyRLSToNewTables(adapter) {
  }
  }
  async function installEntityRLS(adapter) {
- const db = adapter.db;
+ const db = getDb(adapter);
  logger7.info("[Entity RLS] Installing entity RLS functions and policies...");
  await db.execute(sql24`
  CREATE OR REPLACE FUNCTION current_entity_id()
@@ -20753,26 +20899,26 @@ async function installEntityRLS(adapter) {
  BEGIN
  full_table_name := schema_name || '.' || table_name;
 
- -- Check which columns exist (using camelCase as per schema definition)
+ -- Check which columns exist
  SELECT EXISTS (
  SELECT 1 FROM information_schema.columns
  WHERE information_schema.columns.table_schema = schema_name
  AND information_schema.columns.table_name = add_entity_isolation.table_name
- AND information_schema.columns.column_name = 'entityId'
+ AND information_schema.columns.column_name = 'entity_id'
  ) INTO has_entity_id;
 
  SELECT EXISTS (
  SELECT 1 FROM information_schema.columns
  WHERE information_schema.columns.table_schema = schema_name
  AND information_schema.columns.table_name = add_entity_isolation.table_name
- AND information_schema.columns.column_name = 'authorId'
+ AND information_schema.columns.column_name = 'author_id'
  ) INTO has_author_id;
 
  SELECT EXISTS (
  SELECT 1 FROM information_schema.columns
  WHERE information_schema.columns.table_schema = schema_name
  AND information_schema.columns.table_name = add_entity_isolation.table_name
- AND information_schema.columns.column_name = 'roomId'
+ AND information_schema.columns.column_name = 'room_id'
  ) INTO has_room_id;
 
  -- Skip if no entity-related columns
@@ -20782,20 +20928,20 @@ async function installEntityRLS(adapter) {
  END IF;
 
  -- Determine which column to use for entity filtering
- -- Priority: roomId (shared access via participants) > entityId/authorId (direct access)
+ -- Priority: room_id (shared access via participants) > entity_id/author_id (direct access)
  --
- -- SPECIAL CASE: participants table must use direct entityId to avoid infinite recursion
+ -- SPECIAL CASE: participants table must use direct entity_id to avoid infinite recursion
  IF table_name = 'participants' AND has_entity_id THEN
- entity_column_name := 'entityId';
+ entity_column_name := 'entity_id';
  room_column_name := NULL;
  ELSIF has_room_id THEN
- room_column_name := 'roomId';
+ room_column_name := 'room_id';
  entity_column_name := NULL;
  ELSIF has_entity_id THEN
- entity_column_name := 'entityId';
+ entity_column_name := 'entity_id';
  room_column_name := NULL;
  ELSIF has_author_id THEN
- entity_column_name := 'authorId';
+ entity_column_name := 'author_id';
  room_column_name := NULL;
  ELSE
  entity_column_name := NULL;
@@ -20809,11 +20955,11 @@ async function installEntityRLS(adapter) {
  -- Drop existing entity policies if present
  EXECUTE format('DROP POLICY IF EXISTS entity_isolation_policy ON %I.%I', schema_name, table_name);
 
- -- CASE 1: Table has roomId or channelId (shared access via participants)
+ -- CASE 1: Table has room_id (shared access via participants)
  IF room_column_name IS NOT NULL THEN
  -- Determine the corresponding column name in participants table
- -- If the table has roomId, look for roomId in participants.roomId
- -- participants table uses: entityId (for participant), roomId (for room)
+ -- If the table has room_id, look for room_id in participants.room_id
+ -- participants table uses: entity_id (for participant), room_id (for room)
  -- RESTRICTIVE: Must pass BOTH server RLS AND entity RLS (combined with AND)
 
  -- Build policy with or without NULL check based on require_entity parameter
@@ -20825,21 +20971,21 @@ async function installEntityRLS(adapter) {
  USING (
  current_entity_id() IS NOT NULL
  AND %I IN (
- SELECT "roomId"
+ SELECT room_id
  FROM participants
- WHERE "entityId" = current_entity_id()
+ WHERE entity_id = current_entity_id()
  )
  )
  WITH CHECK (
  current_entity_id() IS NOT NULL
  AND %I IN (
- SELECT "roomId"
+ SELECT room_id
  FROM participants
- WHERE "entityId" = current_entity_id()
+ WHERE entity_id = current_entity_id()
  )
  )
  ', schema_name, table_name, room_column_name, room_column_name);
- RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (via % → participants.roomId, entity REQUIRED)', schema_name, table_name, room_column_name;
+ RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (via % → participants.room_id, entity REQUIRED)', schema_name, table_name, room_column_name;
  ELSE
  -- PERMISSIVE MODE: NULL entity_id allows system/admin access
  EXECUTE format('
@@ -20848,21 +20994,21 @@ async function installEntityRLS(adapter) {
  USING (
  current_entity_id() IS NULL
  OR %I IN (
- SELECT "roomId"
+ SELECT room_id
  FROM participants
- WHERE "entityId" = current_entity_id()
+ WHERE entity_id = current_entity_id()
  )
  )
  WITH CHECK (
  current_entity_id() IS NULL
  OR %I IN (
- SELECT "roomId"
+ SELECT room_id
  FROM participants
- WHERE "entityId" = current_entity_id()
+ WHERE entity_id = current_entity_id()
  )
  )
  ', schema_name, table_name, room_column_name, room_column_name);
- RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (via % → participants.roomId, NULL allowed)', schema_name, table_name, room_column_name;
+ RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (via % → participants.room_id, NULL allowed)', schema_name, table_name, room_column_name;
  END IF;
 
  -- CASE 2: Table has direct entity_id or author_id column
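Both policy variants gate row visibility on room membership: the row's room column must appear in the set of rooms the current entity participates in. The strict variant additionally requires current_entity_id() to be non-NULL, while the permissive one lets a NULL entity (system/admin sessions) through. After format() substitution the strict USING clause reduces to roughly the following (a plausible reconstruction for a table whose room column is room_id; the CREATE POLICY header itself sits outside this hunk):

    // Reconstructed USING clause after %I substitution, for illustration only.
    const strictUsingClause = `
      current_entity_id() IS NOT NULL
      AND room_id IN (
        SELECT room_id
        FROM participants
        WHERE entity_id = current_entity_id()
      )`;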
@@ -20960,7 +21106,7 @@ async function installEntityRLS(adapter) {
  logger7.info("[Entity RLS] Entity RLS functions installed successfully");
  }
  async function applyEntityRLSToAllTables(adapter) {
- const db = adapter.db;
+ const db = getDb(adapter);
  try {
  await db.execute(sql24`SELECT apply_entity_rls_to_all_tables()`);
  logger7.info("[Entity RLS] Applied entity RLS to all eligible tables");
@@ -20984,15 +21130,17 @@ class DatabaseMigrationService {
  constructor() {}
  async initializeWithDatabase(db) {
  this.db = db;
- await migrateToEntityRLS({ db });
+ const adapterWrapper = { db };
+ await migrateToEntityRLS(adapterWrapper);
  this.migrator = new RuntimeMigrator(db);
  await this.migrator.initialize();
  logger8.info({ src: "plugin:sql" }, "DatabaseMigrationService initialized");
  }
  discoverAndRegisterPluginSchemas(plugins) {
  for (const plugin of plugins) {
- if (plugin.schema) {
- this.registeredSchemas.set(plugin.name, plugin.schema);
+ const pluginWithSchema = plugin;
+ if (pluginWithSchema.schema) {
+ this.registeredSchemas.set(plugin.name, pluginWithSchema.schema);
  }
  }
  logger8.info({
@@ -21046,9 +21194,10 @@ class DatabaseMigrationService {
  if (dataIsolationEnabled) {
  try {
  logger8.info({ src: "plugin:sql" }, "Re-applying Row Level Security...");
- await installRLSFunctions({ db: this.db });
- await applyRLSToNewTables({ db: this.db });
- await applyEntityRLSToAllTables({ db: this.db });
+ const adapterWrapper = { db: this.db };
+ await installRLSFunctions(adapterWrapper);
+ await applyRLSToNewTables(adapterWrapper);
+ await applyEntityRLSToAllTables(adapterWrapper);
  logger8.info({ src: "plugin:sql" }, "RLS re-applied successfully");
  } catch (rlsError) {
  const errorMsg = rlsError instanceof Error ? rlsError.message : String(rlsError);
@@ -21202,13 +21351,13 @@ import { sql as sql3 } from "drizzle-orm";
  import { jsonb as jsonb3, pgTable as pgTable3, text as text3, timestamp as timestamp3, uuid as uuid3 } from "drizzle-orm/pg-core";
  var roomTable = pgTable3("rooms", {
  id: uuid3("id").notNull().primaryKey().default(sql3`gen_random_uuid()`),
- agentId: uuid3("agentId").references(() => agentTable.id, {
+ agentId: uuid3("agent_id").references(() => agentTable.id, {
  onDelete: "cascade"
  }),
  source: text3("source").notNull(),
  type: text3("type").notNull(),
  messageServerId: uuid3("message_server_id"),
- worldId: uuid3("worldId"),
+ worldId: uuid3("world_id"),
  name: text3("name"),
  metadata: jsonb3("metadata"),
  channelId: text3("channel_id"),
@@ -21219,18 +21368,18 @@ var roomTable = pgTable3("rooms", {
  var memoryTable = pgTable4("memories", {
  id: uuid4("id").primaryKey().notNull(),
  type: text4("type").notNull(),
- createdAt: timestamp4("createdAt").default(sql4`now()`).notNull(),
+ createdAt: timestamp4("created_at").default(sql4`now()`).notNull(),
  content: jsonb4("content").notNull(),
- entityId: uuid4("entityId").references(() => entityTable.id, {
+ entityId: uuid4("entity_id").references(() => entityTable.id, {
  onDelete: "cascade"
  }),
- agentId: uuid4("agentId").references(() => agentTable.id, {
+ agentId: uuid4("agent_id").references(() => agentTable.id, {
  onDelete: "cascade"
  }).notNull(),
- roomId: uuid4("roomId").references(() => roomTable.id, {
+ roomId: uuid4("room_id").references(() => roomTable.id, {
  onDelete: "cascade"
  }),
- worldId: uuid4("worldId"),
+ worldId: uuid4("world_id"),
  unique: boolean2("unique").default(true).notNull(),
  metadata: jsonb4("metadata").default({}).notNull()
  }, (table) => [
@@ -21349,7 +21498,7 @@ import { sql as sql7 } from "drizzle-orm";
  import { jsonb as jsonb6, pgTable as pgTable7, text as text6, timestamp as timestamp7, uuid as uuid7 } from "drizzle-orm/pg-core";
  var worldTable = pgTable7("worlds", {
  id: uuid7("id").notNull().primaryKey().default(sql7`gen_random_uuid()`),
- agentId: uuid7("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
+ agentId: uuid7("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
  name: text6("name").notNull(),
  metadata: jsonb6("metadata"),
  messageServerId: uuid7("message_server_id"),
@@ -21359,14 +21508,16 @@ var worldTable = pgTable7("worlds", {
  // src/schema/component.ts
  var componentTable = pgTable8("components", {
  id: uuid8("id").primaryKey().default(sql8`gen_random_uuid()`).notNull(),
- entityId: uuid8("entityId").references(() => entityTable.id, { onDelete: "cascade" }).notNull(),
- agentId: uuid8("agentId").references(() => agentTable.id, { onDelete: "cascade" }).notNull(),
- roomId: uuid8("roomId").references(() => roomTable.id, { onDelete: "cascade" }).notNull(),
- worldId: uuid8("worldId").references(() => worldTable.id, { onDelete: "cascade" }),
- sourceEntityId: uuid8("sourceEntityId").references(() => entityTable.id, { onDelete: "cascade" }),
+ entityId: uuid8("entity_id").references(() => entityTable.id, { onDelete: "cascade" }).notNull(),
+ agentId: uuid8("agent_id").references(() => agentTable.id, { onDelete: "cascade" }).notNull(),
+ roomId: uuid8("room_id").references(() => roomTable.id, { onDelete: "cascade" }).notNull(),
+ worldId: uuid8("world_id").references(() => worldTable.id, { onDelete: "cascade" }),
+ sourceEntityId: uuid8("source_entity_id").references(() => entityTable.id, {
+ onDelete: "cascade"
+ }),
  type: text7("type").notNull(),
  data: jsonb7("data").default(sql8`'{}'::jsonb`),
- createdAt: timestamp8("createdAt").default(sql8`now()`).notNull()
+ createdAt: timestamp8("created_at").default(sql8`now()`).notNull()
  });
  // src/schema/log.ts
  import { sql as sql9 } from "drizzle-orm";
@@ -21374,10 +21525,10 @@ import { foreignKey as foreignKey3, jsonb as jsonb8, pgTable as pgTable9, text a
  var logTable = pgTable9("logs", {
  id: uuid9("id").defaultRandom().notNull(),
  createdAt: timestamp9("created_at", { withTimezone: true }).default(sql9`now()`).notNull(),
- entityId: uuid9("entityId").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
+ entityId: uuid9("entity_id").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
  body: jsonb8("body").notNull(),
  type: text8("type").notNull(),
- roomId: uuid9("roomId").notNull().references(() => roomTable.id, { onDelete: "cascade" })
+ roomId: uuid9("room_id").notNull().references(() => roomTable.id, { onDelete: "cascade" })
  }, (table) => [
  foreignKey3({
  name: "fk_room",
@@ -21401,16 +21552,16 @@ import { foreignKey as foreignKey4, index as index3, pgTable as pgTable11, text
  var participantTable = pgTable11("participants", {
  id: uuid11("id").notNull().primaryKey().default(sql11`gen_random_uuid()`),
  createdAt: timestamp11("created_at", { withTimezone: true }).default(sql11`now()`).notNull(),
- entityId: uuid11("entityId").references(() => entityTable.id, {
+ entityId: uuid11("entity_id").references(() => entityTable.id, {
  onDelete: "cascade"
  }),
- roomId: uuid11("roomId").references(() => roomTable.id, {
+ roomId: uuid11("room_id").references(() => roomTable.id, {
  onDelete: "cascade"
  }),
- agentId: uuid11("agentId").references(() => agentTable.id, {
+ agentId: uuid11("agent_id").references(() => agentTable.id, {
  onDelete: "cascade"
  }),
- roomState: text9("roomState")
+ roomState: text9("room_state")
  }, (table) => [
  index3("idx_participants_user").on(table.entityId),
  index3("idx_participants_room").on(table.roomId),
@@ -21441,9 +21592,9 @@ import {
  var relationshipTable = pgTable12("relationships", {
  id: uuid12("id").notNull().primaryKey().default(sql12`gen_random_uuid()`),
  createdAt: timestamp12("created_at", { withTimezone: true }).default(sql12`now()`).notNull(),
- sourceEntityId: uuid12("sourceEntityId").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
- targetEntityId: uuid12("targetEntityId").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
- agentId: uuid12("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
+ sourceEntityId: uuid12("source_entity_id").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
+ targetEntityId: uuid12("target_entity_id").notNull().references(() => entityTable.id, { onDelete: "cascade" }),
+ agentId: uuid12("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
  tags: text10("tags").array(),
  metadata: jsonb9("metadata")
  }, (table) => [
@@ -21468,10 +21619,10 @@ var taskTable = pgTable13("tasks", {
  id: uuid13("id").primaryKey().defaultRandom(),
  name: text11("name").notNull(),
  description: text11("description"),
- roomId: uuid13("roomId"),
- worldId: uuid13("worldId"),
- entityId: uuid13("entityId"),
- agentId: uuid13("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
+ roomId: uuid13("room_id"),
+ worldId: uuid13("world_id"),
+ entityId: uuid13("entity_id"),
+ agentId: uuid13("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
  tags: text11("tags").array().default(sql13`'{}'::text[]`),
  metadata: jsonb10("metadata").default(sql13`'{}'::jsonb`),
  createdAt: timestamp13("created_at", { withTimezone: true }).defaultNow(),
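In these Drizzle table definitions the first argument to uuid()/text()/timestamp() is the physical column name, while the property key stays camelCase in TypeScript, so this whole block of changes renames only the SQL-side columns (agent_id, room_id, ...) without touching application code that reads table.agentId. A minimal sketch of the two-name convention:

    import { pgTable, uuid, text } from "drizzle-orm/pg-core";

    // Property key (JS) vs. column name (SQL): queries keep using example.agentId,
    // but the generated SQL now says "agent_id".
    const example = pgTable("example", {
      id: uuid("id").primaryKey(),
      agentId: uuid("agent_id"), // was uuid("agentId") before this release
      name: text("name"),
    });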
@@ -21619,7 +21770,11 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return this.withDatabase(async () => {
  const existingMemory = await this.db.select().from(memoryTable).innerJoin(embeddingTable, eq2(embeddingTable.memoryId, memoryTable.id)).where(eq2(memoryTable.agentId, this.agentId)).limit(1);
  if (existingMemory.length > 0) {
- Object.entries(DIMENSION_MAP).find(([_, colName]) => existingMemory[0].embeddings[colName] !== null);
+ const joinedResult = existingMemory[0];
+ Object.entries(DIMENSION_MAP).find(([_, colName]) => {
+ const embeddingCol = colName;
+ return joinedResult.embeddings[embeddingCol] !== null;
+ });
  }
  this.embeddingDimension = DIMENSION_MAP[dimension];
  });
@@ -21758,7 +21913,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return output;
  };
  const finalSettings = deepMerge(currentSettings, updatedSettings);
- return finalSettings === undefined ? {} : finalSettings;
+ return finalSettings ?? {};
  }
  async deleteAgent(agentId) {
  return this.withDatabase(async () => {
@@ -22325,6 +22480,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  id: log2.id,
  entityId: log2.entityId,
  roomId: log2.roomId,
+ type: log2.type,
  body: log2.body,
  createdAt: new Date(log2.createdAt)
  }));
@@ -23137,7 +23293,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return this.withRetry(async () => {
  return this.withDatabase(async () => {
  const result = await this.db.select().from(taskTable).where(and(eq2(taskTable.agentId, this.agentId), ...params.roomId ? [eq2(taskTable.roomId, params.roomId)] : [], ...params.tags && params.tags.length > 0 ? [
- sql25`${taskTable.tags} @> ARRAY[${sql25.raw(params.tags.map((t) => `'${t.replace(/'/g, "''")}'`).join(", "))}]::text[]`
+ sql25`${taskTable.tags} @> ARRAY[${sql25.join(params.tags.map((t) => sql25`${t}`), sql25`, `)}]::text[]`
  ] : []));
  return result.map((row) => ({
  id: row.id,
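This one-line change is a quoting fix: the old code spliced manually escaped tag strings into the query through sql25.raw, while the new code composes the array with Drizzle's sql.join over parameterized fragments, so each tag travels as a bound parameter rather than inlined SQL text. A sketch of the composition in isolation:

    import { sql } from "drizzle-orm";

    const tags = ["alpha", "it's tricky"]; // the embedded quote defeated manual escaping

    // Every tag becomes a bound parameter; sql.join inserts the separators.
    const tagArray = sql`ARRAY[${sql.join(tags.map((t) => sql`${t}`), sql`, `)}]::text[]`;
    // Usable inside a where(...), e.g. sql`${taskTable.tags} @> ${tagArray}`.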
@@ -23201,11 +23357,14 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  updateValues.worldId = task.worldId;
  if (task.tags !== undefined)
  updateValues.tags = task.tags;
- updateValues.updatedAt = new Date;
+ const dbUpdateValues = {
+ ...updateValues,
+ updatedAt: new Date
+ };
  if (task.metadata !== undefined) {
- updateValues.metadata = task.metadata;
+ dbUpdateValues.metadata = task.metadata;
  }
- await this.db.update(taskTable).set(updateValues).where(and(eq2(taskTable.id, id), eq2(taskTable.agentId, this.agentId)));
+ await this.db.update(taskTable).set(dbUpdateValues).where(and(eq2(taskTable.id, id), eq2(taskTable.agentId, this.agentId)));
  });
  });
  }
@@ -23670,6 +23829,11 @@ class PGliteClientManager {
  async initialize() {}
  async close() {
  this.shuttingDown = true;
+ if (this.client) {
+ try {
+ await this.client.close();
+ } catch {}
+ }
  }
  setupShutdownHandlers() {}
  }
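Previously close() only flipped the shuttingDown flag and left the PGlite handle open; it now awaits client.close() and swallows errors so shutdown cannot throw. A tiny sketch of the same guard (PGlite's close() returns a promise):

    import { PGlite } from "@electric-sql/pglite";

    class PgliteHolder {
      private shuttingDown = false;
      constructor(private client: PGlite | null) {}

      get isShuttingDown(): boolean {
        return this.shuttingDown;
      }

      async close(): Promise<void> {
        this.shuttingDown = true;
        if (this.client) {
          try {
            await this.client.close(); // release the in-process database
          } catch {
            // ignore: shutdown must not throw
          }
        }
      }
    }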
@@ -23715,5 +23879,5 @@ export {
  DatabaseMigrationService
  };
 
- //# debugId=A4CD118ECB5E333164756E2164756E21
+ //# debugId=F233539768A25C6F64756E2164756E21
  //# sourceMappingURL=index.browser.js.map