@elizaos/plugin-sql 1.6.5-alpha.2 → 1.6.5-alpha.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +1040 -379
- package/dist/browser/index.browser.js.map +24 -21
- package/dist/browser/tsconfig.build.tsbuildinfo +1 -1
- package/dist/node/index.node.js +1132 -528
- package/dist/node/index.node.js.map +27 -25
- package/dist/node/tsconfig.build.node.tsbuildinfo +1 -1
- package/package.json +6 -4
package/dist/node/index.node.js
CHANGED

@@ -5694,6 +5694,44 @@ var init_drizzle_orm = __esm(() => {
   init_view_common();
 });

+// src/schema/agent.ts
+var agentTable;
+var init_agent = __esm(() => {
+  init_drizzle_orm();
+  init_pg_core();
+  agentTable = pgTable("agents", {
+    id: uuid("id").primaryKey().defaultRandom(),
+    enabled: boolean("enabled").default(true).notNull(),
+    server_id: uuid("server_id"),
+    createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
+    updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull(),
+    name: text("name").notNull(),
+    username: text("username"),
+    system: text("system").default(""),
+    bio: jsonb("bio").$type().default(sql`'[]'::jsonb`),
+    messageExamples: jsonb("message_examples").$type().default(sql`'[]'::jsonb`).notNull(),
+    postExamples: jsonb("post_examples").$type().default(sql`'[]'::jsonb`).notNull(),
+    topics: jsonb("topics").$type().default(sql`'[]'::jsonb`).notNull(),
+    adjectives: jsonb("adjectives").$type().default(sql`'[]'::jsonb`).notNull(),
+    knowledge: jsonb("knowledge").$type().default(sql`'[]'::jsonb`).notNull(),
+    plugins: jsonb("plugins").$type().default(sql`'[]'::jsonb`).notNull(),
+    settings: jsonb("settings").$type().default(sql`'{}'::jsonb`).notNull(),
+    style: jsonb("style").$type().default(sql`'{}'::jsonb`).notNull()
+  });
+});
+
+// src/schema/server.ts
+var serverTable;
+var init_server = __esm(() => {
+  init_drizzle_orm();
+  init_pg_core();
+  serverTable = pgTable("servers", {
+    id: uuid("id").primaryKey(),
+    createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
+    updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull()
+  });
+});
+
 // src/runtime-migrator/storage/migration-tracker.ts
 class MigrationTracker {
   db;
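
Note: the new `agents` and `servers` tables above are ordinary Drizzle `pgTable` definitions exported as `agentTable` and `serverTable`. A minimal usage sketch, assuming an already-configured Drizzle `db` instance (the `db`, `agentId`, and `serverId` names here are hypothetical, not part of the package):

```ts
import { eq } from "drizzle-orm";
// agentTable / serverTable are the tables defined in src/schema/agent.ts and src/schema/server.ts above.

async function ensureAgentServer(db: any, agentId: string, serverId: string) {
  // Register the server row (id is its only required column), then point the agent at it,
  // mirroring what getOrCreateRlsServer/assignAgentToServer do later in this diff.
  await db.insert(serverTable).values({ id: serverId }).onConflictDoNothing();
  const [agent] = await db.select().from(agentTable).where(eq(agentTable.id, agentId));
  if (agent && agent.server_id !== serverId) {
    await db.update(agentTable).set({ server_id: serverId }).where(eq(agentTable.id, agentId));
  }
}
```
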
@@ -5871,14 +5909,14 @@ class ExtensionManager {
     for (const extension of extensions) {
       try {
         if (!/^[a-zA-Z0-9_-]+$/.test(extension)) {
-          logger2.warn(
+          logger2.warn({ src: "plugin:sql", extension }, "Invalid extension name - contains invalid characters");
           continue;
         }
         await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS ${sql.identifier(extension)}`);
-        logger2.debug(
+        logger2.debug({ src: "plugin:sql", extension }, "Extension installed");
       } catch (error) {
         const errorMessage = error instanceof Error ? error.message : String(error);
-        logger2.warn(
+        logger2.warn({ src: "plugin:sql", extension, error: errorMessage }, "Could not install extension");
       }
     }
   }
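
Most of the remaining logging hunks repeat the pattern shown here: free-form message strings are replaced by object-first structured calls. A sketch of the convention, assuming the `@elizaos/core` logger accepts pino-style `(bindings, message)` arguments as the new calls in this diff do (the "before" line is illustrative only; the original strings are truncated in this diff view):

```ts
import { logger } from "@elizaos/core";

declare const extension: string;
declare const errorMessage: string;

// Before (message-only, context interpolated into the string):
logger.warn(`Could not install extension ${extension}: ${errorMessage}`);

// After (context as a bindings object plus a fixed message string, easier to filter and aggregate):
logger.warn({ src: "plugin:sql", extension, error: errorMessage }, "Could not install extension");
```
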
@@ -6590,10 +6628,7 @@ async function generateMigrationSQL(previousSnapshot, currentSnapshot, diff) {
   }
   const dataLossCheck = checkForDataLoss(diff);
   if (dataLossCheck.warnings.length > 0) {
-    logger3.warn("
-    for (const warning of dataLossCheck.warnings) {
-      logger3.warn(` - ${warning}`);
-    }
+    logger3.warn({ src: "plugin:sql", warnings: dataLossCheck.warnings }, "Schema changes may cause data loss");
   }
   const schemasToCreate = new Set;
   for (const tableName of diff.tables.created) {
@@ -7015,7 +7050,7 @@ class DatabaseIntrospector {
     this.db = db2;
   }
   async introspectSchema(schemaName = "public") {
-    logger5.info(
+    logger5.info({ src: "plugin:sql", schemaName }, "Starting database introspection");
     const tables = {};
     const schemas = {};
     const enums = {};
@@ -7023,7 +7058,7 @@ class DatabaseIntrospector {
     for (const tableInfo of allTables) {
       const tableName = tableInfo.table_name;
       const tableSchema = tableInfo.table_schema || "public";
-      logger5.debug(
+      logger5.debug({ src: "plugin:sql", tableSchema, tableName }, "Introspecting table");
       const columns2 = await this.getColumns(tableSchema, tableName);
       const columnsObject = {};
       const uniqueConstraintObject = {};
@@ -7040,15 +7075,17 @@ class DatabaseIntrospector {
       const indexesObject = {};
       for (const idx of indexes2) {
         if (!idx.is_primary && !idx.is_unique_constraint) {
[9 removed lines at 7043–7051 are not rendered in this diff view]
+          if (idx.columns && Array.isArray(idx.columns) && idx.columns.length > 0) {
+            indexesObject[idx.name] = {
+              name: idx.name,
+              columns: idx.columns.map((col) => ({
+                expression: col,
+                isExpression: false
+              })),
+              isUnique: idx.is_unique,
+              method: idx.method || "btree"
+            };
+          }
         }
       }
       const foreignKeys = await this.getForeignKeys(tableSchema, tableName);
@@ -7116,7 +7153,7 @@ class DatabaseIntrospector {
       }
       enums[key].values.push(enumInfo.value);
     }
-    logger5.info(
+    logger5.info({ src: "plugin:sql", tableCount: Object.keys(tables).length }, "Database introspection complete");
     return {
       version: "7",
       dialect: "postgresql",
@@ -7378,7 +7415,7 @@ class RuntimeMigrator {
       }
     }
     for (const schemaName of schemasToCreate) {
-      logger6.debug(
+      logger6.debug({ src: "plugin:sql", schemaName }, "Ensuring schema exists");
      await this.db.execute(sql.raw(`CREATE SCHEMA IF NOT EXISTS "${schemaName}"`));
     }
   }
@@ -7389,10 +7426,10 @@ class RuntimeMigrator {
       const tableData = table3;
       const actualSchema = tableData.schema || "public";
       if (!isCorePLugin && actualSchema === "public") {
-        logger6.warn(
+        logger6.warn({ src: "plugin:sql", pluginName, tableName: tableData.name, expectedSchema }, "Plugin table is using public schema - consider using pgSchema for better isolation");
       }
       if (isCorePLugin && actualSchema !== "public") {
-        logger6.warn(
+        logger6.warn({ src: "plugin:sql", pluginName: "@elizaos/plugin-sql", tableName: tableData.name, actualSchema }, "Core plugin table should use public schema");
       }
     }
   }
@@ -7603,13 +7640,13 @@ class RuntimeMigrator {
         }
       }
     }
-    logger6.debug(
+    logger6.debug({ src: "plugin:sql", urlPreview: url.substring(0, 50) }, "Connection string did not match any PostgreSQL patterns");
     return false;
   }
   async initialize() {
-    logger6.info("
+    logger6.info({ src: "plugin:sql" }, "Initializing migration system");
     await this.migrationTracker.ensureTables();
-    logger6.info("
+    logger6.info({ src: "plugin:sql" }, "Migration system initialized");
   }
   async migrate(pluginName, schema2, options = {}) {
     const lockId = this.getAdvisoryLockId(pluginName);
@@ -7618,46 +7655,46 @@ class RuntimeMigrator {
     }
     let lockAcquired = false;
     try {
-      logger6.info(
+      logger6.info({ src: "plugin:sql", pluginName }, "Starting migration for plugin");
       await this.initialize();
       const postgresUrl = process.env.POSTGRES_URL || process.env.DATABASE_URL || "";
       const isRealPostgres = this.isRealPostgresDatabase(postgresUrl);
       if (isRealPostgres) {
         try {
-          logger6.debug(
+          logger6.debug({ src: "plugin:sql", pluginName }, "Using PostgreSQL advisory locks");
           const lockIdStr = lockId.toString();
           const lockResult = await this.db.execute(sql`SELECT pg_try_advisory_lock(CAST(${lockIdStr} AS bigint)) as acquired`);
           lockAcquired = lockResult.rows[0]?.acquired === true;
           if (!lockAcquired) {
-            logger6.info(
+            logger6.info({ src: "plugin:sql", pluginName }, "Migration already in progress, waiting for lock");
             await this.db.execute(sql`SELECT pg_advisory_lock(CAST(${lockIdStr} AS bigint))`);
             lockAcquired = true;
-            logger6.info(
+            logger6.info({ src: "plugin:sql", pluginName }, "Lock acquired");
           } else {
-            logger6.debug(
+            logger6.debug({ src: "plugin:sql", pluginName, lockId: lockIdStr }, "Advisory lock acquired");
           }
         } catch (lockError) {
-          logger6.warn(
+          logger6.warn({ src: "plugin:sql", pluginName, error: lockError instanceof Error ? lockError.message : String(lockError) }, "Failed to acquire advisory lock, continuing without lock");
          lockAcquired = false;
         }
       } else {
-        logger6.debug(
+        logger6.debug({ src: "plugin:sql" }, "Development database detected, skipping advisory locks");
       }
-      await this.extensionManager.installRequiredExtensions(["vector", "fuzzystrmatch"]);
+      await this.extensionManager.installRequiredExtensions(["vector", "fuzzystrmatch", "pgcrypto"]);
       const currentSnapshot = await generateSnapshot(schema2);
       await this.ensureSchemasExist(currentSnapshot);
       this.validateSchemaUsage(pluginName, currentSnapshot);
       const currentHash = hashSnapshot(currentSnapshot);
       const lastMigration = await this.migrationTracker.getLastMigration(pluginName);
       if (lastMigration && lastMigration.hash === currentHash) {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName, hash: currentHash }, "No changes detected, skipping migration");
         return;
       }
       let previousSnapshot = await this.snapshotStorage.getLatestSnapshot(pluginName);
       if (!previousSnapshot && Object.keys(currentSnapshot.tables).length > 0) {
         const hasExistingTables = await this.introspector.hasExistingTables(pluginName);
         if (hasExistingTables) {
-          logger6.info(
+          logger6.info({ src: "plugin:sql", pluginName }, "No snapshot found but tables exist in database, introspecting");
           const schemaName = this.getExpectedSchemaName(pluginName);
           const introspectedSnapshot = await this.introspector.introspectSchema(schemaName);
           if (Object.keys(introspectedSnapshot.tables).length > 0) {
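
The hunk above wraps the whole migration in a PostgreSQL advisory lock keyed per plugin, and also adds `pgcrypto` to the required extensions. A minimal sketch of the same lock dance in isolation, assuming a Drizzle instance exposing `.execute()` (the `withAdvisoryLock` helper and `db` are hypothetical, not part of the package):

```ts
import { sql } from "drizzle-orm";

async function withAdvisoryLock(db: any, lockId: string, fn: () => Promise<void>) {
  // Try the non-blocking variant first, exactly as the migrator does.
  const res = await db.execute(sql`SELECT pg_try_advisory_lock(CAST(${lockId} AS bigint)) as acquired`);
  if (res.rows?.[0]?.acquired !== true) {
    // Someone else is migrating: block until the lock is released.
    await db.execute(sql`SELECT pg_advisory_lock(CAST(${lockId} AS bigint))`);
  }
  try {
    await fn();
  } finally {
    await db.execute(sql`SELECT pg_advisory_unlock(CAST(${lockId} AS bigint))`);
  }
}
```
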
@@ -7665,15 +7702,15 @@ class RuntimeMigrator {
             await this.journalStorage.updateJournal(pluginName, 0, `introspected_${Date.now()}`, true);
             const introspectedHash = hashSnapshot(introspectedSnapshot);
             await this.migrationTracker.recordMigration(pluginName, introspectedHash, Date.now());
-            logger6.info(
+            logger6.info({ src: "plugin:sql", pluginName }, "Created initial snapshot from existing database");
             previousSnapshot = introspectedSnapshot;
           }
         }
       }
       if (!hasChanges(previousSnapshot, currentSnapshot)) {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName }, "No schema changes");
         if (!previousSnapshot && Object.keys(currentSnapshot.tables).length === 0) {
-          logger6.info(
+          logger6.info({ src: "plugin:sql", pluginName }, "Recording empty schema");
           await this.migrationTracker.recordMigration(pluginName, currentHash, Date.now());
           const idx = await this.journalStorage.getNextIdx(pluginName);
           const tag = this.generateMigrationTag(idx, pluginName);
@@ -7684,7 +7721,7 @@ class RuntimeMigrator {
       }
       const diff = await calculateDiff(previousSnapshot, currentSnapshot);
       if (!hasDiffChanges(diff)) {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName }, "No actionable changes");
         return;
       }
       const dataLossCheck = checkForDataLoss(diff);
@@ -7692,55 +7729,34 @@ class RuntimeMigrator {
         const isProduction = false;
         const allowDestructive = options.force || options.allowDataLoss || process.env.ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS === "true";
         if (!allowDestructive) {
-          logger6.error("
-          logger6.error(`[RuntimeMigrator] Plugin: ${pluginName}`);
-          logger6.error(`[RuntimeMigrator] Environment: ${isProduction ? "PRODUCTION" : "DEVELOPMENT"}`);
-          logger6.error("[RuntimeMigrator] Destructive operations detected:");
-          for (const warning of dataLossCheck.warnings) {
-            logger6.error(`[RuntimeMigrator] - ${warning}`);
-          }
-          logger6.error("[RuntimeMigrator] To proceed with destructive migrations:");
-          logger6.error("[RuntimeMigrator] 1. Set environment variable: export ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true");
-          logger6.error("[RuntimeMigrator] 2. Or use option: migrate(plugin, schema, { force: true })");
-          if (isProduction) {
-            logger6.error("[RuntimeMigrator] 3. For production, consider using drizzle-kit for manual migration");
-          }
+          logger6.error({ src: "plugin:sql", pluginName, environment: isProduction ? "PRODUCTION" : "DEVELOPMENT", warnings: dataLossCheck.warnings }, "Destructive migration blocked - set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use force option");
           const errorMessage = isProduction ? `Destructive migration blocked in production for ${pluginName}. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use drizzle-kit.` : `Destructive migration blocked for ${pluginName}. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true to proceed.`;
           throw new Error(errorMessage);
         }
         if (dataLossCheck.requiresConfirmation) {
-          logger6.warn("
-          logger6.warn(`[RuntimeMigrator] Plugin: ${pluginName}`);
-          logger6.warn("[RuntimeMigrator] The following operations will be performed:");
-          for (const warning of dataLossCheck.warnings) {
-            logger6.warn(`[RuntimeMigrator] ⚠️ ${warning}`);
-          }
+          logger6.warn({ src: "plugin:sql", pluginName, warnings: dataLossCheck.warnings }, "Proceeding with destructive migration");
         }
       }
       const sqlStatements = await generateMigrationSQL(previousSnapshot, currentSnapshot, diff);
       if (sqlStatements.length === 0) {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName }, "No SQL statements to execute");
         return;
       }
-      logger6.info(
+      logger6.info({ src: "plugin:sql", pluginName, statementCount: sqlStatements.length }, "Executing SQL statements");
       if (options.verbose) {
         sqlStatements.forEach((stmt, i) => {
-          logger6.debug(
+          logger6.debug({ src: "plugin:sql", statementIndex: i + 1, statement: stmt }, "SQL statement");
         });
       }
       if (options.dryRun) {
-        logger6.info("
-        logger6.info("[RuntimeMigrator] Would execute:");
-        sqlStatements.forEach((stmt, i) => {
-          logger6.info(` ${i + 1}. ${stmt}`);
-        });
+        logger6.info({ src: "plugin:sql", pluginName, statements: sqlStatements }, "DRY RUN mode - not executing statements");
         return;
       }
       await this.executeMigration(pluginName, currentSnapshot, currentHash, sqlStatements);
-      logger6.info(
+      logger6.info({ src: "plugin:sql", pluginName }, "Migration completed successfully");
       return;
     } catch (error) {
-      logger6.error(
+      logger6.error({ src: "plugin:sql", pluginName, error: error instanceof Error ? error.message : String(error) }, "Migration failed");
       throw error;
     } finally {
       const postgresUrl = process.env.POSTGRES_URL || process.env.DATABASE_URL || "";
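
The destructive-migration gate is unchanged by this hunk; only its logging is consolidated into single structured entries. A hedged usage sketch of the options visible in this diff (`dryRun`, `verbose`, `force`, `allowDataLoss`) and the environment override; the `migrator` instance and `schema` object are assumed to exist:

```ts
// Preview the generated SQL without executing it.
await migrator.migrate("@elizaos/plugin-sql", schema, { dryRun: true, verbose: true });

// Destructive statements are blocked by default; opt in explicitly with either:
process.env.ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS = "true";
// or:
await migrator.migrate("@elizaos/plugin-sql", schema, { force: true }); // { allowDataLoss: true } also works
```
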
@@ -7749,9 +7765,9 @@ class RuntimeMigrator {
       try {
         const lockIdStr = lockId.toString();
         await this.db.execute(sql`SELECT pg_advisory_unlock(CAST(${lockIdStr} AS bigint))`);
-        logger6.debug(
+        logger6.debug({ src: "plugin:sql", pluginName }, "Advisory lock released");
       } catch (unlockError) {
-        logger6.warn(
+        logger6.warn({ src: "plugin:sql", pluginName, error: unlockError instanceof Error ? unlockError.message : String(unlockError) }, "Failed to release advisory lock");
       }
     }
   }
@@ -7762,7 +7778,7 @@ class RuntimeMigrator {
       await this.db.execute(sql`BEGIN`);
       transactionStarted = true;
       for (const stmt of sqlStatements) {
-        logger6.debug(
+        logger6.debug({ src: "plugin:sql", statement: stmt }, "Executing SQL statement");
         await this.db.execute(sql.raw(stmt));
       }
       const idx = await this.journalStorage.getNextIdx(pluginName);
@@ -7771,14 +7787,14 @@ class RuntimeMigrator {
       await this.journalStorage.updateJournal(pluginName, idx, tag, true);
       await this.snapshotStorage.saveSnapshot(pluginName, idx, snapshot);
       await this.db.execute(sql`COMMIT`);
-      logger6.info(
+      logger6.info({ src: "plugin:sql", pluginName, tag }, "Recorded migration");
     } catch (error) {
       if (transactionStarted) {
         try {
           await this.db.execute(sql`ROLLBACK`);
-          logger6.error("
+          logger6.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Migration failed, rolled back");
         } catch (rollbackError) {
-          logger6.error("
+          logger6.error({ src: "plugin:sql", error: rollbackError instanceof Error ? rollbackError.message : String(rollbackError) }, "Failed to rollback transaction");
         }
       }
       throw error;
@@ -7801,31 +7817,31 @@ class RuntimeMigrator {
     };
   }
   async reset(pluginName) {
-    logger6.warn(
+    logger6.warn({ src: "plugin:sql", pluginName }, "Resetting migrations");
     await this.db.execute(sql`DELETE FROM migrations._migrations WHERE plugin_name = ${pluginName}`);
     await this.db.execute(sql`DELETE FROM migrations._journal WHERE plugin_name = ${pluginName}`);
     await this.db.execute(sql`DELETE FROM migrations._snapshots WHERE plugin_name = ${pluginName}`);
-    logger6.warn(
+    logger6.warn({ src: "plugin:sql", pluginName }, "Reset complete");
   }
   async checkMigration(pluginName, schema2) {
     try {
-      logger6.info(
+      logger6.info({ src: "plugin:sql", pluginName }, "Checking migration");
       const currentSnapshot = await generateSnapshot(schema2);
       const previousSnapshot = await this.snapshotStorage.getLatestSnapshot(pluginName);
       if (!hasChanges(previousSnapshot, currentSnapshot)) {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName }, "No changes detected");
         return null;
       }
       const diff = await calculateDiff(previousSnapshot, currentSnapshot);
       const dataLossCheck = checkForDataLoss(diff);
       if (dataLossCheck.hasDataLoss) {
-        logger6.warn(
+        logger6.warn({ src: "plugin:sql", pluginName }, "Migration would cause data loss");
       } else {
-        logger6.info(
+        logger6.info({ src: "plugin:sql", pluginName }, "Migration is safe (no data loss)");
       }
       return dataLossCheck;
     } catch (error) {
-      logger6.error(
+      logger6.error({ src: "plugin:sql", pluginName, error: error instanceof Error ? error.message : String(error) }, "Failed to check migration");
       throw error;
     }
   }
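
`checkMigration` above returns `null` when the snapshots match and otherwise the data-loss check result; a short caller sketch under the same assumptions as before:

```ts
const check = await migrator.checkMigration("@elizaos/plugin-sql", schema);
if (check && check.hasDataLoss) {
  // Review check.warnings before re-running migrate() with { force: true }.
  console.warn(check.warnings);
}
```
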
@@ -7847,12 +7863,810 @@ var init_runtime_migrator2 = __esm(() => {
   init_runtime_migrator();
 });

+// src/migrations.ts
+import { logger as logger7 } from "@elizaos/core";
+async function migrateToEntityRLS(adapter) {
+  const db2 = adapter.db;
+  try {
+    await db2.execute(sql`SELECT 1 FROM pg_tables LIMIT 1`);
+  } catch {
+    logger7.debug("[Migration] ⊘ Not PostgreSQL, skipping PostgreSQL-specific migrations");
+    return;
+  }
+  logger7.info("[Migration] Starting develop → feat/entity-rls migration...");
+  try {
+    logger7.debug("[Migration] → Clearing RuntimeMigrator snapshot cache...");
+    try {
+      await db2.execute(sql`DELETE FROM migrations._snapshots WHERE plugin_name = '@elizaos/plugin-sql'`);
+      logger7.debug("[Migration] ✓ Snapshot cache cleared");
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ No snapshot cache to clear (migrations schema not yet created)");
+    }
+    logger7.debug("[Migration] → Disabling Row Level Security on all tables...");
+    try {
+      const tablesResult = await db2.execute(sql`
+        SELECT tablename
+        FROM pg_tables
+        WHERE schemaname = 'public'
+        ORDER BY tablename
+      `);
+      for (const row of tablesResult.rows || []) {
+        const tableName = row.tablename;
+        try {
+          await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DISABLE ROW LEVEL SECURITY`));
+          logger7.debug(`[Migration] ✓ Disabled RLS on ${tableName}`);
+        } catch (error) {
+          logger7.debug(`[Migration] ⊘ Could not disable RLS on ${tableName}`);
+        }
+      }
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ Could not disable RLS (may not have permissions)");
+    }
+    logger7.debug("[Migration] → Handling server_id → message_server_id migrations...");
+    const tablesToMigrate = ["channels", "worlds", "rooms"];
+    for (const tableName of tablesToMigrate) {
+      try {
+        const columnsResult = await db2.execute(sql`
+          SELECT column_name, data_type, is_nullable
+          FROM information_schema.columns
+          WHERE table_schema = 'public'
+            AND table_name = ${tableName}
+            AND column_name IN ('server_id', 'message_server_id')
+          ORDER BY column_name
+        `);
+        const columns2 = columnsResult.rows || [];
+        const serverId = columns2.find((c) => c.column_name === "server_id");
+        const messageServerId = columns2.find((c) => c.column_name === "message_server_id");
+        if (serverId && !messageServerId) {
+          logger7.debug(`[Migration] → Renaming ${tableName}.server_id to message_server_id...`);
+          await db2.execute(sql.raw(`ALTER TABLE "${tableName}" RENAME COLUMN "server_id" TO "message_server_id"`));
+          logger7.debug(`[Migration] ✓ Renamed ${tableName}.server_id → message_server_id`);
+          if (serverId.data_type === "text") {
+            try {
+              logger7.debug(`[Migration] → Dropping DEFAULT constraint on ${tableName}.message_server_id...`);
+              await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" DROP DEFAULT`));
+              logger7.debug(`[Migration] ✓ Dropped DEFAULT constraint`);
+              logger7.debug(`[Migration] → Converting ${tableName}.message_server_id from text to uuid...`);
+              await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING "message_server_id"::uuid`));
+              logger7.debug(`[Migration] ✓ Converted ${tableName}.message_server_id to uuid`);
+            } catch (convertError) {
+              logger7.warn(`[Migration] ⚠️ Could not convert ${tableName}.message_server_id to uuid - data may not be valid UUIDs`);
+              logger7.debug(`[Migration] → Setting invalid UUIDs to NULL in ${tableName}.message_server_id...`);
+              await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING CASE WHEN "message_server_id" ~ '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$' THEN "message_server_id"::uuid ELSE NULL END`));
+            }
+          }
+          if (tableName === "channels") {
+            const nullCountResult = await db2.execute(sql.raw(`SELECT COUNT(*) as count FROM "${tableName}" WHERE "message_server_id" IS NULL`));
+            const nullCount = nullCountResult.rows?.[0]?.count;
+            if (nullCount && parseInt(nullCount) > 0) {
+              logger7.warn(`[Migration] ⚠️ ${tableName} has ${nullCount} rows with NULL message_server_id - these will be deleted`);
+              await db2.execute(sql.raw(`DELETE FROM "${tableName}" WHERE "message_server_id" IS NULL`));
+              logger7.debug(`[Migration] ✓ Deleted ${nullCount} rows with NULL message_server_id from ${tableName}`);
+            }
+            logger7.debug(`[Migration] → Making ${tableName}.message_server_id NOT NULL...`);
+            await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" SET NOT NULL`));
+            logger7.debug(`[Migration] ✓ Set ${tableName}.message_server_id NOT NULL`);
+          }
+        } else if (serverId && messageServerId) {
+          logger7.debug(`[Migration] → ${tableName} has both columns, dropping server_id...`);
+          await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DROP COLUMN "server_id" CASCADE`));
+          logger7.debug(`[Migration] ✓ Dropped ${tableName}.server_id (will be re-added by RuntimeMigrator for RLS)`);
+        } else if (!serverId && messageServerId) {
+          if (messageServerId.data_type === "text") {
+            logger7.debug(`[Migration] → ${tableName}.message_server_id exists but is TEXT, needs UUID conversion...`);
+            logger7.debug(`[Migration] → Dropping DEFAULT constraint on ${tableName}.message_server_id...`);
+            await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" DROP DEFAULT`));
+            logger7.debug(`[Migration] ✓ Dropped DEFAULT constraint`);
+            logger7.debug(`[Migration] → Converting ${tableName}.message_server_id from text to uuid (generating UUIDs from text)...`);
+            await db2.execute(sql.raw(`
+              ALTER TABLE "${tableName}"
+              ALTER COLUMN "message_server_id" TYPE uuid
+              USING CASE
+                WHEN "message_server_id" ~ '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
+                THEN "message_server_id"::uuid
+                ELSE md5("message_server_id")::uuid
+              END
+            `));
+            logger7.debug(`[Migration] ✓ Converted ${tableName}.message_server_id to uuid`);
+          } else {
+            logger7.debug(`[Migration] ⊘ ${tableName}.message_server_id already UUID, skipping`);
+          }
+        } else {
+          logger7.debug(`[Migration] ⊘ ${tableName} already migrated, skipping`);
+        }
+      } catch (error) {
+        logger7.warn(`[Migration] ⚠️ Error migrating ${tableName}.server_id: ${error}`);
+      }
+    }
+    logger7.debug("[Migration] → Dropping all remaining RLS-managed server_id columns...");
+    try {
+      const serverIdColumnsResult = await db2.execute(sql`
+        SELECT table_name
+        FROM information_schema.columns
+        WHERE table_schema = 'public'
+          AND column_name = 'server_id'
+          AND table_name NOT IN (
+            'servers', -- server_id is the primary key
+            'agents', -- server_id is in the schema (for RLS)
+            'channels', -- already handled above
+            'worlds', -- already handled above
+            'rooms', -- already handled above
+            'server_agents', -- server_id is part of composite key
+            'drizzle_migrations',
+            '__drizzle_migrations'
+          )
+        ORDER BY table_name
+      `);
+      const tablesToClean = serverIdColumnsResult.rows || [];
+      logger7.debug(`[Migration] → Found ${tablesToClean.length} tables with server_id columns`);
+      for (const row of tablesToClean) {
+        const tableName = row.table_name;
+        try {
+          await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DROP COLUMN IF EXISTS server_id CASCADE`));
+          logger7.debug(`[Migration] ✓ Dropped server_id from ${tableName}`);
+        } catch (error) {
+          logger7.debug(`[Migration] ⊘ Could not drop server_id from ${tableName}`);
+        }
+      }
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ Could not drop server_id columns (may not have permissions)");
+    }
+    logger7.debug("[Migration] → Checking server_agents table rename...");
+    try {
+      const tablesResult = await db2.execute(sql`
+        SELECT table_name
+        FROM information_schema.tables
+        WHERE table_schema = 'public'
+          AND table_name IN ('server_agents', 'message_server_agents')
+        ORDER BY table_name
+      `);
+      const tables = tablesResult.rows || [];
+      const hasServerAgents = tables.some((t) => t.table_name === "server_agents");
+      const hasMessageServerAgents = tables.some((t) => t.table_name === "message_server_agents");
+      if (hasServerAgents && !hasMessageServerAgents) {
+        logger7.debug("[Migration] → Renaming server_agents to message_server_agents...");
+        await db2.execute(sql.raw(`ALTER TABLE "server_agents" RENAME TO "message_server_agents"`));
+        logger7.debug("[Migration] ✓ Renamed server_agents → message_server_agents");
+        logger7.debug("[Migration] → Renaming message_server_agents.server_id to message_server_id...");
+        await db2.execute(sql.raw(`ALTER TABLE "message_server_agents" RENAME COLUMN "server_id" TO "message_server_id"`));
+        logger7.debug("[Migration] ✓ Renamed message_server_agents.server_id → message_server_id");
+      } else if (!hasServerAgents && !hasMessageServerAgents) {
+        logger7.debug("[Migration] ⊘ No server_agents table to migrate");
+      } else if (hasMessageServerAgents) {
+        logger7.debug("[Migration] → Checking message_server_agents columns...");
+        const columnsResult = await db2.execute(sql`
+          SELECT column_name
+          FROM information_schema.columns
+          WHERE table_schema = 'public'
+            AND table_name = 'message_server_agents'
+            AND column_name IN ('server_id', 'message_server_id')
+          ORDER BY column_name
+        `);
+        const columns2 = columnsResult.rows || [];
+        const hasServerId = columns2.some((c) => c.column_name === "server_id");
+        const hasMessageServerId = columns2.some((c) => c.column_name === "message_server_id");
+        if (hasServerId && !hasMessageServerId) {
+          logger7.debug("[Migration] → Renaming message_server_agents.server_id to message_server_id...");
+          await db2.execute(sql.raw(`ALTER TABLE "message_server_agents" RENAME COLUMN "server_id" TO "message_server_id"`));
+          logger7.debug("[Migration] ✓ Renamed message_server_agents.server_id → message_server_id");
+        } else if (!hasServerId && !hasMessageServerId) {
+          logger7.debug("[Migration] → message_server_agents exists without required columns, truncating...");
+          await db2.execute(sql`TRUNCATE TABLE message_server_agents CASCADE`);
+          logger7.debug("[Migration] ✓ Truncated message_server_agents");
+        } else {
+          logger7.debug("[Migration] ⊘ message_server_agents already has correct schema");
+        }
+      }
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ Could not check/migrate server_agents table");
+    }
+    logger7.debug("[Migration] → Checking channel_participants table...");
+    try {
+      const columnsResult = await db2.execute(sql`
+        SELECT column_name
+        FROM information_schema.columns
+        WHERE table_schema = 'public'
+          AND table_name = 'channel_participants'
+          AND column_name IN ('user_id', 'entity_id')
+        ORDER BY column_name
+      `);
+      const columns2 = columnsResult.rows || [];
+      const hasUserId = columns2.some((c) => c.column_name === "user_id");
+      const hasEntityId = columns2.some((c) => c.column_name === "entity_id");
+      if (hasUserId && !hasEntityId) {
+        logger7.debug("[Migration] → Renaming channel_participants.user_id to entity_id...");
+        await db2.execute(sql.raw(`ALTER TABLE "channel_participants" RENAME COLUMN "user_id" TO "entity_id"`));
+        logger7.debug("[Migration] ✓ Renamed channel_participants.user_id → entity_id");
+      } else if (!hasUserId && !hasEntityId) {
+        logger7.debug("[Migration] → channel_participants exists without entity_id or user_id, truncating...");
+        await db2.execute(sql`TRUNCATE TABLE channel_participants CASCADE`);
+        logger7.debug("[Migration] ✓ Truncated channel_participants");
+      } else {
+        logger7.debug("[Migration] ⊘ channel_participants already has entity_id column");
+      }
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ Could not check/migrate channel_participants");
+    }
+    logger7.debug("[Migration] → Discovering and dropping all regular indexes...");
+    try {
+      const indexesResult = await db2.execute(sql`
+        SELECT i.relname AS index_name
+        FROM pg_index idx
+        JOIN pg_class i ON i.oid = idx.indexrelid
+        JOIN pg_class c ON c.oid = idx.indrelid
+        JOIN pg_namespace n ON n.oid = c.relnamespace
+        LEFT JOIN pg_constraint con ON con.conindid = idx.indexrelid
+        WHERE n.nspname = 'public'
+          AND NOT idx.indisprimary -- Not a primary key
+          AND con.contype IS NULL -- Not a constraint (unique, etc)
+        ORDER BY i.relname
+      `);
+      const indexesToDrop = indexesResult.rows || [];
+      logger7.debug(`[Migration] → Found ${indexesToDrop.length} indexes to drop`);
+      for (const row of indexesToDrop) {
+        const indexName = row.index_name;
+        try {
+          await db2.execute(sql.raw(`DROP INDEX IF EXISTS "${indexName}"`));
+          logger7.debug(`[Migration] ✓ Dropped index ${indexName}`);
+        } catch (error) {
+          logger7.debug(`[Migration] ⊘ Could not drop index ${indexName}`);
+        }
+      }
+    } catch (error) {
+      logger7.debug("[Migration] ⊘ Could not drop indexes (may not have permissions)");
+    }
+    logger7.info("[Migration] ✓ Migration complete - develop to feat/entity-rls migration finished");
+  } catch (error) {
+    logger7.error("[Migration] Migration failed:", String(error));
+    throw error;
+  }
+}
+var init_migrations = __esm(() => {
+  init_drizzle_orm();
+});
+
+// src/rls.ts
+import { logger as logger8, validateUuid } from "@elizaos/core";
+async function installRLSFunctions(adapter) {
+  const db2 = adapter.db;
+  await db2.execute(sql`
+    CREATE TABLE IF NOT EXISTS servers (
+      id UUID PRIMARY KEY,
+      created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
+      updated_at TIMESTAMPTZ DEFAULT NOW() NOT NULL
+    )
+  `);
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION current_server_id() RETURNS UUID AS $$
+    DECLARE
+      app_name TEXT;
+    BEGIN
+      app_name := NULLIF(current_setting('application_name', TRUE), '');
+
+      -- Return NULL if application_name is not set or not a valid UUID
+      -- This allows admin queries to work without RLS restrictions
+      BEGIN
+        RETURN app_name::UUID;
+      EXCEPTION WHEN OTHERS THEN
+        RETURN NULL;
+      END;
+    END;
+    $$ LANGUAGE plpgsql STABLE;
+  `);
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION add_server_isolation(
+      schema_name text,
+      table_name text
+    ) RETURNS void AS $$
+    DECLARE
+      full_table_name text;
+      column_exists boolean;
+      orphaned_count bigint;
+    BEGIN
+      full_table_name := schema_name || '.' || table_name;
+
+      -- Check if server_id column already exists
+      SELECT EXISTS (
+        SELECT 1 FROM information_schema.columns
+        WHERE information_schema.columns.table_schema = schema_name
+          AND information_schema.columns.table_name = add_server_isolation.table_name
+          AND information_schema.columns.column_name = 'server_id'
+      ) INTO column_exists;
+
+      -- Add server_id column if missing (DEFAULT populates it automatically for new rows)
+      IF NOT column_exists THEN
+        EXECUTE format('ALTER TABLE %I.%I ADD COLUMN server_id UUID DEFAULT current_server_id()', schema_name, table_name);
+
+        -- Backfill existing rows with current server_id
+        -- This ensures all existing data belongs to the server instance that is enabling RLS
+        EXECUTE format('UPDATE %I.%I SET server_id = current_server_id() WHERE server_id IS NULL', schema_name, table_name);
+      ELSE
+        -- Column already exists (RLS was previously enabled then disabled)
+        -- Restore the DEFAULT clause (may have been removed during uninstallRLS)
+        EXECUTE format('ALTER TABLE %I.%I ALTER COLUMN server_id SET DEFAULT current_server_id()', schema_name, table_name);
+
+        -- Only backfill NULL server_id rows, do NOT steal data from other servers
+        EXECUTE format('SELECT COUNT(*) FROM %I.%I WHERE server_id IS NULL', schema_name, table_name) INTO orphaned_count;
+
+        IF orphaned_count > 0 THEN
+          RAISE NOTICE 'Backfilling % rows with NULL server_id in %.%', orphaned_count, schema_name, table_name;
+          EXECUTE format('UPDATE %I.%I SET server_id = current_server_id() WHERE server_id IS NULL', schema_name, table_name);
+        END IF;
+      END IF;
+
+      -- Create index for efficient server_id filtering
+      EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_server_id ON %I.%I(server_id)', table_name, schema_name, table_name);
+
+      -- Enable RLS on the table
+      EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
+
+      -- FORCE RLS even for table owners (critical for security)
+      EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
+
+      -- Drop existing policy if present
+      EXECUTE format('DROP POLICY IF EXISTS server_isolation_policy ON %I.%I', schema_name, table_name);
+
+      -- Create isolation policy: users can only see/modify rows where server_id matches current server instance
+      -- No NULL clause - all rows must have a valid server_id (backfilled during column addition)
+      EXECUTE format('
+        CREATE POLICY server_isolation_policy ON %I.%I
+        USING (server_id = current_server_id())
+        WITH CHECK (server_id = current_server_id())
+      ', schema_name, table_name);
+    END;
+    $$ LANGUAGE plpgsql;
+  `);
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION apply_rls_to_all_tables() RETURNS void AS $$
+    DECLARE
+      tbl record;
+    BEGIN
+      FOR tbl IN
+        SELECT schemaname, tablename
+        FROM pg_tables
+        WHERE schemaname = 'public'
+          AND tablename NOT IN (
+            'servers',
+            'drizzle_migrations',
+            '__drizzle_migrations'
+          )
+      LOOP
+        BEGIN
+          PERFORM add_server_isolation(tbl.schemaname, tbl.tablename);
+        EXCEPTION WHEN OTHERS THEN
+          RAISE WARNING 'Failed to apply RLS to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
+        END;
+      END LOOP;
+    END;
+    $$ LANGUAGE plpgsql;
+  `);
+  logger8.info({ src: "plugin:sql" }, "RLS PostgreSQL functions installed");
+  await installEntityRLS(adapter);
+}
+async function getOrCreateRlsServer(adapter, serverId) {
+  const db2 = adapter.db;
+  await db2.insert(serverTable).values({
+    id: serverId
+  }).onConflictDoNothing();
+  logger8.info({ src: "plugin:sql", serverId: serverId.slice(0, 8) }, "RLS server registered");
+  return serverId;
+}
+async function setServerContext(adapter, serverId) {
+  if (!validateUuid(serverId)) {
+    throw new Error(`Invalid server ID format: ${serverId}. Must be a valid UUID.`);
+  }
+  const db2 = adapter.db;
+  const servers = await db2.select().from(serverTable).where(eq(serverTable.id, serverId));
+  if (servers.length === 0) {
+    throw new Error(`Server ${serverId} does not exist`);
+  }
+  logger8.info({ src: "plugin:sql", serverId: serverId.slice(0, 8) }, "RLS context configured");
+}
+async function assignAgentToServer(adapter, agentId, serverId) {
+  if (!agentId || !serverId) {
+    logger8.warn(`[Data Isolation] Cannot assign agent to server: invalid agentId (${agentId}) or serverId (${serverId})`);
+    return;
+  }
+  const db2 = adapter.db;
+  const agents = await db2.select().from(agentTable).where(eq(agentTable.id, agentId));
+  if (agents.length > 0) {
+    const agent = agents[0];
+    const currentServerId = agent.server_id;
+    if (currentServerId === serverId) {
+      logger8.debug({ src: "plugin:sql", agentName: agent.name }, "Agent already assigned to correct server");
+    } else {
+      await db2.update(agentTable).set({ server_id: serverId }).where(eq(agentTable.id, agentId));
+      if (currentServerId === null) {
+        logger8.info({ src: "plugin:sql", agentName: agent.name }, "Agent assigned to server");
+      } else {
+        logger8.warn({ src: "plugin:sql", agentName: agent.name }, "Agent server changed");
+      }
+    }
+  } else {
+    logger8.debug({ src: "plugin:sql", agentId }, "Agent does not exist yet");
+  }
+}
+async function applyRLSToNewTables(adapter) {
+  const db2 = adapter.db;
+  try {
+    await db2.execute(sql`SELECT apply_rls_to_all_tables()`);
+    logger8.info({ src: "plugin:sql" }, "RLS applied to all tables");
+  } catch (error) {
+    logger8.warn({ src: "plugin:sql", error: String(error) }, "Failed to apply RLS to some tables");
+  }
+}
+async function uninstallRLS(adapter) {
+  const db2 = adapter.db;
+  try {
+    const checkResult = await db2.execute(sql`
+      SELECT EXISTS (
+        SELECT FROM pg_tables
+        WHERE schemaname = 'public' AND tablename = 'servers'
+      ) as rls_enabled
+    `);
+    const rlsEnabled = checkResult.rows?.[0]?.rls_enabled;
+    if (!rlsEnabled) {
+      logger8.debug({ src: "plugin:sql" }, "RLS not installed, skipping cleanup");
+      return;
+    }
+    logger8.info({ src: "plugin:sql" }, "Disabling RLS globally (keeping server_id columns for schema compatibility)...");
+    try {
+      await uninstallEntityRLS(adapter);
+    } catch (entityRlsError) {
+      logger8.debug({ src: "plugin:sql" }, "Entity RLS cleanup skipped (not installed or already cleaned)");
+    }
+    await db2.execute(sql`
+      CREATE OR REPLACE FUNCTION _temp_disable_rls_on_table(
+        p_schema_name text,
+        p_table_name text
+      ) RETURNS void AS $$
+      DECLARE
+        policy_rec record;
+      BEGIN
+        -- Drop all policies on this table
+        FOR policy_rec IN
+          SELECT policyname
+          FROM pg_policies
+          WHERE schemaname = p_schema_name AND tablename = p_table_name
+        LOOP
+          EXECUTE format('DROP POLICY IF EXISTS %I ON %I.%I',
+            policy_rec.policyname, p_schema_name, p_table_name);
+        END LOOP;
+
+        -- Disable RLS
+        EXECUTE format('ALTER TABLE %I.%I NO FORCE ROW LEVEL SECURITY', p_schema_name, p_table_name);
+        EXECUTE format('ALTER TABLE %I.%I DISABLE ROW LEVEL SECURITY', p_schema_name, p_table_name);
+      END;
+      $$ LANGUAGE plpgsql;
+    `);
+    const tablesResult = await db2.execute(sql`
+      SELECT schemaname, tablename
+      FROM pg_tables
+      WHERE schemaname = 'public'
+        AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
+    `);
+    for (const row of tablesResult.rows || []) {
+      const schemaName = row.schemaname;
+      const tableName = row.tablename;
+      try {
+        await db2.execute(sql`SELECT _temp_disable_rls_on_table(${schemaName}, ${tableName})`);
+        logger8.debug({ src: "plugin:sql", schemaName, tableName }, "Disabled RLS on table");
+      } catch (error) {
+        logger8.warn({ src: "plugin:sql", schemaName, tableName, error: String(error) }, "Failed to disable RLS on table");
+      }
+    }
+    await db2.execute(sql`DROP FUNCTION IF EXISTS _temp_disable_rls_on_table(text, text)`);
+    logger8.info({ src: "plugin:sql" }, "Keeping server_id values intact (prevents data theft on re-enable)");
+    logger8.info({ src: "plugin:sql" }, "Clearing servers table...");
+    await db2.execute(sql`TRUNCATE TABLE servers`);
+    await db2.execute(sql`DROP FUNCTION IF EXISTS apply_rls_to_all_tables() CASCADE`);
+    await db2.execute(sql`DROP FUNCTION IF EXISTS add_server_isolation(text, text) CASCADE`);
+    await db2.execute(sql`DROP FUNCTION IF EXISTS current_server_id() CASCADE`);
+    logger8.info({ src: "plugin:sql" }, "Dropped all RLS functions");
+    logger8.info({ src: "plugin:sql" }, "RLS disabled successfully (server_id columns preserved)");
+  } catch (error) {
+    logger8.error({ src: "plugin:sql", error: String(error) }, "Failed to disable RLS");
+    throw error;
+  }
+}
+async function installEntityRLS(adapter) {
+  const db2 = adapter.db;
+  logger8.info("[Entity RLS] Installing entity RLS functions and policies...");
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION current_entity_id()
+    RETURNS UUID AS $$
+    DECLARE
+      entity_id_text TEXT;
+    BEGIN
+      -- Read from transaction-local variable
+      entity_id_text := NULLIF(current_setting('app.entity_id', TRUE), '');
+
+      IF entity_id_text IS NULL OR entity_id_text = '' THEN
+        RETURN NULL;
+      END IF;
+
+      BEGIN
+        RETURN entity_id_text::UUID;
+      EXCEPTION WHEN OTHERS THEN
+        RETURN NULL;
+      END;
+    END;
+    $$ LANGUAGE plpgsql STABLE;
+  `);
+  logger8.info("[Entity RLS] Created current_entity_id() function");
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION add_entity_isolation(
+      schema_name text,
+      table_name text,
+      require_entity boolean DEFAULT false
+    ) RETURNS void AS $$
+    DECLARE
+      full_table_name text;
+      has_entity_id boolean;
+      has_author_id boolean;
+      has_channel_id boolean;
+      has_room_id boolean;
+      entity_column_name text;
+      room_column_name text;
+    BEGIN
+      full_table_name := schema_name || '.' || table_name;
+
+      -- Check which columns exist (using camelCase as per schema definition)
+      SELECT EXISTS (
+        SELECT 1 FROM information_schema.columns
+        WHERE information_schema.columns.table_schema = schema_name
+          AND information_schema.columns.table_name = add_entity_isolation.table_name
+          AND information_schema.columns.column_name = 'entityId'
+      ) INTO has_entity_id;
+
+      SELECT EXISTS (
+        SELECT 1 FROM information_schema.columns
+        WHERE information_schema.columns.table_schema = schema_name
+          AND information_schema.columns.table_name = add_entity_isolation.table_name
+          AND information_schema.columns.column_name = 'authorId'
+      ) INTO has_author_id;
+
+      SELECT EXISTS (
+        SELECT 1 FROM information_schema.columns
+        WHERE information_schema.columns.table_schema = schema_name
+          AND information_schema.columns.table_name = add_entity_isolation.table_name
+          AND information_schema.columns.column_name = 'roomId'
+      ) INTO has_room_id;
+
+      -- Skip if no entity-related columns
+      IF NOT (has_entity_id OR has_author_id OR has_room_id) THEN
+        RAISE NOTICE '[Entity RLS] Skipping %.%: no entity columns found', schema_name, table_name;
+        RETURN;
+      END IF;
+
+      -- Determine which column to use for entity filtering
+      -- Priority: roomId (shared access via participants) > entityId/authorId (direct access)
+      --
+      -- SPECIAL CASE: participants table must use direct entityId to avoid infinite recursion
+      IF table_name = 'participants' AND has_entity_id THEN
+        entity_column_name := 'entityId';
+        room_column_name := NULL;
+      ELSIF has_room_id THEN
+        room_column_name := 'roomId';
+        entity_column_name := NULL;
+      ELSIF has_entity_id THEN
+        entity_column_name := 'entityId';
+        room_column_name := NULL;
+      ELSIF has_author_id THEN
+        entity_column_name := 'authorId';
+        room_column_name := NULL;
+      ELSE
+        entity_column_name := NULL;
+        room_column_name := NULL;
+      END IF;
+
+      -- Enable RLS on the table
+      EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
+      EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
+
+      -- Drop existing entity policies if present
+      EXECUTE format('DROP POLICY IF EXISTS entity_isolation_policy ON %I.%I', schema_name, table_name);
+
+      -- CASE 1: Table has roomId or channelId (shared access via participants)
+      IF room_column_name IS NOT NULL THEN
+        -- Determine the corresponding column name in participants table
+        -- If the table has roomId, look for roomId in participants.roomId
+        -- participants table uses: entityId (for participant), roomId (for room)
+        -- RESTRICTIVE: Must pass BOTH server RLS AND entity RLS (combined with AND)
+
+        -- Build policy with or without NULL check based on require_entity parameter
+        IF require_entity THEN
+          -- STRICT MODE: Entity context is REQUIRED (blocks NULL entity_id)
+          EXECUTE format('
+            CREATE POLICY entity_isolation_policy ON %I.%I
+            AS RESTRICTIVE
+            USING (
+              current_entity_id() IS NOT NULL
+              AND %I IN (
+                SELECT "roomId"
+                FROM participants
+                WHERE "entityId" = current_entity_id()
+              )
+            )
+            WITH CHECK (
+              current_entity_id() IS NOT NULL
+              AND %I IN (
+                SELECT "roomId"
+                FROM participants
+                WHERE "entityId" = current_entity_id()
+              )
+            )
+          ', schema_name, table_name, room_column_name, room_column_name);
+          RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (via % → participants.roomId, entity REQUIRED)', schema_name, table_name, room_column_name;
+        ELSE
+          -- PERMISSIVE MODE: NULL entity_id allows system/admin access
+          EXECUTE format('
+            CREATE POLICY entity_isolation_policy ON %I.%I
+            AS RESTRICTIVE
+            USING (
+              current_entity_id() IS NULL
+              OR %I IN (
+                SELECT "roomId"
+                FROM participants
+                WHERE "entityId" = current_entity_id()
+              )
+            )
+            WITH CHECK (
+              current_entity_id() IS NULL
+              OR %I IN (
+                SELECT "roomId"
+                FROM participants
+                WHERE "entityId" = current_entity_id()
+              )
+            )
+          ', schema_name, table_name, room_column_name, room_column_name);
+          RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (via % → participants.roomId, NULL allowed)', schema_name, table_name, room_column_name;
+        END IF;
+
+      -- CASE 2: Table has direct entity_id or author_id column
+      ELSIF entity_column_name IS NOT NULL THEN
+        -- RESTRICTIVE: Must pass BOTH server RLS AND entity RLS (combined with AND)
+
+        IF require_entity THEN
+          -- STRICT MODE: Entity context is REQUIRED
+          EXECUTE format('
+            CREATE POLICY entity_isolation_policy ON %I.%I
+            AS RESTRICTIVE
+            USING (
+              current_entity_id() IS NOT NULL
+              AND %I = current_entity_id()
+            )
+            WITH CHECK (
+              current_entity_id() IS NOT NULL
+              AND %I = current_entity_id()
+            )
+          ', schema_name, table_name, entity_column_name, entity_column_name);
+          RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (direct column: %, entity REQUIRED)', schema_name, table_name, entity_column_name;
+        ELSE
+          -- PERMISSIVE MODE: NULL entity_id allows system/admin access
+          EXECUTE format('
+            CREATE POLICY entity_isolation_policy ON %I.%I
+            AS RESTRICTIVE
+            USING (
+              current_entity_id() IS NULL
+              OR %I = current_entity_id()
+            )
+            WITH CHECK (
+              current_entity_id() IS NULL
+              OR %I = current_entity_id()
+            )
+          ', schema_name, table_name, entity_column_name, entity_column_name);
+          RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (direct column: %, NULL allowed)', schema_name, table_name, entity_column_name;
+        END IF;
+      END IF;
+
+      -- Create indexes for efficient entity filtering
+      IF room_column_name IS NOT NULL THEN
+        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_room ON %I.%I(%I)',
+          table_name, schema_name, table_name, room_column_name);
+      END IF;
+
+      IF entity_column_name IS NOT NULL THEN
+        EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_entity ON %I.%I(%I)',
+          table_name, schema_name, table_name, entity_column_name);
+      END IF;
+    END;
+    $$ LANGUAGE plpgsql;
+  `);
+  logger8.info("[Entity RLS] Created add_entity_isolation() function");
+  await db2.execute(sql`
+    CREATE OR REPLACE FUNCTION apply_entity_rls_to_all_tables() RETURNS void AS $$
+    DECLARE
+      tbl record;
+      require_entity_for_table boolean;
+    BEGIN
+      FOR tbl IN
+        SELECT schemaname, tablename
+        FROM pg_tables
+        WHERE schemaname = 'public'
+          AND tablename NOT IN (
+            'servers', -- Server RLS table
+            'users', -- Authentication table (no entity isolation needed)
+            'entity_mappings', -- Mapping table (no entity isolation needed)
+            'drizzle_migrations', -- Migration tracking
+            '__drizzle_migrations' -- Migration tracking
+          )
+      LOOP
+        BEGIN
+          -- Apply STRICT mode (require_entity=true) to sensitive user-facing tables
+          -- These tables MUST have entity context set to access data
|
|
8598
|
+
-- STRICT tables: memories, logs, components, tasks (user data requiring isolation)
|
|
8599
|
+
-- NOTE: Excluded tables:
|
|
8600
|
+
-- - 'participants': Adding participants is a privileged operation during initialization
|
|
8601
|
+
IF tbl.tablename IN ('memories', 'logs', 'components', 'tasks') THEN
|
|
8602
|
+
require_entity_for_table := true;
|
|
8603
|
+
ELSE
|
|
8604
|
+
-- PERMISSIVE mode (require_entity=false) for system/privileged tables
|
|
8605
|
+
-- This includes: participants, rooms, channels, entities, etc.
|
|
8606
|
+
require_entity_for_table := false;
|
|
8607
|
+
END IF;
|
|
8608
|
+
|
|
8609
|
+
PERFORM add_entity_isolation(tbl.schemaname, tbl.tablename, require_entity_for_table);
|
|
8610
|
+
EXCEPTION WHEN OTHERS THEN
|
|
8611
|
+
RAISE WARNING '[Entity RLS] Failed to apply to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
|
|
8612
|
+
END;
|
|
8613
|
+
END LOOP;
|
|
8614
|
+
END;
|
|
8615
|
+
$$ LANGUAGE plpgsql;
|
|
8616
|
+
`);
|
|
8617
|
+
logger8.info("[Entity RLS] Created apply_entity_rls_to_all_tables() function");
|
|
8618
|
+
logger8.info("[Entity RLS] Entity RLS functions installed successfully");
|
|
8619
|
+
}
|
|
8620
|
+
async function applyEntityRLSToAllTables(adapter) {
|
|
8621
|
+
const db2 = adapter.db;
|
|
8622
|
+
try {
|
|
8623
|
+
await db2.execute(sql`SELECT apply_entity_rls_to_all_tables()`);
|
|
8624
|
+
logger8.info("[Entity RLS] Applied entity RLS to all eligible tables");
|
|
8625
|
+
} catch (error) {
|
|
8626
|
+
logger8.warn("[Entity RLS] Failed to apply entity RLS to some tables:", String(error));
|
|
8627
|
+
}
|
|
8628
|
+
}
|
|
8629
|
+
async function uninstallEntityRLS(adapter) {
|
|
8630
|
+
const db2 = adapter.db;
|
|
8631
|
+
logger8.info("[Entity RLS] Removing entity RLS policies and functions...");
|
|
8632
|
+
try {
|
|
8633
|
+
const tablesResult = await db2.execute(sql`
|
|
8634
|
+
SELECT schemaname, tablename
|
|
8635
|
+
FROM pg_tables
|
|
8636
|
+
WHERE schemaname = 'public'
|
|
8637
|
+
AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
|
|
8638
|
+
`);
|
|
8639
|
+
for (const row of tablesResult.rows || []) {
|
|
8640
|
+
const schemaName = row.schemaname;
|
|
8641
|
+
const tableName = row.tablename;
|
|
8642
|
+
try {
|
|
8643
|
+
await db2.execute(sql.raw(`DROP POLICY IF EXISTS entity_isolation_policy ON ${schemaName}.${tableName}`));
|
|
8644
|
+
logger8.debug(`[Entity RLS] Dropped entity_isolation_policy from ${schemaName}.${tableName}`);
|
|
8645
|
+
} catch (error) {
|
|
8646
|
+
logger8.debug(`[Entity RLS] No entity policy on ${schemaName}.${tableName}`);
|
|
8647
|
+
}
|
|
8648
|
+
}
|
|
8649
|
+
await db2.execute(sql`DROP FUNCTION IF EXISTS apply_entity_rls_to_all_tables() CASCADE`);
|
|
8650
|
+
await db2.execute(sql`DROP FUNCTION IF EXISTS add_entity_isolation(text, text) CASCADE`);
|
|
8651
|
+
await db2.execute(sql`DROP FUNCTION IF EXISTS current_entity_id() CASCADE`);
|
|
8652
|
+
logger8.info("[Entity RLS] Entity RLS functions and policies removed successfully");
|
|
8653
|
+
} catch (error) {
|
|
8654
|
+
logger8.error("[Entity RLS] Failed to remove entity RLS:", String(error));
|
|
8655
|
+
throw error;
|
|
8656
|
+
}
|
|
8657
|
+
}
|
|
8658
|
+
var init_rls = __esm(() => {
|
|
8659
|
+
init_drizzle_orm();
|
|
8660
|
+
init_server();
|
|
8661
|
+
init_agent();
|
|
8662
|
+
});
|
|
8663
|
+
|
|
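For orientation, the statement below is roughly what `add_entity_isolation('public', 'memories', true)` would execute after `format()` substitution of the STRICT roomId branch above. The choice of `public.memories` and its `"roomId"` column is an assumption for illustration; the policy shape itself mirrors the template in the added function, and `db`/`sql` stand in for a drizzle handle.

```ts
// Illustrative sketch only: table, schema, and column names are assumed; the
// SQL mirrors the STRICT (require_entity=true) roomId branch of
// add_entity_isolation() after format() expansion.
import { sql } from "drizzle-orm";
import type { NodePgDatabase } from "drizzle-orm/node-postgres";

export async function previewStrictRoomPolicy(db: NodePgDatabase) {
  await db.execute(sql`
    CREATE POLICY entity_isolation_policy ON public.memories
      AS RESTRICTIVE
      USING (
        current_entity_id() IS NOT NULL
        AND "roomId" IN (SELECT "roomId" FROM participants WHERE "entityId" = current_entity_id())
      )
      WITH CHECK (
        current_entity_id() IS NOT NULL
        AND "roomId" IN (SELECT "roomId" FROM participants WHERE "entityId" = current_entity_id())
      )
  `);
}
```

Because the policy is RESTRICTIVE, it is ANDed with any existing server-level policy rather than widening access.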
 // src/migration-service.ts
 var exports_migration_service = {};
 __export(exports_migration_service, {
   DatabaseMigrationService: () => DatabaseMigrationService
 });
-import { logger as
+import { logger as logger9 } from "@elizaos/core";

 class DatabaseMigrationService {
   db = null;
@@ -7861,22 +8675,22 @@ class DatabaseMigrationService {
   constructor() {}
   async initializeWithDatabase(db2) {
     this.db = db2;
+    await migrateToEntityRLS({ db: db2 });
     this.migrator = new RuntimeMigrator(db2);
     await this.migrator.initialize();
-
+    logger9.info({ src: "plugin:sql" }, "DatabaseMigrationService initialized");
   }
   discoverAndRegisterPluginSchemas(plugins) {
     for (const plugin of plugins) {
       if (plugin.schema) {
         this.registeredSchemas.set(plugin.name, plugin.schema);
-        logger7.info(`Registered schema for plugin: ${plugin.name}`);
       }
     }
-
+    logger9.info({ src: "plugin:sql", schemasDiscovered: this.registeredSchemas.size, totalPlugins: plugins.length }, "Plugin schemas discovered");
   }
   registerSchema(pluginName, schema2) {
     this.registeredSchemas.set(pluginName, schema2);
-
+    logger9.debug({ src: "plugin:sql", pluginName }, "Schema registered");
   }
   async runAllPluginMigrations(options) {
     if (!this.db || !this.migrator) {
@@ -7888,12 +8702,7 @@ class DatabaseMigrationService {
       force: options?.force ?? false,
       dryRun: options?.dryRun ?? false
     };
-
-    logger7.info(`[DatabaseMigrationService] Environment: ${isProduction ? "PRODUCTION" : "DEVELOPMENT"}`);
-    logger7.info(`[DatabaseMigrationService] Plugins to migrate: ${this.registeredSchemas.size}`);
-    if (migrationOptions.dryRun) {
-      logger7.info("[DatabaseMigrationService] DRY RUN mode - no changes will be applied");
-    }
+    logger9.info({ src: "plugin:sql", environment: isProduction ? "PRODUCTION" : "DEVELOPMENT", pluginCount: this.registeredSchemas.size, dryRun: migrationOptions.dryRun }, "Starting migrations");
     let successCount = 0;
     let failureCount = 0;
     const errors2 = [];
@@ -7901,27 +8710,37 @@ class DatabaseMigrationService {
       try {
         await this.migrator.migrate(pluginName, schema2, migrationOptions);
         successCount++;
-
+        logger9.info({ src: "plugin:sql", pluginName }, "Migration completed");
       } catch (error) {
         failureCount++;
         const errorMessage = error.message;
         errors2.push({ pluginName, error });
         if (errorMessage.includes("Destructive migration blocked")) {
-
-          if (!migrationOptions.force && process.env.ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS !== "true") {
-            logger7.error("[DatabaseMigrationService] To allow destructive migrations:");
-            logger7.error("[DatabaseMigrationService] - Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true");
-            logger7.error("[DatabaseMigrationService] - Or pass { force: true } to this method");
-          }
+          logger9.error({ src: "plugin:sql", pluginName }, "Migration blocked - destructive changes detected. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use force option");
         } else {
-
+          logger9.error({ src: "plugin:sql", pluginName, error: errorMessage }, "Migration failed");
         }
       }
     }
     if (failureCount === 0) {
-
+      logger9.info({ src: "plugin:sql", successCount }, "All migrations completed successfully");
+      const dataIsolationEnabled = process.env.ENABLE_DATA_ISOLATION === "true";
+      if (dataIsolationEnabled) {
+        try {
+          logger9.info({ src: "plugin:sql" }, "Re-applying Row Level Security...");
+          await installRLSFunctions({ db: this.db });
+          await applyRLSToNewTables({ db: this.db });
+          await applyEntityRLSToAllTables({ db: this.db });
+          logger9.info({ src: "plugin:sql" }, "RLS re-applied successfully");
+        } catch (rlsError) {
+          const errorMsg = rlsError instanceof Error ? rlsError.message : String(rlsError);
+          logger9.warn({ src: "plugin:sql", error: errorMsg }, "Failed to re-apply RLS (this is OK if server_id columns are not yet in schemas)");
+        }
+      } else {
+        logger9.info({ src: "plugin:sql" }, "Skipping RLS re-application (ENABLE_DATA_ISOLATION is not true)");
+      }
     } else {
-
+      logger9.error({ src: "plugin:sql", failureCount, successCount }, "Some migrations failed");
       const errorSummary = errors2.map((e) => `${e.pluginName}: ${e.error.message}`).join(`
 `);
       throw new Error(`${failureCount} migration(s) failed:
@@ -7934,13 +8753,16 @@ class DatabaseMigrationService {
   }
   var init_migration_service = __esm(() => {
     init_runtime_migrator2();
+    init_migrations();
+    init_rls();
   });

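The sketch below shows how the migration flow wired above might be driven end to end. The structural type is only a mirror of the method names visible in this diff (the real class is bundled inside `dist/node/index.node.js`), and the return types are assumptions.

```ts
// Hedged sketch: MigrationServiceLike mirrors the method names shown in the
// diff; it is not the package's published type.
type MigrationServiceLike = {
  initializeWithDatabase(db: unknown): Promise<void>;
  discoverAndRegisterPluginSchemas(plugins: Array<{ name: string; schema?: unknown }>): void;
  runAllPluginMigrations(options?: { force?: boolean; dryRun?: boolean }): Promise<void>;
};

export async function runMigrations(
  svc: MigrationServiceLike,
  db: unknown,
  plugins: Array<{ name: string; schema?: unknown }>
) {
  await svc.initializeWithDatabase(db);          // per this diff, also runs migrateToEntityRLS({ db })
  svc.discoverAndRegisterPluginSchemas(plugins); // registers each plugin.schema under the plugin name
  // When ENABLE_DATA_ISOLATION === "true" and every migration succeeds, the service
  // re-installs and re-applies RLS (installRLSFunctions, applyRLSToNewTables,
  // applyEntityRLSToAllTables); otherwise that step is skipped.
  await svc.runAllPluginMigrations({ force: false, dryRun: false });
}
```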
 // src/index.node.ts
-import { logger as
+import { logger as logger14, stringToUuid } from "@elizaos/core";
+import { mkdirSync } from "node:fs";

 // src/pglite/adapter.ts
-import { logger as
+import { logger as logger11 } from "@elizaos/core";

 // ../../node_modules/drizzle-orm/pglite/driver.js
 init_entity();
@@ -8154,7 +8976,7 @@ init_drizzle_orm();
 import {
   ChannelType,
   DatabaseAdapter,
-  logger as
+  logger as logger10
 } from "@elizaos/core";
 import { v4 } from "uuid";

@@ -8166,33 +8988,12 @@ import { VECTOR_DIMS } from "@elizaos/core";
 // src/schema/memory.ts
 init_drizzle_orm();
 init_pg_core();
-
-// src/schema/agent.ts
-init_drizzle_orm();
-init_pg_core();
-var agentTable = pgTable("agents", {
-  id: uuid("id").primaryKey().defaultRandom(),
-  enabled: boolean("enabled").default(true).notNull(),
-  owner_id: uuid("owner_id"),
-  createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
-  updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull(),
-  name: text("name").notNull(),
-  username: text("username"),
-  system: text("system").default(""),
-  bio: jsonb("bio").$type().default(sql`'[]'::jsonb`),
-  messageExamples: jsonb("message_examples").$type().default(sql`'[]'::jsonb`).notNull(),
-  postExamples: jsonb("post_examples").$type().default(sql`'[]'::jsonb`).notNull(),
-  topics: jsonb("topics").$type().default(sql`'[]'::jsonb`).notNull(),
-  adjectives: jsonb("adjectives").$type().default(sql`'[]'::jsonb`).notNull(),
-  knowledge: jsonb("knowledge").$type().default(sql`'[]'::jsonb`).notNull(),
-  plugins: jsonb("plugins").$type().default(sql`'[]'::jsonb`).notNull(),
-  settings: jsonb("settings").$type().default(sql`'{}'::jsonb`).notNull(),
-  style: jsonb("style").$type().default(sql`'{}'::jsonb`).notNull()
-});
+init_agent();

 // src/schema/entity.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var entityTable = pgTable("entities", {
   id: uuid("id").notNull().primaryKey(),
   agentId: uuid("agent_id").notNull().references(() => agentTable.id, {
@@ -8210,6 +9011,7 @@ var entityTable = pgTable("entities", {
 // src/schema/room.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var roomTable = pgTable("rooms", {
   id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
   agentId: uuid("agentId").references(() => agentTable.id, {
@@ -8217,12 +9019,12 @@ var roomTable = pgTable("rooms", {
   }),
   source: text("source").notNull(),
   type: text("type").notNull(),
-
+  messageServerId: uuid("message_server_id"),
   worldId: uuid("worldId"),
   name: text("name"),
   metadata: jsonb("metadata"),
-  channelId: text("
-  createdAt: timestamp("
+  channelId: text("channel_id"),
+  createdAt: timestamp("created_at").default(sql`now()`).notNull()
 });

 // src/schema/memory.ts
@@ -8314,17 +9116,18 @@ var embeddingTable = pgTable("embeddings", {
 ]);

 // src/schema/index.ts
+init_agent();
 var exports_schema = {};
 __export(exports_schema, {
   worldTable: () => worldTable,
   taskTable: () => taskTable,
-
+  serverTable: () => serverTable,
   roomTable: () => roomTable,
   relationshipTable: () => relationshipTable,
   participantTable: () => participantTable,
-  ownersTable: () => ownersTable,
   messageTable: () => messageTable,
   messageServerTable: () => messageServerTable,
+  messageServerAgentsTable: () => messageServerAgentsTable,
   memoryTable: () => memoryTable,
   logTable: () => logTable,
   entityTable: () => entityTable,
@@ -8339,29 +9142,32 @@ __export(exports_schema, {
 // src/schema/cache.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var cacheTable = pgTable("cache", {
   key: text("key").notNull(),
   agentId: uuid("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
   value: jsonb("value").notNull(),
   createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
   expiresAt: timestamp("expires_at", { withTimezone: true })
-}, (table3) =>
-
-
+}, (table3) => [
+  primaryKey({ columns: [table3.key, table3.agentId] })
+]);
 // src/schema/component.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();

 // src/schema/world.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var worldTable = pgTable("worlds", {
   id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
   agentId: uuid("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
   name: text("name").notNull(),
   metadata: jsonb("metadata"),
-
-  createdAt: timestamp("
+  messageServerId: uuid("message_server_id"),
+  createdAt: timestamp("created_at").default(sql`now()`).notNull()
 });

 // src/schema/component.ts
@@ -8398,17 +9204,14 @@ var logTable = pgTable("logs", {
     foreignColumns: [entityTable.id]
   }).onDelete("cascade")
 ]);
-
-
-
-
-  id: uuid("id").primaryKey(),
-  createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
-  updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull()
-});
+
+// src/schema/index.ts
+init_server();
+
 // src/schema/participant.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var participantTable = pgTable("participants", {
   id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
   createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
@@ -8439,6 +9242,7 @@ var participantTable = pgTable("participants", {
 // src/schema/relationship.ts
 init_drizzle_orm();
 init_pg_core();
+init_agent();
 var relationshipTable = pgTable("relationships", {
   id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
   createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
@@ -8464,6 +9268,7 @@ var relationshipTable = pgTable("relationships", {
 // src/schema/tasks.ts
 init_pg_core();
 init_drizzle_orm();
+init_agent();
 var taskTable = pgTable("tasks", {
   id: uuid("id").primaryKey().defaultRandom(),
   name: text("name").notNull(),
@@ -8471,7 +9276,7 @@ var taskTable = pgTable("tasks", {
   roomId: uuid("roomId"),
   worldId: uuid("worldId"),
   entityId: uuid("entityId"),
-  agentId: uuid("
+  agentId: uuid("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
   tags: text("tags").array().default(sql`'{}'::text[]`),
   metadata: jsonb("metadata").default(sql`'{}'::jsonb`),
   createdAt: timestamp("created_at", { withTimezone: true }).defaultNow(),
@@ -8494,7 +9299,7 @@ init_pg_core();
 init_drizzle_orm();
 var channelTable = pgTable("channels", {
   id: text("id").primaryKey(),
-  messageServerId: uuid("
+  messageServerId: uuid("message_server_id").notNull().references(() => messageServerTable.id, { onDelete: "cascade" }),
   name: text("name").notNull(),
   type: text("type").notNull(),
   sourceType: text("source_type"),
@@ -8526,18 +9331,19 @@ var messageTable = pgTable("central_messages", {
 init_pg_core();
 var channelParticipantsTable = pgTable("channel_participants", {
   channelId: text("channel_id").notNull().references(() => channelTable.id, { onDelete: "cascade" }),
-
-}, (table3) =>
-
-
-// src/schema/
+  entityId: text("entity_id").notNull()
+}, (table3) => [
+  primaryKey({ columns: [table3.channelId, table3.entityId] })
+]);
+// src/schema/messageServerAgent.ts
 init_pg_core();
-
-
+init_agent();
+var messageServerAgentsTable = pgTable("message_server_agents", {
+  messageServerId: uuid("message_server_id").notNull().references(() => messageServerTable.id, { onDelete: "cascade" }),
   agentId: uuid("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" })
-}, (table3) =>
-
-
+}, (table3) => [
+  primaryKey({ columns: [table3.messageServerId, table3.agentId] })
+]);
 // src/base.ts
 class BaseDrizzleAdapter extends DatabaseAdapter {
   maxRetries = 3;
@@ -8599,10 +9405,10 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   const backoffDelay = Math.min(this.baseDelay * 2 ** (attempt - 1), this.maxDelay);
   const jitter = Math.random() * this.jitterMax;
   const delay = backoffDelay + jitter;
-
+  logger10.warn({ src: "plugin:sql", attempt, maxRetries: this.maxRetries, error: error instanceof Error ? error.message : String(error) }, "Database operation failed, retrying");
   await new Promise((resolve) => setTimeout(resolve, delay));
 } else {
-
+  logger10.error({ src: "plugin:sql", totalAttempts: attempt, error: error instanceof Error ? error.message : String(error) }, "Max retry attempts reached");
   throw error instanceof Error ? error : new Error(String(error));
 }
 }
@@ -8656,7 +9462,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 if (agent.id) {
   const existing = await this.db.select({ id: agentTable.id }).from(agentTable).where(eq(agentTable.id, agent.id)).limit(1);
   if (existing.length > 0) {
-
+    logger10.warn({ src: "plugin:sql", agentId: agent.id }, "Attempted to create agent with duplicate ID");
     return false;
   }
 }
@@ -8667,10 +9473,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   updatedAt: new Date(agent.updatedAt || Date.now())
   });
 });
-logger8.debug(`Agent created successfully: ${agent.id}`);
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: agent.id, error: error instanceof Error ? error.message : String(error) }, "Failed to create agent");
   return false;
 }
 });
@@ -8704,10 +9509,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }
   await tx.update(agentTable).set(updateData).where(eq(agentTable.id, agentId));
 });
-logger8.debug(`Agent updated successfully: ${agentId}`);
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId, error: error instanceof Error ? error.message : String(error) }, "Failed to update agent");
   return false;
 }
 });
@@ -8749,22 +9553,16 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   return finalSettings === undefined ? {} : finalSettings;
 }
 async deleteAgent(agentId) {
-  logger8.debug(`[DB] Deleting agent with ID: ${agentId}`);
   return this.withDatabase(async () => {
     try {
       const result = await this.db.delete(agentTable).where(eq(agentTable.id, agentId)).returning();
       if (result.length === 0) {
-
+        logger10.warn({ src: "plugin:sql", agentId }, "Agent not found for deletion");
         return false;
       }
-      logger8.success(`[DB] Agent ${agentId} and all related data successfully deleted via cascade`);
       return true;
     } catch (error) {
-
-      if (error instanceof Error) {
-        logger8.error(`[DB] Error details: ${error.name} - ${error.message}`);
-        logger8.error(`[DB] Stack trace: ${error.stack}`);
-      }
+      logger10.error({ src: "plugin:sql", agentId, error: error instanceof Error ? error.message : String(error) }, "Failed to delete agent");
       throw error;
     }
   });
@@ -8775,7 +9573,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   const result = await this.db.select({ count: count() }).from(agentTable);
   return result[0]?.count || 0;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to count agents");
   return 0;
 }
 });
@@ -8784,9 +9582,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 return this.withDatabase(async () => {
   try {
     await this.db.delete(agentTable);
-    logger8.success("Successfully cleaned up agent table");
   } catch (error) {
-
+    logger10.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to clean up agent table");
     throw error;
   }
 });
@@ -8865,21 +9662,17 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   metadata: entity2.metadata || {}
 }));
 await tx.insert(entityTable).values(normalizedEntities);
-logger8.debug(`${entities.length} Entities created successfully`);
 return true;
 });
 } catch (error) {
-
-  if (error instanceof Error && error.stack) {
-    logger8.trace("Stack trace:", error.stack);
-  }
+  logger10.error({ src: "plugin:sql", entityId: entities[0]?.id, error: error instanceof Error ? error.message : String(error) }, "Failed to create entities");
   return false;
 }
 });
 }
 async ensureEntityExists(entity2) {
   if (!entity2.id) {
-
+    logger10.error({ src: "plugin:sql" }, "Entity ID is required for ensureEntityExists");
     return false;
   }
   try {
@@ -8889,7 +9682,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }
   return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", entityId: entity2.id, error: error instanceof Error ? error.message : String(error) }, "Failed to ensure entity exists");
   return false;
 }
 }
@@ -9057,14 +9850,11 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 if (offset !== undefined && offset < 0) {
   throw new Error("offset must be a non-negative number");
 }
-return this.
+return this.withEntityContext(entityId ?? null, async (tx) => {
   const conditions2 = [eq(memoryTable.type, tableName)];
   if (start) {
     conditions2.push(gte(memoryTable.createdAt, new Date(start)));
   }
-  if (entityId) {
-    conditions2.push(eq(memoryTable.entityId, entityId));
-  }
   if (roomId) {
     conditions2.push(eq(memoryTable.roomId, roomId));
   }
@@ -9080,7 +9870,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   if (agentId) {
     conditions2.push(eq(memoryTable.agentId, agentId));
   }
-  const baseQuery =
+  const baseQuery = tx.select({
     memory: {
       id: memoryTable.id,
       type: memoryTable.type,
@@ -9242,7 +10032,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   levenshtein_score: Number(row.levenshtein_score)
 })).filter((row) => Array.isArray(row.embedding));
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", tableName: opts.query_table_name, fieldName: opts.query_field_name, error: error instanceof Error ? error.message : String(error) }, "Failed to get cached embeddings");
   if (error instanceof Error && error.message === "levenshtein argument exceeds maximum length of 255 characters") {
     return [];
   }
@@ -9255,7 +10045,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 try {
   const sanitizedBody = this.sanitizeJsonObject(params.body);
   const jsonString = JSON.stringify(sanitizedBody);
-  await this.
+  await this.withEntityContext(params.entityId, async (tx) => {
     await tx.insert(logTable).values({
       body: sql`${jsonString}::jsonb`,
       entityId: params.entityId,
@@ -9264,7 +10054,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   });
 });
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", type: params.type, roomId: params.roomId, entityId: params.entityId, error: error instanceof Error ? error.message : String(error) }, "Failed to create log entry");
   throw error;
 }
 });
@@ -9297,8 +10087,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 }
 async getLogs(params) {
   const { entityId, roomId, type, count: count2, offset } = params;
-  return this.
-  const result = await
+  return this.withEntityContext(entityId ?? null, async (tx) => {
+    const result = await tx.select().from(logTable).where(and(roomId ? eq(logTable.roomId, roomId) : undefined, type ? eq(logTable.type, type) : undefined)).orderBy(desc(logTable.createdAt)).limit(count2 ?? 10).offset(offset ?? 0);
     const logs = result.map((log) => ({
       ...log,
       id: log.id,
@@ -9316,7 +10106,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 const limit = Math.min(Math.max(params.limit ?? 20, 1), 100);
 const fromDate = typeof params.from === "number" ? new Date(params.from) : undefined;
 const toDate = typeof params.to === "number" ? new Date(params.to) : undefined;
-return this.
+return this.withEntityContext(params.entityId ?? null, async (tx) => {
   const runMap = new Map;
   const conditions2 = [
     eq(logTable.type, "run_event"),
@@ -9334,7 +10124,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }
   const whereClause = and(...conditions2);
   const eventLimit = Math.max(limit * 20, 200);
-  const runEventRows = await
+  const runEventRows = await tx.select({
     runId: sql`(${logTable.body} ->> 'runId')`,
     status: sql`(${logTable.body} ->> 'status')`,
     messageId: sql`(${logTable.body} ->> 'messageId')`,
@@ -9541,11 +10331,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   });
 }
 async createMemory(memory, tableName) {
-  logger8.debug(`DrizzleAdapter createMemory: memoryId: ${memory.id}, embeddingLength: ${memory.embedding?.length}, contentLength: ${memory.content?.text?.length}`);
   const memoryId = memory.id ?? v4();
   const existing = await this.getMemoryById(memoryId);
   if (existing) {
-    logger8.debug(`Memory already exists, skipping creation: ${memoryId}`);
     return memoryId;
   }
   if (memory.unique === undefined) {
@@ -9564,7 +10352,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }
   const contentToInsert = typeof memory.content === "string" ? memory.content : JSON.stringify(memory.content ?? {});
   const metadataToInsert = typeof memory.metadata === "string" ? memory.metadata : JSON.stringify(memory.metadata ?? {});
-  await this.
+  await this.withEntityContext(memory.entityId, async (tx) => {
     await tx.insert(memoryTable).values([
       {
         id: memoryId,
@@ -9595,7 +10383,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 async updateMemory(memory) {
   return this.withDatabase(async () => {
     try {
-      logger8.debug(`Updating memory: memoryId: ${memory.id}, hasEmbedding: ${!!memory.embedding}`);
       await this.db.transaction(async (tx) => {
         if (memory.content) {
           const contentToUpdate = typeof memory.content === "string" ? memory.content : JSON.stringify(memory.content ?? {});
@@ -9627,10 +10414,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
         }
       }
     });
-    logger8.debug(`Memory updated successfully: ${memory.id}`);
     return true;
   } catch (error) {
-
+    logger10.error({ src: "plugin:sql", memoryId: memory.id, error: error instanceof Error ? error.message : String(error) }, "Failed to update memory");
     return false;
   }
 });
@@ -9642,7 +10428,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   await tx.delete(embeddingTable).where(eq(embeddingTable.memoryId, memoryId));
   await tx.delete(memoryTable).where(eq(memoryTable.id, memoryId));
 });
-logger8.debug(`Memory and related fragments removed successfully: ${memoryId}`);
 });
 }
 async deleteManyMemories(memoryIds) {
@@ -9661,7 +10446,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   await tx.delete(memoryTable).where(inArray(memoryTable.id, batch));
   }
 });
-logger8.debug(`Batch memory deletion completed successfully: ${memoryIds.length}`);
 });
 }
 async deleteMemoryFragments(tx, documentId) {
@@ -9670,7 +10454,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   const fragmentIds = fragmentsToDelete.map((f) => f.id);
   await tx.delete(embeddingTable).where(inArray(embeddingTable.memoryId, fragmentIds));
   await tx.delete(memoryTable).where(inArray(memoryTable.id, fragmentIds));
-  logger8.debug(`Deleted related fragments: documentId: ${documentId}, fragmentCount: ${fragmentsToDelete.length}`);
 }
 }
 async getMemoryFragments(tx, documentId) {
@@ -9682,7 +10465,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 await this.db.transaction(async (tx) => {
   const rows = await tx.select({ id: memoryTable.id }).from(memoryTable).where(and(eq(memoryTable.roomId, roomId), eq(memoryTable.type, tableName)));
   const ids = rows.map((r) => r.id);
-
+  logger10.debug({ src: "plugin:sql", roomId, tableName, memoryCount: ids.length }, "Deleting all memories");
   if (ids.length === 0) {
     return;
   }
@@ -9692,7 +10475,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }));
   await tx.delete(memoryTable).where(and(eq(memoryTable.roomId, roomId), eq(memoryTable.type, tableName)));
 });
-logger8.debug(`All memories removed successfully: roomId: ${roomId}, tableName: ${tableName}`);
 });
 }
 async countMemories(roomId, unique2 = true, tableName = "") {
@@ -9714,7 +10496,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   name: roomTable.name,
   channelId: roomTable.channelId,
   agentId: roomTable.agentId,
-
+  messageServerId: roomTable.messageServerId,
   worldId: roomTable.worldId,
   type: roomTable.type,
   source: roomTable.source,
@@ -9725,7 +10507,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   id: room.id,
   name: room.name ?? undefined,
   agentId: room.agentId,
-
+  messageServerId: room.messageServerId,
+  serverId: room.messageServerId,
   worldId: room.worldId,
   channelId: room.channelId,
   type: room.type,
@@ -9742,7 +10525,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   id: room.id,
   name: room.name ?? undefined,
   agentId: room.agentId,
-
+  messageServerId: room.messageServerId,
+  serverId: room.messageServerId,
   worldId: room.worldId,
   channelId: room.channelId,
   type: room.type,
@@ -9799,7 +10583,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }).onConflictDoNothing();
   return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", entityId, roomId, agentId: this.agentId, error: error instanceof Error ? error.message : String(error) }, "Failed to add participant to room");
   return false;
 }
 });
@@ -9813,10 +10597,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   agentId: this.agentId
 }));
 await this.db.insert(participantTable).values(values).onConflictDoNothing().execute();
-logger8.debug(`${entityIds.length} Entities linked successfully`);
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", roomId, agentId: this.agentId, error: error instanceof Error ? error.message : String(error) }, "Failed to add participants to room");
   return false;
 }
 });
@@ -9828,10 +10611,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   return await tx.delete(participantTable).where(and(eq(participantTable.entityId, entityId), eq(participantTable.roomId, roomId))).returning();
 });
 const removed = result.length > 0;
-logger8.debug(`Participant ${removed ? "removed" : "not found"}: entityId: ${entityId}, roomId: ${roomId}, removed: ${removed}`);
 return removed;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", entityId, roomId, error: error instanceof Error ? error.message : String(error) }, "Failed to remove participant from room");
   return false;
 }
 });
@@ -9859,6 +10641,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   return result.map((row) => row.entityId);
 });
 }
+async isRoomParticipant(roomId, entityId) {
+  return this.withDatabase(async () => {
+    const result = await this.db.select().from(participantTable).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId))).limit(1);
+    return result.length > 0;
+  });
+}
 async getParticipantUserState(roomId, entityId) {
   return this.withDatabase(async () => {
     const result = await this.db.select({ roomState: participantTable.roomState }).from(participantTable).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId), eq(participantTable.agentId, this.agentId))).limit(1);
@@ -9872,7 +10660,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   await tx.update(participantTable).set({ roomState: state }).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId), eq(participantTable.agentId, this.agentId)));
 });
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", roomId, entityId, state, error: error instanceof Error ? error.message : String(error) }, "Failed to set participant follow state");
   throw error;
 }
 });
@@ -9892,7 +10680,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 await this.db.insert(relationshipTable).values(saveParams);
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: this.agentId, error: error instanceof Error ? error.message : String(error), saveParams }, "Error creating relationship");
   return false;
 }
 });
@@ -9905,7 +10693,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   metadata: relationship.metadata || {}
 }).where(eq(relationshipTable.id, relationship.id));
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: this.agentId, error: error instanceof Error ? error.message : String(error), relationshipId: relationship.id }, "Error updating relationship");
   throw error;
 }
 });
@@ -9949,12 +10737,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 return result.rows.map((relationship) => ({
   ...relationship,
   id: relationship.id,
-  sourceEntityId: relationship.sourceEntityId,
-  targetEntityId: relationship.targetEntityId,
-  agentId: relationship.agentId,
+  sourceEntityId: relationship.source_entity_id || relationship.sourceEntityId,
+  targetEntityId: relationship.target_entity_id || relationship.targetEntityId,
+  agentId: relationship.agent_id || relationship.agentId,
   tags: relationship.tags ?? [],
   metadata: relationship.metadata ?? {},
-  createdAt: relationship.createdAt ? relationship.createdAt instanceof Date ? relationship.createdAt.toISOString() : new Date(relationship.createdAt).toISOString() : new Date().toISOString()
+  createdAt: relationship.created_at || relationship.createdAt ? (relationship.created_at || relationship.createdAt) instanceof Date ? (relationship.created_at || relationship.createdAt).toISOString() : new Date(relationship.created_at || relationship.createdAt).toISOString() : new Date().toISOString()
 }));
 });
 }
@@ -9967,7 +10755,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   }
   return;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: this.agentId, error: error instanceof Error ? error.message : String(error), key }, "Error fetching cache");
   return;
 }
 });
@@ -9987,7 +10775,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 });
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: this.agentId, error: error instanceof Error ? error.message : String(error), key }, "Error setting cache");
   return false;
 }
 });
@@ -10000,7 +10788,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 });
 return true;
 } catch (error) {
-
+  logger10.error({ src: "plugin:sql", agentId: this.agentId, error: error instanceof Error ? error.message : String(error), key }, "Error deleting cache");
   return false;
 }
 });
@@ -10163,25 +10951,20 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 return this.withDatabase(async () => {
   const rooms = await this.db.select({ id: roomTable.id }).from(roomTable).where(and(eq(roomTable.worldId, worldId), eq(roomTable.agentId, this.agentId)));
   if (rooms.length === 0) {
-    logger8.debug(`No rooms found for worldId ${worldId} and agentId ${this.agentId} to delete.`);
     return;
   }
   const roomIds = rooms.map((room) => room.id);
   if (roomIds.length > 0) {
     await this.db.delete(logTable).where(inArray(logTable.roomId, roomIds));
-    logger8.debug(`Deleted logs for ${roomIds.length} rooms in world ${worldId}.`);
     await this.db.delete(participantTable).where(inArray(participantTable.roomId, roomIds));
-    logger8.debug(`Deleted participants for ${roomIds.length} rooms in world ${worldId}.`);
     const memoriesInRooms = await this.db.select({ id: memoryTable.id }).from(memoryTable).where(inArray(memoryTable.roomId, roomIds));
     const memoryIdsInRooms = memoriesInRooms.map((m) => m.id);
     if (memoryIdsInRooms.length > 0) {
       await this.db.delete(embeddingTable).where(inArray(embeddingTable.memoryId, memoryIdsInRooms));
-      logger8.debug(`Deleted embeddings for ${memoryIdsInRooms.length} memories in world ${worldId}.`);
       await this.db.delete(memoryTable).where(inArray(memoryTable.id, memoryIdsInRooms));
-      logger8.debug(`Deleted ${memoryIdsInRooms.length} memories in world ${worldId}.`);
     }
     await this.db.delete(roomTable).where(inArray(roomTable.id, roomIds));
-
+    logger10.debug({ src: "plugin:sql", worldId, roomsDeleted: roomIds.length, memoriesDeleted: memoryIdsInRooms.length }, "World cleanup completed");
   }
 });
 }
@@ -10245,6 +11028,26 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   } : null;
 });
 }
+async getMessageServerByRlsServerId(rlsServerId) {
+  return this.withDatabase(async () => {
+    const results = await this.db.execute(sql`
+      SELECT id, name, source_type, source_id, metadata, created_at, updated_at
+      FROM message_servers
+      WHERE server_id = ${rlsServerId}
+      LIMIT 1
+    `);
+    const rows = results.rows || results;
+    return rows.length > 0 ? {
+      id: rows[0].id,
+      name: rows[0].name,
+      sourceType: rows[0].source_type,
+      sourceId: rows[0].source_id || undefined,
+      metadata: rows[0].metadata || undefined,
+      createdAt: new Date(rows[0].created_at),
+      updatedAt: new Date(rows[0].updated_at)
+    } : null;
+  });
+}
 async createChannel(data, participantIds) {
   return this.withDatabase(async () => {
     const newId = data.id || v4();
@@ -10264,9 +11067,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 await this.db.transaction(async (tx) => {
   await tx.insert(channelTable).values(channelToInsert);
   if (participantIds && participantIds.length > 0) {
-    const participantValues = participantIds.map((
+    const participantValues = participantIds.map((entityId) => ({
       channelId: newId,
-
+      entityId
     }));
     await tx.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
   }
@@ -10274,9 +11077,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   return channelToInsert;
 });
 }
-async
+async getChannelsForMessageServer(messageServerId) {
   return this.withDatabase(async () => {
-    const results = await this.db.select().from(channelTable).where(eq(channelTable.messageServerId,
+    const results = await this.db.select().from(channelTable).where(eq(channelTable.messageServerId, messageServerId));
     return results.map((r) => ({
       id: r.id,
       messageServerId: r.messageServerId,
@@ -10398,9 +11201,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
 if (updates.participantCentralUserIds !== undefined) {
   await tx.delete(channelParticipantsTable).where(eq(channelParticipantsTable.channelId, channelId));
   if (updates.participantCentralUserIds.length > 0) {
-    const participantValues = updates.participantCentralUserIds.map((
+    const participantValues = updates.participantCentralUserIds.map((entityId) => ({
       channelId,
-
+      entityId
     }));
     await tx.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
   }
@@ -10422,40 +11225,46 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
   });
 });
 }
-async addChannelParticipants(channelId,
+async addChannelParticipants(channelId, entityIds) {
   return this.withDatabase(async () => {
-    if (!
+    if (!entityIds || entityIds.length === 0)
       return;
-    const participantValues =
+    const participantValues = entityIds.map((entityId) => ({
       channelId,
-
+      entityId
    }));
    await this.db.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
  });
 }
 async getChannelParticipants(channelId) {
   return this.withDatabase(async () => {
-    const results = await this.db.select({
-    return results.map((r) => r.
+    const results = await this.db.select({ entityId: channelParticipantsTable.entityId }).from(channelParticipantsTable).where(eq(channelParticipantsTable.channelId, channelId));
+    return results.map((r) => r.entityId);
   });
 }
-async
+async isChannelParticipant(channelId, entityId) {
   return this.withDatabase(async () => {
-    await this.db.
-
+    const result = await this.db.select().from(channelParticipantsTable).where(and(eq(channelParticipantsTable.channelId, channelId), eq(channelParticipantsTable.entityId, entityId))).limit(1);
+    return result.length > 0;
+  });
+}
+async addAgentToMessageServer(messageServerId, agentId) {
+  return this.withDatabase(async () => {
+    await this.db.insert(messageServerAgentsTable).values({
+      messageServerId,
       agentId
     }).onConflictDoNothing();
   });
 }
-async
+async getAgentsForMessageServer(messageServerId) {
   return this.withDatabase(async () => {
-    const results = await this.db.select({ agentId:
+    const results = await this.db.select({ agentId: messageServerAgentsTable.agentId }).from(messageServerAgentsTable).where(eq(messageServerAgentsTable.messageServerId, messageServerId));
     return results.map((r) => r.agentId);
   });
 }
-async
+async removeAgentFromMessageServer(messageServerId, agentId) {
   return this.withDatabase(async () => {
-    await this.db.delete(
+    await this.db.delete(messageServerAgentsTable).where(and(eq(messageServerAgentsTable.messageServerId, messageServerId), eq(messageServerAgentsTable.agentId, agentId)));
   });
 }
 async findOrCreateDmChannel(user1Id, user2Id, messageServerId) {
@@ -10496,11 +11305,14 @@ class PgliteDatabaseAdapter extends BaseDrizzleAdapter {
|
|
|
10496
11305
|
this.manager = manager;
|
|
10497
11306
|
this.db = drizzle(this.manager.getConnection());
|
|
10498
11307
|
}
|
|
11308
|
+
async withEntityContext(_entityId, callback) {
|
|
11309
|
+
return this.db.transaction(callback);
|
|
11310
|
+
}
|
|
10499
11311
|
async getEntityByIds(entityIds) {
|
|
10500
11312
|
return this.getEntitiesByIds(entityIds);
|
|
10501
11313
|
}
|
|
10502
11314
|
async getMemoriesByServerId(_params) {
|
|
10503
|
-
|
|
11315
|
+
logger11.warn({ src: "plugin:sql" }, "getMemoriesByServerId called but not implemented");
|
|
10504
11316
|
return [];
|
|
10505
11317
|
}
|
|
10506
11318
|
async ensureAgentExists(agent) {
|
|
@@ -10525,13 +11337,13 @@ class PgliteDatabaseAdapter extends BaseDrizzleAdapter {
 }
 async withDatabase(operation) {
 if (this.manager.isShuttingDown()) {
-
+logger11.warn({ src: "plugin:sql" }, "Database is shutting down");
 return null;
 }
 return operation();
 }
 async init() {
-
+logger11.debug({ src: "plugin:sql" }, "PGliteDatabaseAdapter initialized");
 }
 async isReady() {
 return !this.manager.isShuttingDown();
@@ -10576,7 +11388,7 @@ class PGliteClientManager {
 }
 
 // src/pg/adapter.ts
-import { logger as
+import { logger as logger12 } from "@elizaos/core";
 
 // ../../node_modules/drizzle-orm/node-postgres/driver.js
 init_entity();
@@ -10599,12 +11411,12 @@ init_utils();
 var { Pool, types: types3 } = pg;
 
 class NodePgPreparedQuery extends PgPreparedQuery {
-constructor(client, queryString, params,
+constructor(client, queryString, params, logger12, cache, queryMetadata, cacheConfig, fields, name, _isResponseInArrayMode, customResultMapper) {
 super({ sql: queryString, params }, cache, queryMetadata, cacheConfig);
 this.client = client;
 this.queryString = queryString;
 this.params = params;
-this.logger =
+this.logger = logger12;
 this.fields = fields;
 this._isResponseInArrayMode = _isResponseInArrayMode;
 this.customResultMapper = customResultMapper;
@@ -10814,11 +11626,11 @@ class NodePgDatabase extends PgDatabase {
 }
 function construct2(client, config = {}) {
 const dialect2 = new PgDialect({ casing: config.casing });
-let
+let logger12;
 if (config.logger === true) {
-
+logger12 = new DefaultLogger;
 } else if (config.logger !== false) {
-
+logger12 = config.logger;
 }
 let schema2;
 if (config.schema) {
@@ -10829,7 +11641,7 @@ function construct2(client, config = {}) {
 tableNamesMap: tablesConfig.tableNamesMap
 };
 }
-const driver = new NodePgDriver(client, dialect2, { logger:
+const driver = new NodePgDriver(client, dialect2, { logger: logger12, cache: config.cache });
 const session2 = driver.createSession(schema2);
 const db2 = new NodePgDatabase(dialect2, session2, schema2);
 db2.$client = client;
@@ -10873,11 +11685,17 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
 this.manager = manager;
 this.db = manager.getDatabase();
 }
+getManager() {
+return this.manager;
+}
+async withEntityContext(entityId, callback) {
+return await this.manager.withEntityContext(entityId, callback);
+}
 async getEntityByIds(entityIds) {
 return this.getEntitiesByIds(entityIds);
 }
 async getMemoriesByServerId(_params) {
-
+logger12.warn({ src: "plugin:sql" }, "getMemoriesByServerId called but not implemented");
 return [];
 }
 async ensureAgentExists(agent) {
@@ -10913,7 +11731,7 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
 });
 }
 async init() {
-
+logger12.debug({ src: "plugin:sql" }, "PgDatabaseAdapter initialized");
 }
 async isReady() {
 return this.manager.testConnection();
@@ -10975,20 +11793,21 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
 }
 
 // src/pg/manager.ts
+init_drizzle_orm();
 import { Pool as Pool2 } from "pg";
-import { logger as
+import { logger as logger13 } from "@elizaos/core";
 
 class PostgresConnectionManager {
 pool;
 db;
-constructor(connectionString,
+constructor(connectionString, rlsServerId) {
 const poolConfig = { connectionString };
-if (
-poolConfig.application_name =
-
+if (rlsServerId) {
+poolConfig.application_name = rlsServerId;
+logger13.debug({ src: "plugin:sql", rlsServerId: rlsServerId.substring(0, 8) }, "Pool configured with RLS server");
 }
 this.pool = new Pool2(poolConfig);
-this.db = drizzle2(this.pool);
+this.db = drizzle2(this.pool, { casing: "snake_case" });
 }
 getDatabase() {
 return this.db;
@@ -11006,7 +11825,7 @@ class PostgresConnectionManager {
 await client.query("SELECT 1");
 return true;
 } catch (error) {
-
+logger13.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to connect to the database");
 return false;
 } finally {
 if (client) {
@@ -11014,6 +11833,27 @@ class PostgresConnectionManager {
 }
 }
 }
+async withEntityContext(entityId, callback) {
+return await this.db.transaction(async (tx) => {
+if (entityId) {
+try {
+await tx.execute(sql.raw(`SET LOCAL app.entity_id = '${entityId}'`));
+logger13.debug(`[Entity Context] Set app.entity_id = ${entityId}`);
+} catch (error) {
+const errorMessage = error instanceof Error ? error.message : String(error);
+if (errorMessage.includes("unrecognized configuration parameter") || errorMessage.includes("app.entity_id")) {
+logger13.debug("[Entity Context] Entity RLS not enabled, executing without entity context");
+} else {
+logger13.error({ error, entityId }, "[Entity Context] Critical error setting entity context - this may indicate a configuration issue");
+logger13.warn("[Entity Context] Continuing without entity context due to error - data isolation may be compromised");
+}
+}
+} else {
+logger13.debug("[Entity Context] No entity context set (server operation)");
+}
+return await callback(tx);
+});
+}
 async close() {
 await this.pool.end();
 }
@@ -11071,246 +11911,7 @@ function resolvePgliteDir(dir, fallbackDir) {
 
 // src/index.node.ts
 init_migration_service();
-
-// src/rls.ts
-init_drizzle_orm();
-import { logger as logger12, validateUuid } from "@elizaos/core";
-async function installRLSFunctions(adapter) {
-const db2 = adapter.db;
-await db2.execute(sql`
-CREATE TABLE IF NOT EXISTS owners (
-id UUID PRIMARY KEY,
-created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
-updated_at TIMESTAMPTZ DEFAULT NOW() NOT NULL
-)
-`);
-await db2.execute(sql`
-CREATE OR REPLACE FUNCTION current_owner_id() RETURNS UUID AS $$
-DECLARE
-app_name TEXT;
-BEGIN
-app_name := NULLIF(current_setting('application_name', TRUE), '');
-
--- Return NULL if application_name is not set or not a valid UUID
--- This allows admin queries to work without RLS restrictions
-BEGIN
-RETURN app_name::UUID;
-EXCEPTION WHEN OTHERS THEN
-RETURN NULL;
-END;
-END;
-$$ LANGUAGE plpgsql STABLE;
-`);
-await db2.execute(sql`
-CREATE OR REPLACE FUNCTION add_owner_isolation(
-schema_name text,
-table_name text
-) RETURNS void AS $$
-DECLARE
-full_table_name text;
-column_exists boolean;
-orphaned_count bigint;
-BEGIN
-full_table_name := schema_name || '.' || table_name;
-
--- Check if owner_id column already exists
-SELECT EXISTS (
-SELECT 1 FROM information_schema.columns
-WHERE information_schema.columns.table_schema = schema_name
-AND information_schema.columns.table_name = add_owner_isolation.table_name
-AND information_schema.columns.column_name = 'owner_id'
-) INTO column_exists;
-
--- Add owner_id column if missing (DEFAULT populates it automatically for new rows)
-IF NOT column_exists THEN
-EXECUTE format('ALTER TABLE %I.%I ADD COLUMN owner_id UUID DEFAULT current_owner_id()', schema_name, table_name);
-
--- Backfill existing rows with current owner_id
--- This ensures all existing data belongs to the tenant that is enabling RLS
-EXECUTE format('UPDATE %I.%I SET owner_id = current_owner_id() WHERE owner_id IS NULL', schema_name, table_name);
-ELSE
--- Column already exists (RLS was previously enabled then disabled)
--- Restore the DEFAULT clause (may have been removed during uninstallRLS)
-EXECUTE format('ALTER TABLE %I.%I ALTER COLUMN owner_id SET DEFAULT current_owner_id()', schema_name, table_name);
-
--- Only backfill NULL owner_id rows, do NOT steal data from other owners
-EXECUTE format('SELECT COUNT(*) FROM %I.%I WHERE owner_id IS NULL', schema_name, table_name) INTO orphaned_count;
-
-IF orphaned_count > 0 THEN
-RAISE NOTICE 'Backfilling % rows with NULL owner_id in %.%', orphaned_count, schema_name, table_name;
-EXECUTE format('UPDATE %I.%I SET owner_id = current_owner_id() WHERE owner_id IS NULL', schema_name, table_name);
-END IF;
-END IF;
-
--- Create index for efficient owner_id filtering
-EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_owner_id ON %I.%I(owner_id)', table_name, schema_name, table_name);
-
--- Enable RLS on the table
-EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
-
--- FORCE RLS even for table owners (critical for security)
-EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
-
--- Drop existing policy if present
-EXECUTE format('DROP POLICY IF EXISTS owner_isolation_policy ON %I.%I', schema_name, table_name);
-
--- Create isolation policy: users can only see/modify rows where owner_id matches current tenant
--- No NULL clause - all rows must have a valid owner_id (backfilled during column addition)
-EXECUTE format('
-CREATE POLICY owner_isolation_policy ON %I.%I
-USING (owner_id = current_owner_id())
-WITH CHECK (owner_id = current_owner_id())
-', schema_name, table_name);
-END;
-$$ LANGUAGE plpgsql;
-`);
-await db2.execute(sql`
-CREATE OR REPLACE FUNCTION apply_rls_to_all_tables() RETURNS void AS $$
-DECLARE
-tbl record;
-BEGIN
-FOR tbl IN
-SELECT schemaname, tablename
-FROM pg_tables
-WHERE schemaname = 'public'
-AND tablename NOT IN (
-'owners',
-'drizzle_migrations',
-'__drizzle_migrations',
-'server_agents'
-)
-LOOP
-BEGIN
-PERFORM add_owner_isolation(tbl.schemaname, tbl.tablename);
-EXCEPTION WHEN OTHERS THEN
-RAISE WARNING 'Failed to apply RLS to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
-END;
-END LOOP;
-END;
-$$ LANGUAGE plpgsql;
-`);
-logger12.info("[RLS] PostgreSQL functions installed");
-}
-async function getOrCreateRlsOwner(adapter, ownerId) {
-const db2 = adapter.db;
-await db2.insert(ownersTable).values({
-id: ownerId
-}).onConflictDoNothing();
-logger12.info(`[RLS] Owner: ${ownerId.slice(0, 8)}…`);
-return ownerId;
-}
-async function setOwnerContext(adapter, ownerId) {
-if (!validateUuid(ownerId)) {
-throw new Error(`Invalid owner ID format: ${ownerId}. Must be a valid UUID.`);
-}
-const db2 = adapter.db;
-const owners = await db2.select().from(ownersTable).where(eq(ownersTable.id, ownerId));
-if (owners.length === 0) {
-throw new Error(`Owner ${ownerId} does not exist`);
-}
-logger12.info(`[RLS] Owner: ${ownerId.slice(0, 8)}…`);
-logger12.info("[RLS] Context configured successfully (using application_name)");
-}
-async function assignAgentToOwner(adapter, agentId, ownerId) {
-const db2 = adapter.db;
-const agents = await db2.select().from(agentTable).where(eq(agentTable.id, agentId));
-if (agents.length > 0) {
-const agent = agents[0];
-const currentOwnerId = agent.owner_id;
-if (currentOwnerId === ownerId) {
-logger12.debug(`[RLS] Agent ${agent.name} already owned by correct owner`);
-} else {
-await db2.update(agentTable).set({ owner_id: ownerId }).where(eq(agentTable.id, agentId));
-if (currentOwnerId === null) {
-logger12.info(`[RLS] Agent ${agent.name} assigned to owner`);
-} else {
-logger12.warn(`[RLS] Agent ${agent.name} owner changed`);
-}
-}
-} else {
-logger12.debug(`[RLS] Agent ${agentId} doesn't exist yet`);
-}
-}
-async function applyRLSToNewTables(adapter) {
-const db2 = adapter.db;
-try {
-await db2.execute(sql`SELECT apply_rls_to_all_tables()`);
-logger12.info("[RLS] Applied to all tables");
-} catch (error) {
-logger12.warn("[RLS] Failed to apply to some tables:", String(error));
-}
-}
-async function uninstallRLS(adapter) {
-const db2 = adapter.db;
-try {
-const checkResult = await db2.execute(sql`
-SELECT EXISTS (
-SELECT FROM pg_tables
-WHERE schemaname = 'public' AND tablename = 'owners'
-) as rls_enabled
-`);
-const rlsEnabled = checkResult.rows?.[0]?.rls_enabled;
-if (!rlsEnabled) {
-logger12.debug("[RLS] RLS not installed, skipping cleanup");
-return;
-}
-logger12.info("[RLS] Disabling RLS globally (keeping owner_id columns for schema compatibility)...");
-await db2.execute(sql`
-CREATE OR REPLACE FUNCTION _temp_disable_rls_on_table(
-p_schema_name text,
-p_table_name text
-) RETURNS void AS $$
-DECLARE
-policy_rec record;
-BEGIN
--- Drop all policies on this table
-FOR policy_rec IN
-SELECT policyname
-FROM pg_policies
-WHERE schemaname = p_schema_name AND tablename = p_table_name
-LOOP
-EXECUTE format('DROP POLICY IF EXISTS %I ON %I.%I',
-policy_rec.policyname, p_schema_name, p_table_name);
-END LOOP;
-
--- Disable RLS
-EXECUTE format('ALTER TABLE %I.%I NO FORCE ROW LEVEL SECURITY', p_schema_name, p_table_name);
-EXECUTE format('ALTER TABLE %I.%I DISABLE ROW LEVEL SECURITY', p_schema_name, p_table_name);
-END;
-$$ LANGUAGE plpgsql;
-`);
-const tablesResult = await db2.execute(sql`
-SELECT schemaname, tablename
-FROM pg_tables
-WHERE schemaname = 'public'
-AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
-`);
-for (const row of tablesResult.rows || []) {
-const schemaName = row.schemaname;
-const tableName = row.tablename;
-try {
-await db2.execute(sql`SELECT _temp_disable_rls_on_table(${schemaName}, ${tableName})`);
-logger12.debug(`[RLS] Disabled RLS on table: ${schemaName}.${tableName}`);
-} catch (error) {
-logger12.warn(`[RLS] Failed to disable RLS on table ${schemaName}.${tableName}:`, String(error));
-}
-}
-await db2.execute(sql`DROP FUNCTION IF EXISTS _temp_disable_rls_on_table(text, text)`);
-logger12.info("[RLS] Keeping owner_id values intact (prevents data theft on re-enable)");
-logger12.info("[RLS] Clearing owners table...");
-await db2.execute(sql`TRUNCATE TABLE owners`);
-await db2.execute(sql`DROP FUNCTION IF EXISTS apply_rls_to_all_tables() CASCADE`);
-await db2.execute(sql`DROP FUNCTION IF EXISTS add_owner_isolation(text, text) CASCADE`);
-await db2.execute(sql`DROP FUNCTION IF EXISTS current_owner_id() CASCADE`);
-logger12.info("[RLS] Dropped all RLS functions");
-logger12.success("[RLS] RLS disabled successfully (owner_id columns preserved)");
-} catch (error) {
-logger12.error("[RLS] Failed to disable RLS:", String(error));
-throw error;
-}
-}
-
-// src/index.node.ts
+init_rls();
 var GLOBAL_SINGLETONS = Symbol.for("@elizaos/plugin-sql/global-singletons");
 var globalSymbols = globalThis;
 if (!globalSymbols[GLOBAL_SINGLETONS]) {
@@ -11319,30 +11920,33 @@ if (!globalSymbols[GLOBAL_SINGLETONS]) {
 var globalSingletons = globalSymbols[GLOBAL_SINGLETONS];
 function createDatabaseAdapter(config, agentId) {
 if (config.postgresUrl) {
-const
-let
+const dataIsolationEnabled = process.env.ENABLE_DATA_ISOLATION === "true";
+let rlsServerId;
 let managerKey = "default";
-if (
-const
-if (!
-throw new Error("[
+if (dataIsolationEnabled) {
+const rlsServerIdString = process.env.ELIZA_SERVER_ID;
+if (!rlsServerIdString) {
+throw new Error("[Data Isolation] ENABLE_DATA_ISOLATION=true requires ELIZA_SERVER_ID environment variable");
 }
-
-managerKey =
-
+rlsServerId = stringToUuid(rlsServerIdString);
+managerKey = rlsServerId;
+logger14.debug({ src: "plugin:sql", rlsServerId: rlsServerId.slice(0, 8), serverIdString: rlsServerIdString }, "Using connection pool for RLS server");
 }
 if (!globalSingletons.postgresConnectionManagers) {
 globalSingletons.postgresConnectionManagers = new Map;
 }
 let manager = globalSingletons.postgresConnectionManagers.get(managerKey);
 if (!manager) {
-
-manager = new PostgresConnectionManager(config.postgresUrl,
+logger14.debug({ src: "plugin:sql", managerKey: managerKey.slice(0, 8) }, "Creating new connection pool");
+manager = new PostgresConnectionManager(config.postgresUrl, rlsServerId);
 globalSingletons.postgresConnectionManagers.set(managerKey, manager);
 }
 return new PgDatabaseAdapter(agentId, manager);
 }
 const dataDir = resolvePgliteDir(config.dataDir);
+if (dataDir && !dataDir.includes("://")) {
+mkdirSync(dataDir, { recursive: true });
+}
 if (!globalSingletons.pgLiteClientManager) {
 globalSingletons.pgLiteClientManager = new PGliteClientManager({ dataDir });
 }
@@ -11354,18 +11958,18 @@ var plugin = {
 priority: 0,
 schema: exports_schema,
 init: async (_config, runtime) => {
-
+runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "plugin-sql (node) init starting");
 const adapterRegistered = await runtime.isReady().then(() => true).catch((error) => {
 const message = error instanceof Error ? error.message : String(error);
 if (message.includes("Database adapter not registered")) {
-
+runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "No pre-registered database adapter detected; registering adapter");
 } else {
-
+runtime.logger.warn({ src: "plugin:sql", agentId: runtime.agentId, error: message }, "Database adapter readiness check error; proceeding to register adapter");
 }
 return false;
 });
 if (adapterRegistered) {
-
+runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "Database adapter already registered, skipping creation");
 return;
 }
 const postgresUrl = runtime.getSetting("POSTGRES_URL");
@@ -11375,22 +11979,22 @@ var plugin = {
 postgresUrl
 }, runtime.agentId);
 runtime.registerDatabaseAdapter(dbAdapter);
-
+runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "Database adapter created and registered");
 }
 };
 var index_node_default = plugin;
 export {
 uninstallRLS,
-
+setServerContext,
 plugin,
 installRLSFunctions,
-
+getOrCreateRlsServer,
 index_node_default as default,
 createDatabaseAdapter,
-
+assignAgentToServer,
 applyRLSToNewTables,
 DatabaseMigrationService
 };
 
-//# debugId=
+//# debugId=C2608A88C2C2BA8564756E2164756E21
 //# sourceMappingURL=index.node.js.map