@elizaos/plugin-sql 1.6.5-alpha.9 → 1.6.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +1907 -436
- package/dist/browser/index.browser.js.map +26 -23
- package/dist/browser/tsconfig.build.tsbuildinfo +1 -1
- package/dist/node/index.d.ts +2 -2
- package/dist/node/index.node.js +1436 -662
- package/dist/node/index.node.js.map +33 -31
- package/dist/node/tsconfig.build.node.tsbuildinfo +1 -1
- package/package.json +17 -14
- package/dist/browser/base.d.ts +0 -926
- package/dist/browser/index.browser.d.ts +0 -11
- package/dist/browser/migration-service.d.ts +0 -43
- package/dist/browser/pglite/adapter.d.ts +0 -68
- package/dist/browser/pglite/manager.d.ts +0 -21
- package/dist/browser/runtime-migrator/drizzle-adapters/database-introspector.d.ts +0 -62
- package/dist/browser/runtime-migrator/drizzle-adapters/diff-calculator.d.ts +0 -59
- package/dist/browser/runtime-migrator/drizzle-adapters/snapshot-generator.d.ts +0 -18
- package/dist/browser/runtime-migrator/drizzle-adapters/sql-generator.d.ts +0 -38
- package/dist/browser/runtime-migrator/extension-manager.d.ts +0 -6
- package/dist/browser/runtime-migrator/index.d.ts +0 -8
- package/dist/browser/runtime-migrator/runtime-migrator.d.ts +0 -96
- package/dist/browser/runtime-migrator/schema-transformer.d.ts +0 -16
- package/dist/browser/runtime-migrator/storage/journal-storage.d.ts +0 -10
- package/dist/browser/runtime-migrator/storage/migration-tracker.d.ts +0 -13
- package/dist/browser/runtime-migrator/storage/snapshot-storage.d.ts +0 -9
- package/dist/browser/runtime-migrator/types.d.ts +0 -48
- package/dist/browser/schema/agent.d.ts +0 -344
- package/dist/browser/schema/cache.d.ts +0 -97
- package/dist/browser/schema/channel.d.ts +0 -177
- package/dist/browser/schema/channelParticipant.d.ts +0 -41
- package/dist/browser/schema/component.d.ts +0 -163
- package/dist/browser/schema/embedding.d.ts +0 -193
- package/dist/browser/schema/entity.d.ts +0 -122
- package/dist/browser/schema/index.d.ts +0 -18
- package/dist/browser/schema/log.d.ts +0 -114
- package/dist/browser/schema/memory.d.ts +0 -188
- package/dist/browser/schema/message.d.ts +0 -1
- package/dist/browser/schema/messageServer.d.ts +0 -126
- package/dist/browser/schema/owners.d.ts +0 -63
- package/dist/browser/schema/participant.d.ts +0 -114
- package/dist/browser/schema/relationship.d.ts +0 -156
- package/dist/browser/schema/room.d.ts +0 -192
- package/dist/browser/schema/serverAgent.d.ts +0 -41
- package/dist/browser/schema/tasks.d.ts +0 -225
- package/dist/browser/schema/types.d.ts +0 -68
- package/dist/browser/schema/world.d.ts +0 -114
- package/dist/browser/src/base.d.ts +0 -926
- package/dist/browser/src/index.browser.d.ts +0 -11
- package/dist/browser/src/migration-service.d.ts +0 -43
- package/dist/browser/src/pglite/adapter.d.ts +0 -68
- package/dist/browser/src/pglite/manager.d.ts +0 -21
- package/dist/browser/src/runtime-migrator/drizzle-adapters/database-introspector.d.ts +0 -62
- package/dist/browser/src/runtime-migrator/drizzle-adapters/diff-calculator.d.ts +0 -59
- package/dist/browser/src/runtime-migrator/drizzle-adapters/snapshot-generator.d.ts +0 -18
- package/dist/browser/src/runtime-migrator/drizzle-adapters/sql-generator.d.ts +0 -38
- package/dist/browser/src/runtime-migrator/extension-manager.d.ts +0 -6
- package/dist/browser/src/runtime-migrator/index.d.ts +0 -8
- package/dist/browser/src/runtime-migrator/runtime-migrator.d.ts +0 -96
- package/dist/browser/src/runtime-migrator/schema-transformer.d.ts +0 -16
- package/dist/browser/src/runtime-migrator/storage/journal-storage.d.ts +0 -10
- package/dist/browser/src/runtime-migrator/storage/migration-tracker.d.ts +0 -13
- package/dist/browser/src/runtime-migrator/storage/snapshot-storage.d.ts +0 -9
- package/dist/browser/src/runtime-migrator/types.d.ts +0 -48
- package/dist/browser/src/schema/agent.d.ts +0 -344
- package/dist/browser/src/schema/cache.d.ts +0 -97
- package/dist/browser/src/schema/channel.d.ts +0 -177
- package/dist/browser/src/schema/channelParticipant.d.ts +0 -41
- package/dist/browser/src/schema/component.d.ts +0 -163
- package/dist/browser/src/schema/embedding.d.ts +0 -193
- package/dist/browser/src/schema/entity.d.ts +0 -122
- package/dist/browser/src/schema/index.d.ts +0 -18
- package/dist/browser/src/schema/log.d.ts +0 -114
- package/dist/browser/src/schema/memory.d.ts +0 -188
- package/dist/browser/src/schema/message.d.ts +0 -1
- package/dist/browser/src/schema/messageServer.d.ts +0 -126
- package/dist/browser/src/schema/owners.d.ts +0 -63
- package/dist/browser/src/schema/participant.d.ts +0 -114
- package/dist/browser/src/schema/relationship.d.ts +0 -156
- package/dist/browser/src/schema/room.d.ts +0 -192
- package/dist/browser/src/schema/serverAgent.d.ts +0 -41
- package/dist/browser/src/schema/tasks.d.ts +0 -225
- package/dist/browser/src/schema/types.d.ts +0 -68
- package/dist/browser/src/schema/world.d.ts +0 -114
- package/dist/browser/src/types.d.ts +0 -15
- package/dist/browser/src/utils.browser.d.ts +0 -21
- package/dist/browser/types.d.ts +0 -15
- package/dist/browser/utils.browser.d.ts +0 -21
- package/dist/node/src/base.d.ts +0 -926
- package/dist/node/src/index.d.ts +0 -33
- package/dist/node/src/index.node.d.ts +0 -10
- package/dist/node/src/migration-service.d.ts +0 -43
- package/dist/node/src/pg/adapter.d.ts +0 -69
- package/dist/node/src/pg/manager.d.ts +0 -17
- package/dist/node/src/pglite/adapter.d.ts +0 -68
- package/dist/node/src/pglite/manager.d.ts +0 -21
- package/dist/node/src/rls.d.ts +0 -43
- package/dist/node/src/runtime-migrator/drizzle-adapters/database-introspector.d.ts +0 -62
- package/dist/node/src/runtime-migrator/drizzle-adapters/diff-calculator.d.ts +0 -59
- package/dist/node/src/runtime-migrator/drizzle-adapters/snapshot-generator.d.ts +0 -18
- package/dist/node/src/runtime-migrator/drizzle-adapters/sql-generator.d.ts +0 -38
- package/dist/node/src/runtime-migrator/extension-manager.d.ts +0 -6
- package/dist/node/src/runtime-migrator/index.d.ts +0 -8
- package/dist/node/src/runtime-migrator/runtime-migrator.d.ts +0 -96
- package/dist/node/src/runtime-migrator/schema-transformer.d.ts +0 -16
- package/dist/node/src/runtime-migrator/storage/journal-storage.d.ts +0 -10
- package/dist/node/src/runtime-migrator/storage/migration-tracker.d.ts +0 -13
- package/dist/node/src/runtime-migrator/storage/snapshot-storage.d.ts +0 -9
- package/dist/node/src/runtime-migrator/types.d.ts +0 -48
- package/dist/node/src/schema/agent.d.ts +0 -344
- package/dist/node/src/schema/cache.d.ts +0 -97
- package/dist/node/src/schema/channel.d.ts +0 -177
- package/dist/node/src/schema/channelParticipant.d.ts +0 -41
- package/dist/node/src/schema/component.d.ts +0 -163
- package/dist/node/src/schema/embedding.d.ts +0 -193
- package/dist/node/src/schema/entity.d.ts +0 -122
- package/dist/node/src/schema/index.d.ts +0 -18
- package/dist/node/src/schema/log.d.ts +0 -114
- package/dist/node/src/schema/memory.d.ts +0 -188
- package/dist/node/src/schema/message.d.ts +0 -1
- package/dist/node/src/schema/messageServer.d.ts +0 -126
- package/dist/node/src/schema/owners.d.ts +0 -63
- package/dist/node/src/schema/participant.d.ts +0 -114
- package/dist/node/src/schema/relationship.d.ts +0 -156
- package/dist/node/src/schema/room.d.ts +0 -192
- package/dist/node/src/schema/serverAgent.d.ts +0 -41
- package/dist/node/src/schema/tasks.d.ts +0 -225
- package/dist/node/src/schema/types.d.ts +0 -68
- package/dist/node/src/schema/world.d.ts +0 -114
- package/dist/node/src/types.d.ts +0 -15
- package/dist/node/src/utils.d.ts +0 -32
- package/dist/node/src/utils.node.d.ts +0 -6
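
The node bundle diff that follows bumps the vendored drizzle-orm to 0.45.0, moves the plugin's logging to structured object-first calls, and adds PostgreSQL row-level security helpers in which `current_server_id()` is derived from the connection's `application_name`. The sketch below only illustrates that isolation model; the `pg` client, connection options, and queried table are illustrative assumptions, not part of this package's public API.

```ts
// Sketch: how a client might scope itself to one RLS "server" after the
// functions shown in the diff (current_server_id, server_isolation_policy) are installed.
// Assumes node-postgres; the connection wiring here is illustrative, not plugin-sql API.
import { Pool } from "pg";

const SERVER_ID = "11111111-1111-1111-1111-111111111111"; // illustrative server UUID

async function listVisibleAgents(): Promise<void> {
  // current_server_id() reads current_setting('application_name'), so setting
  // application_name to the server UUID lets the RLS policies filter on it.
  const pool = new Pool({
    connectionString: process.env.POSTGRES_URL,
    application_name: SERVER_ID,
  });

  // With server_isolation_policy in force, only rows whose server_id equals
  // current_server_id() are visible or writable on this connection.
  const { rows } = await pool.query("SELECT id, name FROM agents");
  console.log(`agents visible to ${SERVER_ID}: ${rows.length}`);

  await pool.end();
}

listVisibleAgents().catch((err) => {
  console.error(err);
  process.exit(1);
});
```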
package/dist/node/index.node.js
CHANGED
@@ -634,7 +634,7 @@ var init_subquery = __esm(() => {
 });

 // ../../node_modules/drizzle-orm/version.js
-var version = "0.
+var version = "0.45.0";
 var init_version = () => {};

 // ../../node_modules/drizzle-orm/tracing.js
@@ -1261,6 +1261,8 @@ function mapResultRow(columns, row, joinsNotNullableMap) {
 decoder = field;
 } else if (is(field, SQL)) {
 decoder = field.decoder;
+} else if (is(field, Subquery)) {
+decoder = field._.sql.decoder;
 } else {
 decoder = field.sql.decoder;
 }
@@ -1301,7 +1303,7 @@ function orderSelectedFields(fields, pathPrefix) {
 return result;
 }
 const newPath = pathPrefix ? [...pathPrefix, name] : [name];
-if (is(field, Column) || is(field, SQL) || is(field, SQL.Aliased)) {
+if (is(field, Column) || is(field, SQL) || is(field, SQL.Aliased) || is(field, Subquery)) {
 result.push({ path: newPath, field });
 } else if (is(field, Table)) {
 result.push(...orderSelectedFields(field[Table.Symbol.Columns], newPath));
@@ -1739,7 +1741,9 @@ var init_date = __esm(() => {
 return "date";
 }
 mapFromDriverValue(value) {
-
+if (typeof value === "string")
+return new Date(value);
+return value;
 }
 mapToDriverValue(value) {
 return value.toISOString();
@@ -1759,6 +1763,11 @@ var init_date = __esm(() => {
 getSQLType() {
 return "date";
 }
+mapFromDriverValue(value) {
+if (typeof value === "string")
+return value;
+return value.toISOString().slice(0, -14);
+}
 };
 });

@@ -2581,9 +2590,11 @@ var init_timestamp = __esm(() => {
 const precision = this.precision === undefined ? "" : ` (${this.precision})`;
 return `timestamp${precision}${this.withTimezone ? " with time zone" : ""}`;
 }
-mapFromDriverValue
-
-
+mapFromDriverValue(value) {
+if (typeof value === "string")
+return new Date(this.withTimezone ? value : value + "+0000");
+return value;
+}
 mapToDriverValue = (value) => {
 return value.toISOString();
 };
@@ -2612,6 +2623,17 @@ var init_timestamp = __esm(() => {
 const precision = this.precision === undefined ? "" : `(${this.precision})`;
 return `timestamp${precision}${this.withTimezone ? " with time zone" : ""}`;
 }
+mapFromDriverValue(value) {
+if (typeof value === "string")
+return value;
+const shortened = value.toISOString().slice(0, -1).replace("T", " ");
+if (this.withTimezone) {
+const offset = value.getTimezoneOffset();
+const sign = offset <= 0 ? "+" : "-";
+return `${shortened}${sign}${Math.floor(Math.abs(offset) / 60).toString().padStart(2, "0")}`;
+}
+return shortened;
+}
 };
 });

@@ -3727,7 +3749,8 @@ var init_dialect = __esm(() => {
 const setSize = columnNames.length;
 return sql.join(columnNames.flatMap((colName, i) => {
 const col = tableColumns[colName];
-const
+const onUpdateFnResult = col.onUpdateFn?.();
+const value = set[colName] ?? (is(onUpdateFnResult, SQL) ? onUpdateFnResult : sql.param(onUpdateFnResult, col));
 const res = sql`${sql.identifier(this.casing.getColumnCasing(col))} = ${value}`;
 if (i < setSize - 1) {
 return [res, sql.raw(", ")];
@@ -3776,6 +3799,16 @@ var init_dialect = __esm(() => {
 } else {
 chunk.push(field);
 }
+} else if (is(field, Subquery)) {
+const entries = Object.entries(field._.selectedFields);
+if (entries.length === 1) {
+const entry = entries[0][1];
+const fieldDecoder = is(entry, SQL) ? entry.decoder : is(entry, Column) ? { mapFromDriverValue: (v) => entry.mapFromDriverValue(v) } : entry.sql.decoder;
+if (fieldDecoder) {
+field._.sql.decoder = fieldDecoder;
+}
+}
+chunk.push(field);
 }
 if (i < columnsLen - 1) {
 chunk.push(sql`, `);
@@ -5694,6 +5727,44 @@ var init_drizzle_orm = __esm(() => {
 init_view_common();
 });

+// src/schema/agent.ts
+var agentTable;
+var init_agent = __esm(() => {
+init_drizzle_orm();
+init_pg_core();
+agentTable = pgTable("agents", {
+id: uuid("id").primaryKey().defaultRandom(),
+enabled: boolean("enabled").default(true).notNull(),
+server_id: uuid("server_id"),
+createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
+updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull(),
+name: text("name").notNull(),
+username: text("username"),
+system: text("system").default(""),
+bio: jsonb("bio").$type().default(sql`'[]'::jsonb`),
+messageExamples: jsonb("message_examples").$type().default(sql`'[]'::jsonb`).notNull(),
+postExamples: jsonb("post_examples").$type().default(sql`'[]'::jsonb`).notNull(),
+topics: jsonb("topics").$type().default(sql`'[]'::jsonb`).notNull(),
+adjectives: jsonb("adjectives").$type().default(sql`'[]'::jsonb`).notNull(),
+knowledge: jsonb("knowledge").$type().default(sql`'[]'::jsonb`).notNull(),
+plugins: jsonb("plugins").$type().default(sql`'[]'::jsonb`).notNull(),
+settings: jsonb("settings").$type().default(sql`'{}'::jsonb`).notNull(),
+style: jsonb("style").$type().default(sql`'{}'::jsonb`).notNull()
+});
+});
+
+// src/schema/server.ts
+var serverTable;
+var init_server = __esm(() => {
+init_drizzle_orm();
+init_pg_core();
+serverTable = pgTable("servers", {
+id: uuid("id").primaryKey(),
+createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
+updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull()
+});
+});
+
 // src/runtime-migrator/storage/migration-tracker.ts
 class MigrationTracker {
 db;
@@ -5871,14 +5942,14 @@ class ExtensionManager {
 for (const extension of extensions) {
 try {
 if (!/^[a-zA-Z0-9_-]+$/.test(extension)) {
-logger2.warn(
+logger2.warn({ src: "plugin:sql", extension }, "Invalid extension name - contains invalid characters");
 continue;
 }
 await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS ${sql.identifier(extension)}`);
-logger2.debug(
+logger2.debug({ src: "plugin:sql", extension }, "Extension installed");
 } catch (error) {
 const errorMessage = error instanceof Error ? error.message : String(error);
-logger2.warn(
+logger2.warn({ src: "plugin:sql", extension, error: errorMessage }, "Could not install extension");
 }
 }
 }
@@ -6590,10 +6661,7 @@ async function generateMigrationSQL(previousSnapshot, currentSnapshot, diff) {
 }
 const dataLossCheck = checkForDataLoss(diff);
 if (dataLossCheck.warnings.length > 0) {
-logger3.warn("
-for (const warning of dataLossCheck.warnings) {
-logger3.warn(` - ${warning}`);
-}
+logger3.warn({ src: "plugin:sql", warnings: dataLossCheck.warnings }, "Schema changes may cause data loss");
 }
 const schemasToCreate = new Set;
 for (const tableName of diff.tables.created) {
@@ -7015,7 +7083,7 @@ class DatabaseIntrospector {
 this.db = db2;
 }
 async introspectSchema(schemaName = "public") {
-logger5.info(
+logger5.info({ src: "plugin:sql", schemaName }, "Starting database introspection");
 const tables = {};
 const schemas = {};
 const enums = {};
@@ -7023,7 +7091,7 @@ class DatabaseIntrospector {
 for (const tableInfo of allTables) {
 const tableName = tableInfo.table_name;
 const tableSchema = tableInfo.table_schema || "public";
-logger5.debug(
+logger5.debug({ src: "plugin:sql", tableSchema, tableName }, "Introspecting table");
 const columns2 = await this.getColumns(tableSchema, tableName);
 const columnsObject = {};
 const uniqueConstraintObject = {};
@@ -7040,15 +7108,17 @@ class DatabaseIntrospector {
 const indexesObject = {};
 for (const idx of indexes2) {
 if (!idx.is_primary && !idx.is_unique_constraint) {
-
-
-
-
-
-
-
-
-
+if (idx.columns && Array.isArray(idx.columns) && idx.columns.length > 0) {
+indexesObject[idx.name] = {
+name: idx.name,
+columns: idx.columns.map((col) => ({
+expression: col,
+isExpression: false
+})),
+isUnique: idx.is_unique,
+method: idx.method || "btree"
+};
+}
 }
 }
 const foreignKeys = await this.getForeignKeys(tableSchema, tableName);
@@ -7116,7 +7186,7 @@ class DatabaseIntrospector {
 }
 enums[key].values.push(enumInfo.value);
 }
-logger5.info(
+logger5.info({ src: "plugin:sql", tableCount: Object.keys(tables).length }, "Database introspection complete");
 return {
 version: "7",
 dialect: "postgresql",
@@ -7378,7 +7448,7 @@ class RuntimeMigrator {
 }
 }
 for (const schemaName of schemasToCreate) {
-logger6.debug(
+logger6.debug({ src: "plugin:sql", schemaName }, "Ensuring schema exists");
 await this.db.execute(sql.raw(`CREATE SCHEMA IF NOT EXISTS "${schemaName}"`));
 }
 }
@@ -7389,10 +7459,15 @@ class RuntimeMigrator {
 const tableData = table3;
 const actualSchema = tableData.schema || "public";
 if (!isCorePLugin && actualSchema === "public") {
-logger6.warn(
+logger6.warn({ src: "plugin:sql", pluginName, tableName: tableData.name, expectedSchema }, "Plugin table is using public schema - consider using pgSchema for better isolation");
 }
 if (isCorePLugin && actualSchema !== "public") {
-logger6.warn(
+logger6.warn({
+src: "plugin:sql",
+pluginName: "@elizaos/plugin-sql",
+tableName: tableData.name,
+actualSchema
+}, "Core plugin table should use public schema");
 }
 }
 }
@@ -7603,13 +7678,13 @@ class RuntimeMigrator {
 }
 }
 }
-logger6.debug(
+logger6.debug({ src: "plugin:sql", urlPreview: url.substring(0, 50) }, "Connection string did not match any PostgreSQL patterns");
 return false;
 }
 async initialize() {
-logger6.info("
+logger6.info({ src: "plugin:sql" }, "Initializing migration system");
 await this.migrationTracker.ensureTables();
-logger6.info("
+logger6.info({ src: "plugin:sql" }, "Migration system initialized");
 }
 async migrate(pluginName, schema2, options = {}) {
 const lockId = this.getAdvisoryLockId(pluginName);
@@ -7618,46 +7693,54 @@ class RuntimeMigrator {
 }
 let lockAcquired = false;
 try {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Starting migration for plugin");
 await this.initialize();
 const postgresUrl = process.env.POSTGRES_URL || process.env.DATABASE_URL || "";
 const isRealPostgres = this.isRealPostgresDatabase(postgresUrl);
 if (isRealPostgres) {
 try {
-logger6.debug(
+logger6.debug({ src: "plugin:sql", pluginName }, "Using PostgreSQL advisory locks");
 const lockIdStr = lockId.toString();
 const lockResult = await this.db.execute(sql`SELECT pg_try_advisory_lock(CAST(${lockIdStr} AS bigint)) as acquired`);
 lockAcquired = lockResult.rows[0]?.acquired === true;
 if (!lockAcquired) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Migration already in progress, waiting for lock");
 await this.db.execute(sql`SELECT pg_advisory_lock(CAST(${lockIdStr} AS bigint))`);
 lockAcquired = true;
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Lock acquired");
 } else {
-logger6.debug(
+logger6.debug({ src: "plugin:sql", pluginName, lockId: lockIdStr }, "Advisory lock acquired");
 }
 } catch (lockError) {
-logger6.warn(
+logger6.warn({
+src: "plugin:sql",
+pluginName,
+error: lockError instanceof Error ? lockError.message : String(lockError)
+}, "Failed to acquire advisory lock, continuing without lock");
 lockAcquired = false;
 }
 } else {
-logger6.debug(
+logger6.debug({ src: "plugin:sql" }, "Development database detected, skipping advisory locks");
 }
-await this.extensionManager.installRequiredExtensions([
+await this.extensionManager.installRequiredExtensions([
+"vector",
+"fuzzystrmatch",
+"pgcrypto"
+]);
 const currentSnapshot = await generateSnapshot(schema2);
 await this.ensureSchemasExist(currentSnapshot);
 this.validateSchemaUsage(pluginName, currentSnapshot);
 const currentHash = hashSnapshot(currentSnapshot);
 const lastMigration = await this.migrationTracker.getLastMigration(pluginName);
 if (lastMigration && lastMigration.hash === currentHash) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName, hash: currentHash }, "No changes detected, skipping migration");
 return;
 }
 let previousSnapshot = await this.snapshotStorage.getLatestSnapshot(pluginName);
 if (!previousSnapshot && Object.keys(currentSnapshot.tables).length > 0) {
 const hasExistingTables = await this.introspector.hasExistingTables(pluginName);
 if (hasExistingTables) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "No snapshot found but tables exist in database, introspecting");
 const schemaName = this.getExpectedSchemaName(pluginName);
 const introspectedSnapshot = await this.introspector.introspectSchema(schemaName);
 if (Object.keys(introspectedSnapshot.tables).length > 0) {
@@ -7665,15 +7748,15 @@ class RuntimeMigrator {
 await this.journalStorage.updateJournal(pluginName, 0, `introspected_${Date.now()}`, true);
 const introspectedHash = hashSnapshot(introspectedSnapshot);
 await this.migrationTracker.recordMigration(pluginName, introspectedHash, Date.now());
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Created initial snapshot from existing database");
 previousSnapshot = introspectedSnapshot;
 }
 }
 }
 if (!hasChanges(previousSnapshot, currentSnapshot)) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "No schema changes");
 if (!previousSnapshot && Object.keys(currentSnapshot.tables).length === 0) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Recording empty schema");
 await this.migrationTracker.recordMigration(pluginName, currentHash, Date.now());
 const idx = await this.journalStorage.getNextIdx(pluginName);
 const tag = this.generateMigrationTag(idx, pluginName);
@@ -7684,7 +7767,7 @@ class RuntimeMigrator {
 }
 const diff = await calculateDiff(previousSnapshot, currentSnapshot);
 if (!hasDiffChanges(diff)) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "No actionable changes");
 return;
 }
 const dataLossCheck = checkForDataLoss(diff);
@@ -7692,55 +7775,43 @@ class RuntimeMigrator {
 const isProduction = false;
 const allowDestructive = options.force || options.allowDataLoss || process.env.ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS === "true";
 if (!allowDestructive) {
-logger6.error(
-
-
-
-
-
-}
-logger6.error("[RuntimeMigrator] To proceed with destructive migrations:");
-logger6.error("[RuntimeMigrator] 1. Set environment variable: export ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true");
-logger6.error("[RuntimeMigrator] 2. Or use option: migrate(plugin, schema, { force: true })");
-if (isProduction) {
-logger6.error("[RuntimeMigrator] 3. For production, consider using drizzle-kit for manual migration");
-}
+logger6.error({
+src: "plugin:sql",
+pluginName,
+environment: isProduction ? "PRODUCTION" : "DEVELOPMENT",
+warnings: dataLossCheck.warnings
+}, "Destructive migration blocked - set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use force option");
 const errorMessage = isProduction ? `Destructive migration blocked in production for ${pluginName}. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use drizzle-kit.` : `Destructive migration blocked for ${pluginName}. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true to proceed.`;
 throw new Error(errorMessage);
 }
 if (dataLossCheck.requiresConfirmation) {
-logger6.warn("
-logger6.warn(`[RuntimeMigrator] Plugin: ${pluginName}`);
-logger6.warn("[RuntimeMigrator] The following operations will be performed:");
-for (const warning of dataLossCheck.warnings) {
-logger6.warn(`[RuntimeMigrator] ⚠️ ${warning}`);
-}
+logger6.warn({ src: "plugin:sql", pluginName, warnings: dataLossCheck.warnings }, "Proceeding with destructive migration");
 }
 }
 const sqlStatements = await generateMigrationSQL(previousSnapshot, currentSnapshot, diff);
 if (sqlStatements.length === 0) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "No SQL statements to execute");
 return;
 }
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName, statementCount: sqlStatements.length }, "Executing SQL statements");
 if (options.verbose) {
 sqlStatements.forEach((stmt, i) => {
-logger6.debug(
+logger6.debug({ src: "plugin:sql", statementIndex: i + 1, statement: stmt }, "SQL statement");
 });
 }
 if (options.dryRun) {
-logger6.info("
-logger6.info("[RuntimeMigrator] Would execute:");
-sqlStatements.forEach((stmt, i) => {
-logger6.info(` ${i + 1}. ${stmt}`);
-});
+logger6.info({ src: "plugin:sql", pluginName, statements: sqlStatements }, "DRY RUN mode - not executing statements");
 return;
 }
 await this.executeMigration(pluginName, currentSnapshot, currentHash, sqlStatements);
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Migration completed successfully");
 return;
 } catch (error) {
-logger6.error(
+logger6.error({
+src: "plugin:sql",
+pluginName,
+error: error instanceof Error ? error.message : String(error)
+}, "Migration failed");
 throw error;
 } finally {
 const postgresUrl = process.env.POSTGRES_URL || process.env.DATABASE_URL || "";
@@ -7749,9 +7820,13 @@ class RuntimeMigrator {
 try {
 const lockIdStr = lockId.toString();
 await this.db.execute(sql`SELECT pg_advisory_unlock(CAST(${lockIdStr} AS bigint))`);
-logger6.debug(
+logger6.debug({ src: "plugin:sql", pluginName }, "Advisory lock released");
 } catch (unlockError) {
-logger6.warn(
+logger6.warn({
+src: "plugin:sql",
+pluginName,
+error: unlockError instanceof Error ? unlockError.message : String(unlockError)
+}, "Failed to release advisory lock");
 }
 }
 }
@@ -7762,7 +7837,7 @@ class RuntimeMigrator {
 await this.db.execute(sql`BEGIN`);
 transactionStarted = true;
 for (const stmt of sqlStatements) {
-logger6.debug(
+logger6.debug({ src: "plugin:sql", statement: stmt }, "Executing SQL statement");
 await this.db.execute(sql.raw(stmt));
 }
 const idx = await this.journalStorage.getNextIdx(pluginName);
@@ -7771,14 +7846,17 @@ class RuntimeMigrator {
 await this.journalStorage.updateJournal(pluginName, idx, tag, true);
 await this.snapshotStorage.saveSnapshot(pluginName, idx, snapshot);
 await this.db.execute(sql`COMMIT`);
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName, tag }, "Recorded migration");
 } catch (error) {
 if (transactionStarted) {
 try {
 await this.db.execute(sql`ROLLBACK`);
-logger6.error("
+logger6.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Migration failed, rolled back");
 } catch (rollbackError) {
-logger6.error(
+logger6.error({
+src: "plugin:sql",
+error: rollbackError instanceof Error ? rollbackError.message : String(rollbackError)
+}, "Failed to rollback transaction");
 }
 }
 throw error;
@@ -7801,31 +7879,35 @@ class RuntimeMigrator {
 };
 }
 async reset(pluginName) {
-logger6.warn(
+logger6.warn({ src: "plugin:sql", pluginName }, "Resetting migrations");
 await this.db.execute(sql`DELETE FROM migrations._migrations WHERE plugin_name = ${pluginName}`);
 await this.db.execute(sql`DELETE FROM migrations._journal WHERE plugin_name = ${pluginName}`);
 await this.db.execute(sql`DELETE FROM migrations._snapshots WHERE plugin_name = ${pluginName}`);
-logger6.warn(
+logger6.warn({ src: "plugin:sql", pluginName }, "Reset complete");
 }
 async checkMigration(pluginName, schema2) {
 try {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Checking migration");
 const currentSnapshot = await generateSnapshot(schema2);
 const previousSnapshot = await this.snapshotStorage.getLatestSnapshot(pluginName);
 if (!hasChanges(previousSnapshot, currentSnapshot)) {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "No changes detected");
 return null;
 }
 const diff = await calculateDiff(previousSnapshot, currentSnapshot);
 const dataLossCheck = checkForDataLoss(diff);
 if (dataLossCheck.hasDataLoss) {
-logger6.warn(
+logger6.warn({ src: "plugin:sql", pluginName }, "Migration would cause data loss");
 } else {
-logger6.info(
+logger6.info({ src: "plugin:sql", pluginName }, "Migration is safe (no data loss)");
 }
 return dataLossCheck;
 } catch (error) {
-logger6.error(
+logger6.error({
+src: "plugin:sql",
+pluginName,
+error: error instanceof Error ? error.message : String(error)
+}, "Failed to check migration");
 throw error;
 }
 }
@@ -7847,172 +7929,987 @@ var init_runtime_migrator2 = __esm(() => {
|
|
|
7847
7929
|
init_runtime_migrator();
|
|
7848
7930
|
});
|
|
7849
7931
|
|
|
7850
|
-
// src/
|
|
7851
|
-
var exports_migration_service = {};
|
|
7852
|
-
__export(exports_migration_service, {
|
|
7853
|
-
DatabaseMigrationService: () => DatabaseMigrationService
|
|
7854
|
-
});
|
|
7932
|
+
// src/migrations.ts
|
|
7855
7933
|
import { logger as logger7 } from "@elizaos/core";
|
|
7856
|
-
|
|
7857
|
-
|
|
7858
|
-
|
|
7859
|
-
|
|
7860
|
-
|
|
7861
|
-
|
|
7862
|
-
|
|
7863
|
-
this.db = db2;
|
|
7864
|
-
this.migrator = new RuntimeMigrator(db2);
|
|
7865
|
-
await this.migrator.initialize();
|
|
7866
|
-
logger7.info("DatabaseMigrationService initialized with database and runtime migrator");
|
|
7867
|
-
}
|
|
7868
|
-
discoverAndRegisterPluginSchemas(plugins) {
|
|
7869
|
-
for (const plugin of plugins) {
|
|
7870
|
-
if (plugin.schema) {
|
|
7871
|
-
this.registeredSchemas.set(plugin.name, plugin.schema);
|
|
7872
|
-
logger7.info(`Registered schema for plugin: ${plugin.name}`);
|
|
7873
|
-
}
|
|
7874
|
-
}
|
|
7875
|
-
logger7.info(`Discovered ${this.registeredSchemas.size} plugin schemas out of ${plugins.length} plugins`);
|
|
7876
|
-
}
|
|
7877
|
-
registerSchema(pluginName, schema2) {
|
|
7878
|
-
this.registeredSchemas.set(pluginName, schema2);
|
|
7879
|
-
logger7.info(`Registered schema for plugin: ${pluginName}`);
|
|
7934
|
+
async function migrateToEntityRLS(adapter) {
|
|
7935
|
+
const db2 = adapter.db;
|
|
7936
|
+
try {
|
|
7937
|
+
await db2.execute(sql`SELECT 1 FROM pg_tables LIMIT 1`);
|
|
7938
|
+
} catch {
|
|
7939
|
+
logger7.debug("[Migration] ⊘ Not PostgreSQL, skipping PostgreSQL-specific migrations");
|
|
7940
|
+
return;
|
|
7880
7941
|
}
|
|
7881
|
-
|
|
7882
|
-
|
|
7883
|
-
|
|
7942
|
+
logger7.info("[Migration] Starting develop → feat/entity-rls migration...");
|
|
7943
|
+
try {
|
|
7944
|
+
logger7.debug("[Migration] → Clearing RuntimeMigrator snapshot cache...");
|
|
7945
|
+
try {
|
|
7946
|
+
await db2.execute(sql`DELETE FROM migrations._snapshots WHERE plugin_name = '@elizaos/plugin-sql'`);
|
|
7947
|
+
logger7.debug("[Migration] ✓ Snapshot cache cleared");
|
|
7948
|
+
} catch (error) {
|
|
7949
|
+
logger7.debug("[Migration] ⊘ No snapshot cache to clear (migrations schema not yet created)");
|
|
7884
7950
|
}
|
|
7885
|
-
|
|
7886
|
-
|
|
7887
|
-
|
|
7888
|
-
|
|
7889
|
-
|
|
7890
|
-
|
|
7891
|
-
|
|
7892
|
-
|
|
7893
|
-
|
|
7894
|
-
|
|
7895
|
-
|
|
7951
|
+
logger7.debug("[Migration] → Disabling Row Level Security on all tables...");
|
|
7952
|
+
try {
|
|
7953
|
+
const tablesResult = await db2.execute(sql`
|
|
7954
|
+
SELECT tablename
|
|
7955
|
+
FROM pg_tables
|
|
7956
|
+
WHERE schemaname = 'public'
|
|
7957
|
+
ORDER BY tablename
|
|
7958
|
+
`);
|
|
7959
|
+
for (const row of tablesResult.rows || []) {
|
|
7960
|
+
const tableName = row.tablename;
|
|
7961
|
+
try {
|
|
7962
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DISABLE ROW LEVEL SECURITY`));
|
|
7963
|
+
logger7.debug(`[Migration] ✓ Disabled RLS on ${tableName}`);
|
|
7964
|
+
} catch (error) {
|
|
7965
|
+
logger7.debug(`[Migration] ⊘ Could not disable RLS on ${tableName}`);
|
|
7966
|
+
}
|
|
7967
|
+
}
|
|
7968
|
+
} catch (error) {
|
|
7969
|
+
logger7.debug("[Migration] ⊘ Could not disable RLS (may not have permissions)");
|
|
7896
7970
|
}
|
|
7897
|
-
|
|
7898
|
-
|
|
7899
|
-
const
|
|
7900
|
-
for (const [pluginName, schema2] of this.registeredSchemas) {
|
|
7971
|
+
logger7.debug("[Migration] → Handling server_id → message_server_id migrations...");
|
|
7972
|
+
const tablesToMigrate = ["channels", "worlds", "rooms"];
|
|
7973
|
+
for (const tableName of tablesToMigrate) {
|
|
7901
7974
|
try {
|
|
7902
|
-
await
|
|
7903
|
-
|
|
7904
|
-
|
|
7905
|
-
|
|
7906
|
-
|
|
7907
|
-
|
|
7908
|
-
|
|
7909
|
-
|
|
7910
|
-
|
|
7911
|
-
|
|
7912
|
-
|
|
7913
|
-
|
|
7914
|
-
|
|
7975
|
+
const columnsResult = await db2.execute(sql`
|
|
7976
|
+
SELECT column_name, data_type, is_nullable
|
|
7977
|
+
FROM information_schema.columns
|
|
7978
|
+
WHERE table_schema = 'public'
|
|
7979
|
+
AND table_name = ${tableName}
|
|
7980
|
+
AND column_name IN ('server_id', 'message_server_id')
|
|
7981
|
+
ORDER BY column_name
|
|
7982
|
+
`);
|
|
7983
|
+
const columns2 = columnsResult.rows || [];
|
|
7984
|
+
const serverId = columns2.find((c) => c.column_name === "server_id");
|
|
7985
|
+
const messageServerId = columns2.find((c) => c.column_name === "message_server_id");
|
|
7986
|
+
if (serverId && !messageServerId) {
|
|
7987
|
+
logger7.debug(`[Migration] → Renaming ${tableName}.server_id to message_server_id...`);
|
|
7988
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" RENAME COLUMN "server_id" TO "message_server_id"`));
|
|
7989
|
+
logger7.debug(`[Migration] ✓ Renamed ${tableName}.server_id → message_server_id`);
|
|
7990
|
+
if (serverId.data_type === "text") {
|
|
7991
|
+
try {
|
|
7992
|
+
logger7.debug(`[Migration] → Dropping DEFAULT constraint on ${tableName}.message_server_id...`);
|
|
7993
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" DROP DEFAULT`));
|
|
7994
|
+
logger7.debug(`[Migration] ✓ Dropped DEFAULT constraint`);
|
|
7995
|
+
logger7.debug(`[Migration] → Converting ${tableName}.message_server_id from text to uuid...`);
|
|
7996
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING "message_server_id"::uuid`));
|
|
7997
|
+
logger7.debug(`[Migration] ✓ Converted ${tableName}.message_server_id to uuid`);
|
|
7998
|
+
} catch (convertError) {
|
|
7999
|
+
logger7.warn(`[Migration] ⚠️ Could not convert ${tableName}.message_server_id to uuid - data may not be valid UUIDs`);
|
|
8000
|
+
logger7.debug(`[Migration] → Setting invalid UUIDs to NULL in ${tableName}.message_server_id...`);
|
|
8001
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" TYPE uuid USING CASE WHEN "message_server_id" ~ '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$' THEN "message_server_id"::uuid ELSE NULL END`));
|
|
8002
|
+
}
|
|
8003
|
+
}
|
|
8004
|
+
if (tableName === "channels") {
|
|
8005
|
+
const nullCountResult = await db2.execute(sql.raw(`SELECT COUNT(*) as count FROM "${tableName}" WHERE "message_server_id" IS NULL`));
|
|
8006
|
+
const nullCount = nullCountResult.rows?.[0]?.count;
|
|
8007
|
+
if (nullCount && parseInt(nullCount) > 0) {
|
|
8008
|
+
logger7.warn(`[Migration] ⚠️ ${tableName} has ${nullCount} rows with NULL message_server_id - these will be deleted`);
|
|
8009
|
+
await db2.execute(sql.raw(`DELETE FROM "${tableName}" WHERE "message_server_id" IS NULL`));
|
|
8010
|
+
logger7.debug(`[Migration] ✓ Deleted ${nullCount} rows with NULL message_server_id from ${tableName}`);
|
|
8011
|
+
}
|
|
8012
|
+
logger7.debug(`[Migration] → Making ${tableName}.message_server_id NOT NULL...`);
|
|
8013
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" SET NOT NULL`));
|
|
8014
|
+
logger7.debug(`[Migration] ✓ Set ${tableName}.message_server_id NOT NULL`);
|
|
8015
|
+
}
|
|
8016
|
+
} else if (serverId && messageServerId) {
|
|
8017
|
+
logger7.debug(`[Migration] → ${tableName} has both columns, dropping server_id...`);
|
|
8018
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DROP COLUMN "server_id" CASCADE`));
|
|
8019
|
+
logger7.debug(`[Migration] ✓ Dropped ${tableName}.server_id (will be re-added by RuntimeMigrator for RLS)`);
|
|
8020
|
+
} else if (!serverId && messageServerId) {
|
|
8021
|
+
if (messageServerId.data_type === "text") {
|
|
8022
|
+
logger7.debug(`[Migration] → ${tableName}.message_server_id exists but is TEXT, needs UUID conversion...`);
|
|
8023
|
+
logger7.debug(`[Migration] → Dropping DEFAULT constraint on ${tableName}.message_server_id...`);
|
|
8024
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" ALTER COLUMN "message_server_id" DROP DEFAULT`));
|
|
8025
|
+
logger7.debug(`[Migration] ✓ Dropped DEFAULT constraint`);
|
|
8026
|
+
logger7.debug(`[Migration] → Converting ${tableName}.message_server_id from text to uuid (generating UUIDs from text)...`);
|
|
8027
|
+
await db2.execute(sql.raw(`
|
|
8028
|
+
ALTER TABLE "${tableName}"
|
|
8029
|
+
ALTER COLUMN "message_server_id" TYPE uuid
|
|
8030
|
+
USING CASE
|
|
8031
|
+
WHEN "message_server_id" ~ '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
|
|
8032
|
+
THEN "message_server_id"::uuid
|
|
8033
|
+
ELSE md5("message_server_id")::uuid
|
|
8034
|
+
END
|
|
8035
|
+
`));
|
|
8036
|
+
logger7.debug(`[Migration] ✓ Converted ${tableName}.message_server_id to uuid`);
|
|
8037
|
+
} else {
|
|
8038
|
+
logger7.debug(`[Migration] ⊘ ${tableName}.message_server_id already UUID, skipping`);
|
|
7915
8039
|
}
|
|
7916
8040
|
} else {
|
|
7917
|
-
logger7.
|
|
8041
|
+
logger7.debug(`[Migration] ⊘ ${tableName} already migrated, skipping`);
|
|
7918
8042
|
}
|
|
8043
|
+
} catch (error) {
|
|
8044
|
+
logger7.warn(`[Migration] ⚠️ Error migrating ${tableName}.server_id: ${error}`);
|
|
7919
8045
|
}
|
|
7920
8046
|
}
|
|
7921
|
-
|
|
7922
|
-
|
|
7923
|
-
|
|
7924
|
-
|
|
7925
|
-
|
|
7926
|
-
|
|
7927
|
-
|
|
7928
|
-
|
|
8047
|
+
logger7.debug("[Migration] → Dropping all remaining RLS-managed server_id columns...");
|
|
8048
|
+
try {
|
|
8049
|
+
const serverIdColumnsResult = await db2.execute(sql`
|
|
8050
|
+
SELECT table_name
|
|
8051
|
+
FROM information_schema.columns
|
|
8052
|
+
WHERE table_schema = 'public'
|
|
8053
|
+
AND column_name = 'server_id'
|
|
8054
|
+
AND table_name NOT IN (
|
|
8055
|
+
'servers', -- server_id is the primary key
|
|
8056
|
+
'agents', -- server_id is in the schema (for RLS)
|
|
8057
|
+
'channels', -- already handled above
|
|
8058
|
+
'worlds', -- already handled above
|
|
8059
|
+
'rooms', -- already handled above
|
|
8060
|
+
'server_agents', -- server_id is part of composite key
|
|
8061
|
+
'drizzle_migrations',
|
|
8062
|
+
'__drizzle_migrations'
|
|
8063
|
+
)
|
|
8064
|
+
ORDER BY table_name
|
|
8065
|
+
`);
|
|
8066
|
+
const tablesToClean = serverIdColumnsResult.rows || [];
|
|
8067
|
+
logger7.debug(`[Migration] → Found ${tablesToClean.length} tables with server_id columns`);
|
|
8068
|
+
for (const row of tablesToClean) {
|
|
8069
|
+
const tableName = row.table_name;
|
|
8070
|
+
try {
|
|
8071
|
+
await db2.execute(sql.raw(`ALTER TABLE "${tableName}" DROP COLUMN IF EXISTS server_id CASCADE`));
|
|
8072
|
+
logger7.debug(`[Migration] ✓ Dropped server_id from ${tableName}`);
|
|
8073
|
+
} catch (error) {
|
|
8074
|
+
logger7.debug(`[Migration] ⊘ Could not drop server_id from ${tableName}`);
|
|
8075
|
+
}
|
|
8076
|
+
}
|
|
8077
|
+
} catch (error) {
|
|
8078
|
+
logger7.debug("[Migration] ⊘ Could not drop server_id columns (may not have permissions)");
|
|
7929
8079
|
}
|
|
7930
|
-
|
|
7931
|
-
|
|
7932
|
-
|
|
8080
|
+
logger7.debug("[Migration] → Checking server_agents table rename...");
|
|
8081
|
+
try {
|
|
8082
|
+
const tablesResult = await db2.execute(sql`
|
|
8083
|
+
SELECT table_name
|
|
8084
|
+
FROM information_schema.tables
|
|
8085
|
+
WHERE table_schema = 'public'
|
|
8086
|
+
AND table_name IN ('server_agents', 'message_server_agents')
|
|
8087
|
+
ORDER BY table_name
|
|
8088
|
+
`);
|
|
8089
|
+
const tables = tablesResult.rows || [];
|
|
8090
|
+
const hasServerAgents = tables.some((t) => t.table_name === "server_agents");
|
|
8091
|
+
const hasMessageServerAgents = tables.some((t) => t.table_name === "message_server_agents");
|
|
8092
|
+
if (hasServerAgents && !hasMessageServerAgents) {
|
|
8093
|
+
logger7.debug("[Migration] → Renaming server_agents to message_server_agents...");
|
|
8094
|
+
await db2.execute(sql.raw(`ALTER TABLE "server_agents" RENAME TO "message_server_agents"`));
|
|
8095
|
+
logger7.debug("[Migration] ✓ Renamed server_agents → message_server_agents");
|
|
8096
|
+
logger7.debug("[Migration] → Renaming message_server_agents.server_id to message_server_id...");
|
|
8097
|
+
await db2.execute(sql.raw(`ALTER TABLE "message_server_agents" RENAME COLUMN "server_id" TO "message_server_id"`));
|
|
8098
|
+
logger7.debug("[Migration] ✓ Renamed message_server_agents.server_id → message_server_id");
|
|
8099
|
+
} else if (!hasServerAgents && !hasMessageServerAgents) {
|
|
8100
|
+
logger7.debug("[Migration] ⊘ No server_agents table to migrate");
|
|
8101
|
+
} else if (hasMessageServerAgents) {
|
|
8102
|
+
logger7.debug("[Migration] → Checking message_server_agents columns...");
|
|
8103
|
+
const columnsResult = await db2.execute(sql`
|
|
8104
|
+
SELECT column_name
|
|
8105
|
+
FROM information_schema.columns
|
|
8106
|
+
WHERE table_schema = 'public'
|
|
8107
|
+
AND table_name = 'message_server_agents'
|
|
8108
|
+
AND column_name IN ('server_id', 'message_server_id')
|
|
8109
|
+
ORDER BY column_name
|
|
8110
|
+
`);
|
|
8111
|
+
const columns2 = columnsResult.rows || [];
|
|
8112
|
+
const hasServerId = columns2.some((c) => c.column_name === "server_id");
|
|
8113
|
+
const hasMessageServerId = columns2.some((c) => c.column_name === "message_server_id");
|
|
8114
|
+
if (hasServerId && !hasMessageServerId) {
|
|
8115
|
+
logger7.debug("[Migration] → Renaming message_server_agents.server_id to message_server_id...");
|
|
8116
|
+
await db2.execute(sql.raw(`ALTER TABLE "message_server_agents" RENAME COLUMN "server_id" TO "message_server_id"`));
|
|
8117
|
+
logger7.debug("[Migration] ✓ Renamed message_server_agents.server_id → message_server_id");
|
|
8118
|
+
} else if (!hasServerId && !hasMessageServerId) {
|
|
8119
|
+
logger7.debug("[Migration] → message_server_agents exists without required columns, truncating...");
|
|
8120
|
+
await db2.execute(sql`TRUNCATE TABLE message_server_agents CASCADE`);
|
|
8121
|
+
logger7.debug("[Migration] ✓ Truncated message_server_agents");
|
|
8122
|
+
} else {
|
|
8123
|
+
logger7.debug("[Migration] ⊘ message_server_agents already has correct schema");
|
|
8124
|
+
}
|
|
8125
|
+
}
|
|
8126
|
+
} catch (error) {
|
|
8127
|
+
logger7.debug("[Migration] ⊘ Could not check/migrate server_agents table");
|
|
8128
|
+
}
|
|
8129
|
+
logger7.debug("[Migration] → Checking channel_participants table...");
|
|
8130
|
+
try {
|
|
8131
|
+
const columnsResult = await db2.execute(sql`
|
|
8132
|
+
SELECT column_name
|
|
8133
|
+
FROM information_schema.columns
|
|
8134
|
+
WHERE table_schema = 'public'
|
|
8135
|
+
AND table_name = 'channel_participants'
|
|
8136
|
+
AND column_name IN ('user_id', 'entity_id')
|
|
8137
|
+
ORDER BY column_name
|
|
8138
|
+
`);
|
|
8139
|
+
const columns2 = columnsResult.rows || [];
|
|
8140
|
+
const hasUserId = columns2.some((c) => c.column_name === "user_id");
|
|
8141
|
+
const hasEntityId = columns2.some((c) => c.column_name === "entity_id");
|
|
8142
|
+
if (hasUserId && !hasEntityId) {
|
|
8143
|
+
logger7.debug("[Migration] → Renaming channel_participants.user_id to entity_id...");
|
|
8144
|
+
await db2.execute(sql.raw(`ALTER TABLE "channel_participants" RENAME COLUMN "user_id" TO "entity_id"`));
|
|
8145
|
+
logger7.debug("[Migration] ✓ Renamed channel_participants.user_id → entity_id");
|
|
8146
|
+
} else if (!hasUserId && !hasEntityId) {
|
|
8147
|
+
logger7.debug("[Migration] → channel_participants exists without entity_id or user_id, truncating...");
|
|
8148
|
+
await db2.execute(sql`TRUNCATE TABLE channel_participants CASCADE`);
|
|
8149
|
+
logger7.debug("[Migration] ✓ Truncated channel_participants");
|
|
8150
|
+
} else {
|
|
8151
|
+
logger7.debug("[Migration] ⊘ channel_participants already has entity_id column");
|
|
8152
|
+
}
|
|
8153
|
+
} catch (error) {
|
|
8154
|
+
logger7.debug("[Migration] ⊘ Could not check/migrate channel_participants");
|
|
8155
|
+
}
|
|
8156
|
+
logger7.debug("[Migration] → Discovering and dropping all regular indexes...");
|
|
8157
|
+
try {
|
|
8158
|
+
const indexesResult = await db2.execute(sql`
|
|
8159
|
+
SELECT i.relname AS index_name
|
|
8160
|
+
FROM pg_index idx
|
|
8161
|
+
JOIN pg_class i ON i.oid = idx.indexrelid
|
|
8162
|
+
JOIN pg_class c ON c.oid = idx.indrelid
|
|
8163
|
+
JOIN pg_namespace n ON n.oid = c.relnamespace
|
|
8164
|
+
LEFT JOIN pg_constraint con ON con.conindid = idx.indexrelid
|
|
8165
|
+
WHERE n.nspname = 'public'
|
|
8166
|
+
AND NOT idx.indisprimary -- Not a primary key
|
|
8167
|
+
AND con.contype IS NULL -- Not a constraint (unique, etc)
|
|
8168
|
+
ORDER BY i.relname
|
|
8169
|
+
`);
|
|
8170
|
+
const indexesToDrop = indexesResult.rows || [];
|
|
8171
|
+
logger7.debug(`[Migration] → Found ${indexesToDrop.length} indexes to drop`);
|
|
8172
|
+
for (const row of indexesToDrop) {
|
|
8173
|
+
const indexName = row.index_name;
|
|
8174
|
+
try {
|
|
8175
|
+
await db2.execute(sql.raw(`DROP INDEX IF EXISTS "${indexName}"`));
|
|
8176
|
+
logger7.debug(`[Migration] ✓ Dropped index ${indexName}`);
|
|
8177
|
+
} catch (error) {
|
|
8178
|
+
logger7.debug(`[Migration] ⊘ Could not drop index ${indexName}`);
|
|
8179
|
+
}
|
|
8180
|
+
}
|
|
8181
|
+
} catch (error) {
|
|
8182
|
+
logger7.debug("[Migration] ⊘ Could not drop indexes (may not have permissions)");
|
|
8183
|
+
}
|
|
8184
|
+
logger7.info("[Migration] ✓ Migration complete - develop to feat/entity-rls migration finished");
|
|
8185
|
+
} catch (error) {
|
|
8186
|
+
logger7.error("[Migration] Migration failed:", String(error));
|
|
8187
|
+
throw error;
|
|
7933
8188
|
}
|
|
7934
8189
|
}
|
|
7935
|
-
var
|
|
7936
|
-
|
|
8190
|
+
var init_migrations = __esm(() => {
|
|
8191
|
+
init_drizzle_orm();
|
|
7937
8192
|
});
|
|
7938
8193
|
|
|
7939
|
-
// src/
|
|
7940
|
-
import { logger as
|
|
8194
|
+
// src/rls.ts
|
|
8195
|
+
import { logger as logger8, validateUuid } from "@elizaos/core";
|
|
8196
|
+
async function installRLSFunctions(adapter) {
|
|
8197
|
+
const db2 = adapter.db;
|
|
8198
|
+
await db2.execute(sql`
|
|
8199
|
+
CREATE TABLE IF NOT EXISTS servers (
|
|
8200
|
+
id UUID PRIMARY KEY,
|
|
8201
|
+
created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
|
|
8202
|
+
updated_at TIMESTAMPTZ DEFAULT NOW() NOT NULL
|
|
8203
|
+
)
|
|
8204
|
+
`);
|
|
8205
|
+
await db2.execute(sql`
|
|
8206
|
+
CREATE OR REPLACE FUNCTION current_server_id() RETURNS UUID AS $$
|
|
8207
|
+
DECLARE
|
|
8208
|
+
app_name TEXT;
|
|
8209
|
+
BEGIN
|
|
8210
|
+
app_name := NULLIF(current_setting('application_name', TRUE), '');
|
|
7941
8211
|
|
|
7942
|
-
|
|
7943
|
-
|
|
8212
|
+
-- Return NULL if application_name is not set or not a valid UUID
|
|
8213
|
+
-- This allows admin queries to work without RLS restrictions
|
|
8214
|
+
BEGIN
|
|
8215
|
+
RETURN app_name::UUID;
|
|
8216
|
+
EXCEPTION WHEN OTHERS THEN
|
|
+ RETURN NULL;
+ END;
+ END;
+ $$ LANGUAGE plpgsql STABLE;
+ `);
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION add_server_isolation(
+ schema_name text,
+ table_name text
+ ) RETURNS void AS $$
+ DECLARE
+ full_table_name text;
+ column_exists boolean;
+ orphaned_count bigint;
+ BEGIN
+ full_table_name := schema_name || '.' || table_name;

-
-
-
-
-
-
-
- import { PGlite } from "@electric-sql/pglite";
+ -- Check if server_id column already exists
+ SELECT EXISTS (
+ SELECT 1 FROM information_schema.columns
+ WHERE information_schema.columns.table_schema = schema_name
+ AND information_schema.columns.table_name = add_server_isolation.table_name
+ AND information_schema.columns.column_name = 'server_id'
+ ) INTO column_exists;

-
-
-
- init_pg_core();
- init_session();
- init_sql();
- init_utils();
- init_cache();
- import { types } from "@electric-sql/pglite";
+ -- Add server_id column if missing (DEFAULT populates it automatically for new rows)
+ IF NOT column_exists THEN
+ EXECUTE format('ALTER TABLE %I.%I ADD COLUMN server_id UUID DEFAULT current_server_id()', schema_name, table_name);

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+ -- Backfill existing rows with current server_id
+ -- This ensures all existing data belongs to the server instance that is enabling RLS
+ EXECUTE format('UPDATE %I.%I SET server_id = current_server_id() WHERE server_id IS NULL', schema_name, table_name);
+ ELSE
+ -- Column already exists (RLS was previously enabled then disabled)
+ -- Restore the DEFAULT clause (may have been removed during uninstallRLS)
+ EXECUTE format('ALTER TABLE %I.%I ALTER COLUMN server_id SET DEFAULT current_server_id()', schema_name, table_name);
+
+ -- Only backfill NULL server_id rows, do NOT steal data from other servers
+ EXECUTE format('SELECT COUNT(*) FROM %I.%I WHERE server_id IS NULL', schema_name, table_name) INTO orphaned_count;
+
+ IF orphaned_count > 0 THEN
+ RAISE NOTICE 'Backfilling % rows with NULL server_id in %.%', orphaned_count, schema_name, table_name;
+ EXECUTE format('UPDATE %I.%I SET server_id = current_server_id() WHERE server_id IS NULL', schema_name, table_name);
+ END IF;
+ END IF;
+
+ -- Create index for efficient server_id filtering
+ EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_server_id ON %I.%I(server_id)', table_name, schema_name, table_name);
+
+ -- Enable RLS on the table
+ EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
+
+ -- FORCE RLS even for table owners (critical for security)
+ EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
+
+ -- Drop existing policy if present
+ EXECUTE format('DROP POLICY IF EXISTS server_isolation_policy ON %I.%I', schema_name, table_name);
+
+ -- Create isolation policy: users can only see/modify rows where server_id matches current server instance
+ -- No NULL clause - all rows must have a valid server_id (backfilled during column addition)
+ EXECUTE format('
+ CREATE POLICY server_isolation_policy ON %I.%I
+ USING (server_id = current_server_id())
+ WITH CHECK (server_id = current_server_id())
+ ', schema_name, table_name);
+ END;
+ $$ LANGUAGE plpgsql;
+ `);
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION apply_rls_to_all_tables() RETURNS void AS $$
+ DECLARE
+ tbl record;
+ BEGIN
+ FOR tbl IN
+ SELECT schemaname, tablename
+ FROM pg_tables
+ WHERE schemaname = 'public'
+ AND tablename NOT IN (
+ 'servers',
+ 'drizzle_migrations',
+ '__drizzle_migrations'
+ )
+ LOOP
+ BEGIN
+ PERFORM add_server_isolation(tbl.schemaname, tbl.tablename);
+ EXCEPTION WHEN OTHERS THEN
+ RAISE WARNING 'Failed to apply RLS to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
+ END;
+ END LOOP;
+ END;
+ $$ LANGUAGE plpgsql;
+ `);
+ logger8.info({ src: "plugin:sql" }, "RLS PostgreSQL functions installed");
+ await installEntityRLS(adapter);
+ }
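The helpers above are plain SQL functions, so they can also be invoked ad hoc against a single table. A minimal sketch in TypeScript, assuming a Drizzle database handle like the adapter's db; the table name and the pg_policies check are illustrative, not part of the package:

import { sql } from "drizzle-orm";

// Sketch only: apply the isolation helper installed above to one table and
// then list the policies PostgreSQL ends up with on it.
async function isolateOneTable(db: any, table: string) {
  // add_server_isolation(schema, table) adds server_id, backfills it,
  // creates the index and enables FORCE ROW LEVEL SECURITY with the policy.
  await db.execute(sql`SELECT add_server_isolation('public', ${table})`);
  // pg_policies is a standard catalog view; handy for verifying the result.
  const policies = await db.execute(
    sql`SELECT policyname FROM pg_policies WHERE schemaname = 'public' AND tablename = ${table}`
  );
  return policies.rows;
}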
+ async function getOrCreateRlsServer(adapter, serverId) {
+ const db2 = adapter.db;
+ await db2.insert(serverTable).values({
+ id: serverId
+ }).onConflictDoNothing();
+ logger8.info({ src: "plugin:sql", serverId: serverId.slice(0, 8) }, "RLS server registered");
+ return serverId;
+ }
+ async function setServerContext(adapter, serverId) {
+ if (!validateUuid(serverId)) {
+ throw new Error(`Invalid server ID format: ${serverId}. Must be a valid UUID.`);
+ }
+ const db2 = adapter.db;
+ const servers = await db2.select().from(serverTable).where(eq(serverTable.id, serverId));
+ if (servers.length === 0) {
+ throw new Error(`Server ${serverId} does not exist`);
+ }
+ logger8.info({ src: "plugin:sql", serverId: serverId.slice(0, 8) }, "RLS context configured");
+ }
+ async function assignAgentToServer(adapter, agentId, serverId) {
+ if (!agentId || !serverId) {
+ logger8.warn(`[Data Isolation] Cannot assign agent to server: invalid agentId (${agentId}) or serverId (${serverId})`);
+ return;
+ }
+ const db2 = adapter.db;
+ const agents = await db2.select().from(agentTable).where(eq(agentTable.id, agentId));
+ if (agents.length > 0) {
+ const agent = agents[0];
+ const currentServerId = agent.server_id;
+ if (currentServerId === serverId) {
+ logger8.debug({ src: "plugin:sql", agentName: agent.name }, "Agent already assigned to correct server");
+ } else {
+ await db2.update(agentTable).set({ server_id: serverId }).where(eq(agentTable.id, agentId));
+ if (currentServerId === null) {
+ logger8.info({ src: "plugin:sql", agentName: agent.name }, "Agent assigned to server");
+ } else {
+ logger8.warn({ src: "plugin:sql", agentName: agent.name }, "Agent server changed");
+ }
+ }
+ } else {
+ logger8.debug({ src: "plugin:sql", agentId }, "Agent does not exist yet");
+ }
+ }
+ async function applyRLSToNewTables(adapter) {
+ const db2 = adapter.db;
+ try {
+ await db2.execute(sql`SELECT apply_rls_to_all_tables()`);
+ logger8.info({ src: "plugin:sql" }, "RLS applied to all tables");
+ } catch (error) {
+ logger8.warn({ src: "plugin:sql", error: String(error) }, "Failed to apply RLS to some tables");
+ }
+ }
+ async function uninstallRLS(adapter) {
+ const db2 = adapter.db;
+ try {
+ const checkResult = await db2.execute(sql`
+ SELECT EXISTS (
+ SELECT FROM pg_tables
+ WHERE schemaname = 'public' AND tablename = 'servers'
+ ) as rls_enabled
+ `);
+ const rlsEnabled = checkResult.rows?.[0]?.rls_enabled;
+ if (!rlsEnabled) {
+ logger8.debug({ src: "plugin:sql" }, "RLS not installed, skipping cleanup");
+ return;
+ }
+ logger8.info({ src: "plugin:sql" }, "Disabling RLS globally (keeping server_id columns for schema compatibility)...");
+ try {
+ await uninstallEntityRLS(adapter);
+ } catch (entityRlsError) {
+ logger8.debug({ src: "plugin:sql" }, "Entity RLS cleanup skipped (not installed or already cleaned)");
+ }
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION _temp_disable_rls_on_table(
+ p_schema_name text,
+ p_table_name text
+ ) RETURNS void AS $$
+ DECLARE
+ policy_rec record;
+ BEGIN
+ -- Drop all policies on this table
+ FOR policy_rec IN
+ SELECT policyname
+ FROM pg_policies
+ WHERE schemaname = p_schema_name AND tablename = p_table_name
+ LOOP
+ EXECUTE format('DROP POLICY IF EXISTS %I ON %I.%I',
+ policy_rec.policyname, p_schema_name, p_table_name);
+ END LOOP;
+
+ -- Disable RLS
+ EXECUTE format('ALTER TABLE %I.%I NO FORCE ROW LEVEL SECURITY', p_schema_name, p_table_name);
+ EXECUTE format('ALTER TABLE %I.%I DISABLE ROW LEVEL SECURITY', p_schema_name, p_table_name);
+ END;
+ $$ LANGUAGE plpgsql;
+ `);
+ const tablesResult = await db2.execute(sql`
+ SELECT schemaname, tablename
+ FROM pg_tables
+ WHERE schemaname = 'public'
+ AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
+ `);
+ for (const row of tablesResult.rows || []) {
+ const schemaName = row.schemaname;
+ const tableName = row.tablename;
+ try {
+ await db2.execute(sql`SELECT _temp_disable_rls_on_table(${schemaName}, ${tableName})`);
+ logger8.debug({ src: "plugin:sql", schemaName, tableName }, "Disabled RLS on table");
+ } catch (error) {
+ logger8.warn({ src: "plugin:sql", schemaName, tableName, error: String(error) }, "Failed to disable RLS on table");
+ }
+ }
+ await db2.execute(sql`DROP FUNCTION IF EXISTS _temp_disable_rls_on_table(text, text)`);
+ logger8.info({ src: "plugin:sql" }, "Keeping server_id values intact (prevents data theft on re-enable)");
+ logger8.info({ src: "plugin:sql" }, "Clearing servers table...");
+ await db2.execute(sql`TRUNCATE TABLE servers`);
+ await db2.execute(sql`DROP FUNCTION IF EXISTS apply_rls_to_all_tables() CASCADE`);
+ await db2.execute(sql`DROP FUNCTION IF EXISTS add_server_isolation(text, text) CASCADE`);
+ await db2.execute(sql`DROP FUNCTION IF EXISTS current_server_id() CASCADE`);
+ logger8.info({ src: "plugin:sql" }, "Dropped all RLS functions");
+ logger8.info({ src: "plugin:sql" }, "RLS disabled successfully (server_id columns preserved)");
+ } catch (error) {
+ logger8.error({ src: "plugin:sql", error: String(error) }, "Failed to disable RLS");
+ throw error;
+ }
+ }
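Taken together, the functions above suggest the intended enable flow: register the server row, point the adapter at it, tag the agent, then install policies. A hedged TypeScript sketch, reusing only the names and signatures visible in this diff (the adapter shape is assumed to be the plugin's own adapter):

// Sketch of one plausible ordering of the helpers added above.
async function enableServerIsolation(adapter: { db: any }, serverId: string, agentId: string) {
  // Register the server row; onConflictDoNothing makes this idempotent.
  await getOrCreateRlsServer(adapter, serverId);
  // Validates the UUID and confirms the row exists before any RLS-scoped work.
  await setServerContext(adapter, serverId);
  // Tags the agent row with server_id so the isolation policy covers it.
  await assignAgentToServer(adapter, agentId, serverId);
  // Installs/refreshes server_isolation_policy on every eligible public table.
  await applyRLSToNewTables(adapter);
}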
+ async function installEntityRLS(adapter) {
+ const db2 = adapter.db;
+ logger8.info("[Entity RLS] Installing entity RLS functions and policies...");
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION current_entity_id()
+ RETURNS UUID AS $$
+ DECLARE
+ entity_id_text TEXT;
+ BEGIN
+ -- Read from transaction-local variable
+ entity_id_text := NULLIF(current_setting('app.entity_id', TRUE), '');
+
+ IF entity_id_text IS NULL OR entity_id_text = '' THEN
+ RETURN NULL;
+ END IF;
+
+ BEGIN
+ RETURN entity_id_text::UUID;
+ EXCEPTION WHEN OTHERS THEN
+ RETURN NULL;
+ END;
+ END;
+ $$ LANGUAGE plpgsql STABLE;
+ `);
+ logger8.info("[Entity RLS] Created current_entity_id() function");
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION add_entity_isolation(
+ schema_name text,
+ table_name text,
+ require_entity boolean DEFAULT false
+ ) RETURNS void AS $$
+ DECLARE
+ full_table_name text;
+ has_entity_id boolean;
+ has_author_id boolean;
+ has_channel_id boolean;
+ has_room_id boolean;
+ entity_column_name text;
+ room_column_name text;
+ BEGIN
+ full_table_name := schema_name || '.' || table_name;
+
+ -- Check which columns exist (using camelCase as per schema definition)
+ SELECT EXISTS (
+ SELECT 1 FROM information_schema.columns
+ WHERE information_schema.columns.table_schema = schema_name
+ AND information_schema.columns.table_name = add_entity_isolation.table_name
+ AND information_schema.columns.column_name = 'entityId'
+ ) INTO has_entity_id;
+
+ SELECT EXISTS (
+ SELECT 1 FROM information_schema.columns
+ WHERE information_schema.columns.table_schema = schema_name
+ AND information_schema.columns.table_name = add_entity_isolation.table_name
+ AND information_schema.columns.column_name = 'authorId'
+ ) INTO has_author_id;
+
+ SELECT EXISTS (
+ SELECT 1 FROM information_schema.columns
+ WHERE information_schema.columns.table_schema = schema_name
+ AND information_schema.columns.table_name = add_entity_isolation.table_name
+ AND information_schema.columns.column_name = 'roomId'
+ ) INTO has_room_id;
+
+ -- Skip if no entity-related columns
+ IF NOT (has_entity_id OR has_author_id OR has_room_id) THEN
+ RAISE NOTICE '[Entity RLS] Skipping %.%: no entity columns found', schema_name, table_name;
+ RETURN;
+ END IF;
+
+ -- Determine which column to use for entity filtering
+ -- Priority: roomId (shared access via participants) > entityId/authorId (direct access)
+ --
+ -- SPECIAL CASE: participants table must use direct entityId to avoid infinite recursion
+ IF table_name = 'participants' AND has_entity_id THEN
+ entity_column_name := 'entityId';
+ room_column_name := NULL;
+ ELSIF has_room_id THEN
+ room_column_name := 'roomId';
+ entity_column_name := NULL;
+ ELSIF has_entity_id THEN
+ entity_column_name := 'entityId';
+ room_column_name := NULL;
+ ELSIF has_author_id THEN
+ entity_column_name := 'authorId';
+ room_column_name := NULL;
+ ELSE
+ entity_column_name := NULL;
+ room_column_name := NULL;
+ END IF;
+
+ -- Enable RLS on the table
+ EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
+ EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
+
+ -- Drop existing entity policies if present
+ EXECUTE format('DROP POLICY IF EXISTS entity_isolation_policy ON %I.%I', schema_name, table_name);
+
+ -- CASE 1: Table has roomId or channelId (shared access via participants)
+ IF room_column_name IS NOT NULL THEN
+ -- Determine the corresponding column name in participants table
+ -- If the table has roomId, look for roomId in participants.roomId
+ -- participants table uses: entityId (for participant), roomId (for room)
+ -- RESTRICTIVE: Must pass BOTH server RLS AND entity RLS (combined with AND)
+
+ -- Build policy with or without NULL check based on require_entity parameter
+ IF require_entity THEN
+ -- STRICT MODE: Entity context is REQUIRED (blocks NULL entity_id)
+ EXECUTE format('
+ CREATE POLICY entity_isolation_policy ON %I.%I
+ AS RESTRICTIVE
+ USING (
+ current_entity_id() IS NOT NULL
+ AND %I IN (
+ SELECT "roomId"
+ FROM participants
+ WHERE "entityId" = current_entity_id()
+ )
+ )
+ WITH CHECK (
+ current_entity_id() IS NOT NULL
+ AND %I IN (
+ SELECT "roomId"
+ FROM participants
+ WHERE "entityId" = current_entity_id()
+ )
+ )
+ ', schema_name, table_name, room_column_name, room_column_name);
+ RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (via % → participants.roomId, entity REQUIRED)', schema_name, table_name, room_column_name;
+ ELSE
+ -- PERMISSIVE MODE: NULL entity_id allows system/admin access
+ EXECUTE format('
+ CREATE POLICY entity_isolation_policy ON %I.%I
+ AS RESTRICTIVE
+ USING (
+ current_entity_id() IS NULL
+ OR %I IN (
+ SELECT "roomId"
+ FROM participants
+ WHERE "entityId" = current_entity_id()
+ )
+ )
+ WITH CHECK (
+ current_entity_id() IS NULL
+ OR %I IN (
+ SELECT "roomId"
+ FROM participants
+ WHERE "entityId" = current_entity_id()
+ )
+ )
+ ', schema_name, table_name, room_column_name, room_column_name);
+ RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (via % → participants.roomId, NULL allowed)', schema_name, table_name, room_column_name;
+ END IF;
+
+ -- CASE 2: Table has direct entity_id or author_id column
+ ELSIF entity_column_name IS NOT NULL THEN
+ -- RESTRICTIVE: Must pass BOTH server RLS AND entity RLS (combined with AND)
+
+ IF require_entity THEN
+ -- STRICT MODE: Entity context is REQUIRED
+ EXECUTE format('
+ CREATE POLICY entity_isolation_policy ON %I.%I
+ AS RESTRICTIVE
+ USING (
+ current_entity_id() IS NOT NULL
+ AND %I = current_entity_id()
+ )
+ WITH CHECK (
+ current_entity_id() IS NOT NULL
+ AND %I = current_entity_id()
+ )
+ ', schema_name, table_name, entity_column_name, entity_column_name);
+ RAISE NOTICE '[Entity RLS] Applied STRICT RESTRICTIVE to %.% (direct column: %, entity REQUIRED)', schema_name, table_name, entity_column_name;
+ ELSE
+ -- PERMISSIVE MODE: NULL entity_id allows system/admin access
+ EXECUTE format('
+ CREATE POLICY entity_isolation_policy ON %I.%I
+ AS RESTRICTIVE
+ USING (
+ current_entity_id() IS NULL
+ OR %I = current_entity_id()
+ )
+ WITH CHECK (
+ current_entity_id() IS NULL
+ OR %I = current_entity_id()
+ )
+ ', schema_name, table_name, entity_column_name, entity_column_name);
+ RAISE NOTICE '[Entity RLS] Applied PERMISSIVE RESTRICTIVE to %.% (direct column: %, NULL allowed)', schema_name, table_name, entity_column_name;
+ END IF;
+ END IF;
+
+ -- Create indexes for efficient entity filtering
+ IF room_column_name IS NOT NULL THEN
+ EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_room ON %I.%I(%I)',
+ table_name, schema_name, table_name, room_column_name);
+ END IF;
+
+ IF entity_column_name IS NOT NULL THEN
+ EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_entity ON %I.%I(%I)',
+ table_name, schema_name, table_name, entity_column_name);
+ END IF;
+ END;
+ $$ LANGUAGE plpgsql;
+ `);
+ logger8.info("[Entity RLS] Created add_entity_isolation() function");
+ await db2.execute(sql`
+ CREATE OR REPLACE FUNCTION apply_entity_rls_to_all_tables() RETURNS void AS $$
+ DECLARE
+ tbl record;
+ require_entity_for_table boolean;
+ BEGIN
+ FOR tbl IN
+ SELECT schemaname, tablename
+ FROM pg_tables
+ WHERE schemaname = 'public'
+ AND tablename NOT IN (
+ 'servers', -- Server RLS table
+ 'users', -- Authentication table (no entity isolation needed)
+ 'entity_mappings', -- Mapping table (no entity isolation needed)
+ 'drizzle_migrations', -- Migration tracking
+ '__drizzle_migrations' -- Migration tracking
+ )
+ LOOP
+ BEGIN
+ -- Apply STRICT mode (require_entity=true) to sensitive user-facing tables
+ -- These tables MUST have entity context set to access data
+ -- STRICT tables: memories, logs, components, tasks (user data requiring isolation)
+ -- NOTE: Excluded tables:
+ -- - 'participants': Adding participants is a privileged operation during initialization
+ IF tbl.tablename IN ('memories', 'logs', 'components', 'tasks') THEN
+ require_entity_for_table := true;
+ ELSE
+ -- PERMISSIVE mode (require_entity=false) for system/privileged tables
+ -- This includes: participants, rooms, channels, entities, etc.
+ require_entity_for_table := false;
+ END IF;
+
+ PERFORM add_entity_isolation(tbl.schemaname, tbl.tablename, require_entity_for_table);
+ EXCEPTION WHEN OTHERS THEN
+ RAISE WARNING '[Entity RLS] Failed to apply to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
+ END;
+ END LOOP;
+ END;
+ $$ LANGUAGE plpgsql;
+ `);
+ logger8.info("[Entity RLS] Created apply_entity_rls_to_all_tables() function");
+ logger8.info("[Entity RLS] Entity RLS functions installed successfully");
+ }
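current_entity_id() only reads the transaction-local app.entity_id setting; something has to set it for the STRICT policies above to let rows through. One way to do that is PostgreSQL's built-in set_config(name, value, is_local) with is_local = true, so the value expires with the transaction. A TypeScript sketch, assuming a Drizzle db handle; the query against memories is just an example:

import { sql } from "drizzle-orm";

// Sketch: scope reads to one entity. With STRICT mode on memories/logs/
// components/tasks, an unset app.entity_id means those tables return no rows.
async function readAsEntity(db: any, entityId: string) {
  return db.transaction(async (tx: any) => {
    // Transaction-local; cleared automatically at commit or rollback.
    await tx.execute(sql`SELECT set_config('app.entity_id', ${entityId}, true)`);
    return tx.execute(sql`SELECT id FROM memories LIMIT 10`);
  });
}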
+ async function applyEntityRLSToAllTables(adapter) {
+ const db2 = adapter.db;
+ try {
+ await db2.execute(sql`SELECT apply_entity_rls_to_all_tables()`);
+ logger8.info("[Entity RLS] Applied entity RLS to all eligible tables");
+ } catch (error) {
+ logger8.warn("[Entity RLS] Failed to apply entity RLS to some tables:", String(error));
+ }
+ }
+ async function uninstallEntityRLS(adapter) {
+ const db2 = adapter.db;
+ logger8.info("[Entity RLS] Removing entity RLS policies and functions...");
+ try {
+ const tablesResult = await db2.execute(sql`
+ SELECT schemaname, tablename
+ FROM pg_tables
+ WHERE schemaname = 'public'
+ AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
+ `);
+ for (const row of tablesResult.rows || []) {
+ const schemaName = row.schemaname;
+ const tableName = row.tablename;
+ try {
+ await db2.execute(sql.raw(`DROP POLICY IF EXISTS entity_isolation_policy ON ${schemaName}.${tableName}`));
+ logger8.debug(`[Entity RLS] Dropped entity_isolation_policy from ${schemaName}.${tableName}`);
+ } catch (error) {
+ logger8.debug(`[Entity RLS] No entity policy on ${schemaName}.${tableName}`);
+ }
+ }
+ await db2.execute(sql`DROP FUNCTION IF EXISTS apply_entity_rls_to_all_tables() CASCADE`);
+ await db2.execute(sql`DROP FUNCTION IF EXISTS add_entity_isolation(text, text) CASCADE`);
+ await db2.execute(sql`DROP FUNCTION IF EXISTS current_entity_id() CASCADE`);
+ logger8.info("[Entity RLS] Entity RLS functions and policies removed successfully");
+ } catch (error) {
+ logger8.error("[Entity RLS] Failed to remove entity RLS:", String(error));
+ throw error;
+ }
+ }
+ var init_rls = __esm(() => {
+ init_drizzle_orm();
+ init_server();
+ init_agent();
+ });
+
+ // src/migration-service.ts
+ var exports_migration_service = {};
+ __export(exports_migration_service, {
+ DatabaseMigrationService: () => DatabaseMigrationService
+ });
+ import { logger as logger9 } from "@elizaos/core";
+
+ class DatabaseMigrationService {
+ db = null;
+ registeredSchemas = new Map;
+ migrator = null;
+ constructor() {}
+ async initializeWithDatabase(db2) {
+ this.db = db2;
+ await migrateToEntityRLS({ db: db2 });
+ this.migrator = new RuntimeMigrator(db2);
+ await this.migrator.initialize();
+ logger9.info({ src: "plugin:sql" }, "DatabaseMigrationService initialized");
+ }
+ discoverAndRegisterPluginSchemas(plugins) {
+ for (const plugin of plugins) {
+ if (plugin.schema) {
+ this.registeredSchemas.set(plugin.name, plugin.schema);
+ }
+ }
+ logger9.info({
+ src: "plugin:sql",
+ schemasDiscovered: this.registeredSchemas.size,
+ totalPlugins: plugins.length
+ }, "Plugin schemas discovered");
+ }
+ registerSchema(pluginName, schema2) {
+ this.registeredSchemas.set(pluginName, schema2);
+ logger9.debug({ src: "plugin:sql", pluginName }, "Schema registered");
+ }
+ async runAllPluginMigrations(options) {
+ if (!this.db || !this.migrator) {
+ throw new Error("Database or migrator not initialized in DatabaseMigrationService");
+ }
+ const isProduction = false;
+ const migrationOptions = {
+ verbose: options?.verbose ?? !isProduction,
+ force: options?.force ?? false,
+ dryRun: options?.dryRun ?? false
+ };
+ logger9.info({
+ src: "plugin:sql",
+ environment: isProduction ? "PRODUCTION" : "DEVELOPMENT",
+ pluginCount: this.registeredSchemas.size,
+ dryRun: migrationOptions.dryRun
+ }, "Starting migrations");
+ let successCount = 0;
+ let failureCount = 0;
+ const errors2 = [];
+ for (const [pluginName, schema2] of this.registeredSchemas) {
+ try {
+ await this.migrator.migrate(pluginName, schema2, migrationOptions);
+ successCount++;
+ logger9.info({ src: "plugin:sql", pluginName }, "Migration completed");
+ } catch (error) {
+ failureCount++;
+ const errorMessage = error.message;
+ errors2.push({ pluginName, error });
+ if (errorMessage.includes("Destructive migration blocked")) {
+ logger9.error({ src: "plugin:sql", pluginName }, "Migration blocked - destructive changes detected. Set ELIZA_ALLOW_DESTRUCTIVE_MIGRATIONS=true or use force option");
+ } else {
+ logger9.error({ src: "plugin:sql", pluginName, error: errorMessage }, "Migration failed");
+ }
+ }
+ }
+ if (failureCount === 0) {
+ logger9.info({ src: "plugin:sql", successCount }, "All migrations completed successfully");
+ const dataIsolationEnabled = process.env.ENABLE_DATA_ISOLATION === "true";
+ if (dataIsolationEnabled) {
+ try {
+ logger9.info({ src: "plugin:sql" }, "Re-applying Row Level Security...");
+ await installRLSFunctions({ db: this.db });
+ await applyRLSToNewTables({ db: this.db });
+ await applyEntityRLSToAllTables({ db: this.db });
+ logger9.info({ src: "plugin:sql" }, "RLS re-applied successfully");
+ } catch (rlsError) {
+ const errorMsg = rlsError instanceof Error ? rlsError.message : String(rlsError);
+ logger9.warn({ src: "plugin:sql", error: errorMsg }, "Failed to re-apply RLS (this is OK if server_id columns are not yet in schemas)");
+ }
+ } else {
+ logger9.info({ src: "plugin:sql" }, "Skipping RLS re-application (ENABLE_DATA_ISOLATION is not true)");
+ }
+ } else {
+ logger9.error({ src: "plugin:sql", failureCount, successCount }, "Some migrations failed");
+ const errorSummary = errors2.map((e) => `${e.pluginName}: ${e.error.message}`).join(`
+ `);
+ throw new Error(`${failureCount} migration(s) failed:
+ ${errorSummary}`);
+ }
+ }
+ getMigrator() {
+ return this.migrator;
+ }
+ }
+ var init_migration_service = __esm(() => {
+ init_runtime_migrator2();
+ init_migrations();
+ init_rls();
+ });
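A short usage sketch of the service above, in TypeScript; the db handle and the runtimePlugins array are stand-ins for whatever the host runtime provides:

// Sketch: typical lifecycle of DatabaseMigrationService as added in this version.
const migrations = new DatabaseMigrationService();
await migrations.initializeWithDatabase(db);                  // also runs migrateToEntityRLS
migrations.discoverAndRegisterPluginSchemas(runtimePlugins);  // registers every plugin that exposes plugin.schema
await migrations.runAllPluginMigrations({ verbose: true, dryRun: false });
// With ENABLE_DATA_ISOLATION=true and no failed plugin, a successful run
// re-installs and re-applies both server and entity RLS afterwards.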
+
+ // src/index.node.ts
+ import { logger as logger14, stringToUuid } from "@elizaos/core";
+ import { mkdirSync } from "node:fs";
+
+ // src/pglite/adapter.ts
+ import { logger as logger11 } from "@elizaos/core";
+
+ // ../../node_modules/drizzle-orm/pglite/driver.js
+ init_entity();
+ init_logger();
+ init_db();
+ init_dialect();
+ init_relations();
+ init_utils();
+ import { PGlite } from "@electric-sql/pglite";
+
+ // ../../node_modules/drizzle-orm/pglite/session.js
+ init_entity();
+ init_logger();
+ init_pg_core();
+ init_session();
+ init_sql();
+ init_utils();
+ init_cache();
+ import { types } from "@electric-sql/pglite";
+
+ class PglitePreparedQuery extends PgPreparedQuery {
+ constructor(client, queryString, params, logger, cache, queryMetadata, cacheConfig, fields, name, _isResponseInArrayMode, customResultMapper) {
+ super({ sql: queryString, params }, cache, queryMetadata, cacheConfig);
+ this.client = client;
+ this.queryString = queryString;
+ this.params = params;
+ this.logger = logger;
+ this.fields = fields;
+ this._isResponseInArrayMode = _isResponseInArrayMode;
+ this.customResultMapper = customResultMapper;
+ this.rawQueryConfig = {
+ rowMode: "object",
+ parsers: {
+ [types.TIMESTAMP]: (value) => value,
+ [types.TIMESTAMPTZ]: (value) => value,
+ [types.INTERVAL]: (value) => value,
+ [types.DATE]: (value) => value,
+ [1231]: (value) => value,
+ [1115]: (value) => value,
+ [1185]: (value) => value,
+ [1187]: (value) => value,
+ [1182]: (value) => value
+ }
+ };
+ this.queryConfig = {
+ rowMode: "array",
+ parsers: {
+ [types.TIMESTAMP]: (value) => value,
+ [types.TIMESTAMPTZ]: (value) => value,
+ [types.INTERVAL]: (value) => value,
+ [types.DATE]: (value) => value,
+ [1231]: (value) => value,
+ [1115]: (value) => value,
+ [1185]: (value) => value,
+ [1187]: (value) => value,
+ [1182]: (value) => value
+ }
+ };
+ }
+ static [entityKind] = "PglitePreparedQuery";
+ rawQueryConfig;
+ queryConfig;
+ async execute(placeholderValues = {}) {
+ const params = fillPlaceholders(this.params, placeholderValues);
+ this.logger.logQuery(this.queryString, params);
+ const { fields, client, queryConfig, joinsNotNullableMap, customResultMapper, queryString, rawQueryConfig } = this;
+ if (!fields && !customResultMapper) {
+ return this.queryWithCache(queryString, params, async () => {
+ return await client.query(queryString, params, rawQueryConfig);
+ });
+ }
+ const result = await this.queryWithCache(queryString, params, async () => {
return await client.query(queryString, params, queryConfig);
});
return customResultMapper ? customResultMapper(result.rows) : result.rows.map((row) => mapResultRow(fields, row, joinsNotNullableMap));
@@ -8154,7 +9051,7 @@ init_drizzle_orm();
import {
ChannelType,
DatabaseAdapter,
- logger as
+ logger as logger10
} from "@elizaos/core";
import { v4 } from "uuid";

@@ -8166,33 +9063,12 @@ import { VECTOR_DIMS } from "@elizaos/core";
// src/schema/memory.ts
init_drizzle_orm();
init_pg_core();
-
- // src/schema/agent.ts
- init_drizzle_orm();
- init_pg_core();
- var agentTable = pgTable("agents", {
- id: uuid("id").primaryKey().defaultRandom(),
- enabled: boolean("enabled").default(true).notNull(),
- owner_id: uuid("owner_id"),
- createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
- updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull(),
- name: text("name").notNull(),
- username: text("username"),
- system: text("system").default(""),
- bio: jsonb("bio").$type().default(sql`'[]'::jsonb`),
- messageExamples: jsonb("message_examples").$type().default(sql`'[]'::jsonb`).notNull(),
- postExamples: jsonb("post_examples").$type().default(sql`'[]'::jsonb`).notNull(),
- topics: jsonb("topics").$type().default(sql`'[]'::jsonb`).notNull(),
- adjectives: jsonb("adjectives").$type().default(sql`'[]'::jsonb`).notNull(),
- knowledge: jsonb("knowledge").$type().default(sql`'[]'::jsonb`).notNull(),
- plugins: jsonb("plugins").$type().default(sql`'[]'::jsonb`).notNull(),
- settings: jsonb("settings").$type().default(sql`'{}'::jsonb`).notNull(),
- style: jsonb("style").$type().default(sql`'{}'::jsonb`).notNull()
- });
+ init_agent();

// src/schema/entity.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var entityTable = pgTable("entities", {
id: uuid("id").notNull().primaryKey(),
agentId: uuid("agent_id").notNull().references(() => agentTable.id, {
@@ -8210,6 +9086,7 @@ var entityTable = pgTable("entities", {
// src/schema/room.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var roomTable = pgTable("rooms", {
id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
agentId: uuid("agentId").references(() => agentTable.id, {
@@ -8217,12 +9094,12 @@ var roomTable = pgTable("rooms", {
}),
source: text("source").notNull(),
type: text("type").notNull(),
-
+ messageServerId: uuid("message_server_id"),
worldId: uuid("worldId"),
name: text("name"),
metadata: jsonb("metadata"),
- channelId: text("
- createdAt: timestamp("
+ channelId: text("channel_id"),
+ createdAt: timestamp("created_at").default(sql`now()`).notNull()
});

// src/schema/memory.ts
@@ -8314,17 +9191,18 @@ var embeddingTable = pgTable("embeddings", {
]);

// src/schema/index.ts
+ init_agent();
var exports_schema = {};
__export(exports_schema, {
worldTable: () => worldTable,
taskTable: () => taskTable,
-
+ serverTable: () => serverTable,
roomTable: () => roomTable,
relationshipTable: () => relationshipTable,
participantTable: () => participantTable,
- ownersTable: () => ownersTable,
messageTable: () => messageTable,
messageServerTable: () => messageServerTable,
+ messageServerAgentsTable: () => messageServerAgentsTable,
memoryTable: () => memoryTable,
logTable: () => logTable,
entityTable: () => entityTable,
@@ -8339,29 +9217,30 @@ __export(exports_schema, {
// src/schema/cache.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var cacheTable = pgTable("cache", {
key: text("key").notNull(),
agentId: uuid("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
value: jsonb("value").notNull(),
createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
expiresAt: timestamp("expires_at", { withTimezone: true })
- }, (table3) => ({
- pk: primaryKey({ columns: [table3.key, table3.agentId] })
- }));
+ }, (table3) => [primaryKey({ columns: [table3.key, table3.agentId] })]);
// src/schema/component.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();

// src/schema/world.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var worldTable = pgTable("worlds", {
id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
agentId: uuid("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
name: text("name").notNull(),
metadata: jsonb("metadata"),
-
- createdAt: timestamp("
+ messageServerId: uuid("message_server_id"),
+ createdAt: timestamp("created_at").default(sql`now()`).notNull()
});

// src/schema/component.ts
@@ -8398,17 +9277,14 @@ var logTable = pgTable("logs", {
foreignColumns: [entityTable.id]
}).onDelete("cascade")
]);
-
-
-
-
- id: uuid("id").primaryKey(),
- createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
- updatedAt: timestamp("updated_at", { withTimezone: true }).default(sql`now()`).notNull()
- });
+
+ // src/schema/index.ts
+ init_server();
+
// src/schema/participant.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var participantTable = pgTable("participants", {
id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
@@ -8439,6 +9315,7 @@ var participantTable = pgTable("participants", {
// src/schema/relationship.ts
init_drizzle_orm();
init_pg_core();
+ init_agent();
var relationshipTable = pgTable("relationships", {
id: uuid("id").notNull().primaryKey().default(sql`gen_random_uuid()`),
createdAt: timestamp("created_at", { withTimezone: true }).default(sql`now()`).notNull(),
@@ -8464,6 +9341,7 @@ var relationshipTable = pgTable("relationships", {
// src/schema/tasks.ts
init_pg_core();
init_drizzle_orm();
+ init_agent();
var taskTable = pgTable("tasks", {
id: uuid("id").primaryKey().defaultRandom(),
name: text("name").notNull(),
@@ -8471,7 +9349,7 @@ var taskTable = pgTable("tasks", {
roomId: uuid("roomId"),
worldId: uuid("worldId"),
entityId: uuid("entityId"),
- agentId: uuid("
+ agentId: uuid("agentId").notNull().references(() => agentTable.id, { onDelete: "cascade" }),
tags: text("tags").array().default(sql`'{}'::text[]`),
metadata: jsonb("metadata").default(sql`'{}'::jsonb`),
createdAt: timestamp("created_at", { withTimezone: true }).defaultNow(),
@@ -8494,7 +9372,7 @@ init_pg_core();
init_drizzle_orm();
var channelTable = pgTable("channels", {
id: text("id").primaryKey(),
- messageServerId: uuid("
+ messageServerId: uuid("message_server_id").notNull().references(() => messageServerTable.id, { onDelete: "cascade" }),
name: text("name").notNull(),
type: text("type").notNull(),
sourceType: text("source_type"),
@@ -8526,18 +9404,15 @@ var messageTable = pgTable("central_messages", {
init_pg_core();
var channelParticipantsTable = pgTable("channel_participants", {
channelId: text("channel_id").notNull().references(() => channelTable.id, { onDelete: "cascade" }),
-
- }, (table3) => ({
-
- }));
- // src/schema/serverAgent.ts
+ entityId: text("entity_id").notNull()
+ }, (table3) => [primaryKey({ columns: [table3.channelId, table3.entityId] })]);
+ // src/schema/messageServerAgent.ts
init_pg_core();
-
-
+ init_agent();
+ var messageServerAgentsTable = pgTable("message_server_agents", {
+ messageServerId: uuid("message_server_id").notNull().references(() => messageServerTable.id, { onDelete: "cascade" }),
agentId: uuid("agent_id").notNull().references(() => agentTable.id, { onDelete: "cascade" })
- }, (table3) => ({
- pk: primaryKey({ columns: [table3.serverId, table3.agentId] })
- }));
+ }, (table3) => [primaryKey({ columns: [table3.messageServerId, table3.agentId] })]);
// src/base.ts
class BaseDrizzleAdapter extends DatabaseAdapter {
maxRetries = 3;
@@ -8599,10 +9474,19 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
const backoffDelay = Math.min(this.baseDelay * 2 ** (attempt - 1), this.maxDelay);
const jitter = Math.random() * this.jitterMax;
const delay = backoffDelay + jitter;
-
+ logger10.warn({
+ src: "plugin:sql",
+ attempt,
+ maxRetries: this.maxRetries,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Database operation failed, retrying");
await new Promise((resolve) => setTimeout(resolve, delay));
} else {
-
+ logger10.error({
+ src: "plugin:sql",
+ totalAttempts: attempt,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Max retry attempts reached");
throw error instanceof Error ? error : new Error(String(error));
}
}
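The retry path above computes an exponential backoff capped at maxDelay, plus random jitter, before retrying a failed database operation. As a standalone TypeScript sketch of the same formula (the default values here are placeholders, not the adapter's actual settings; only maxRetries = 3 is visible in this diff):

// delay(attempt) = min(baseDelay * 2^(attempt - 1), maxDelay) + uniform jitter
function retryDelayMs(attempt: number, baseDelay = 500, maxDelay = 10000, jitterMax = 1000): number {
  const backoff = Math.min(baseDelay * 2 ** (attempt - 1), maxDelay);
  return backoff + Math.random() * jitterMax;
}
// With these placeholder defaults, attempts 1..3 wait roughly 0.5-1.5s, 1-2s, 2-3s.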
@@ -8656,7 +9540,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
if (agent.id) {
const existing = await this.db.select({ id: agentTable.id }).from(agentTable).where(eq(agentTable.id, agent.id)).limit(1);
if (existing.length > 0) {
-
+ logger10.warn({ src: "plugin:sql", agentId: agent.id }, "Attempted to create agent with duplicate ID");
return false;
}
}
@@ -8667,10 +9551,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
updatedAt: new Date(agent.updatedAt || Date.now())
});
});
- logger8.debug(`Agent created successfully: ${agent.id}`);
return true;
} catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: agent.id,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to create agent");
return false;
}
});
@@ -8704,10 +9591,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
}
await tx.update(agentTable).set(updateData).where(eq(agentTable.id, agentId));
});
- logger8.debug(`Agent updated successfully: ${agentId}`);
return true;
} catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to update agent");
return false;
}
});
@@ -8749,22 +9639,20 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
return finalSettings === undefined ? {} : finalSettings;
}
async deleteAgent(agentId) {
- logger8.debug(`[DB] Deleting agent with ID: ${agentId}`);
return this.withDatabase(async () => {
try {
const result = await this.db.delete(agentTable).where(eq(agentTable.id, agentId)).returning();
if (result.length === 0) {
-
+ logger10.warn({ src: "plugin:sql", agentId }, "Agent not found for deletion");
return false;
}
- logger8.success(`[DB] Agent ${agentId} and all related data successfully deleted via cascade`);
return true;
} catch (error) {
-
-
-
-
- }
+ logger10.error({
+ src: "plugin:sql",
+ agentId,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to delete agent");
throw error;
}
});
@@ -8775,7 +9663,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
const result = await this.db.select({ count: count() }).from(agentTable);
return result[0]?.count || 0;
} catch (error) {
-
+ logger10.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to count agents");
return 0;
}
});
@@ -8784,9 +9672,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
return this.withDatabase(async () => {
try {
await this.db.delete(agentTable);
- logger8.success("Successfully cleaned up agent table");
} catch (error) {
-
+ logger10.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to clean up agent table");
throw error;
}
});
@@ -8865,21 +9752,21 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
metadata: entity2.metadata || {}
}));
await tx.insert(entityTable).values(normalizedEntities);
- logger8.debug(`${entities.length} Entities created successfully`);
return true;
});
} catch (error) {
-
-
-
-
+ logger10.error({
+ src: "plugin:sql",
+ entityId: entities[0]?.id,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to create entities");
return false;
}
});
}
async ensureEntityExists(entity2) {
if (!entity2.id) {
-
+ logger10.error({ src: "plugin:sql" }, "Entity ID is required for ensureEntityExists");
return false;
}
try {
@@ -8889,7 +9776,11 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
}
return true;
} catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ entityId: entity2.id,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to ensure entity exists");
return false;
}
}
@@ -9057,14 +9948,11 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
if (offset !== undefined && offset < 0) {
throw new Error("offset must be a non-negative number");
}
- return this.
+ return this.withEntityContext(entityId ?? null, async (tx) => {
const conditions2 = [eq(memoryTable.type, tableName)];
if (start) {
conditions2.push(gte(memoryTable.createdAt, new Date(start)));
}
- if (entityId) {
- conditions2.push(eq(memoryTable.entityId, entityId));
- }
if (roomId) {
conditions2.push(eq(memoryTable.roomId, roomId));
}
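Several read and write paths in this hunk and the ones that follow now run through this.withEntityContext(entityId, fn) instead of filtering by entityId in the WHERE clause, deferring the filtering to the entity RLS policies. The helper's body falls outside these hunks, so the following TypeScript sketch is only a guess at its shape, consistent with the app.entity_id convention used by the RLS functions earlier in the bundle:

import { sql } from "drizzle-orm";

// Hypothetical sketch of withEntityContext: open a transaction, bind the
// transaction-local entity id (or leave it unset for system access), run the callback.
async function withEntityContext<T>(this: { db: any }, entityId: string | null, fn: (tx: any) => Promise<T>): Promise<T> {
  return this.db.transaction(async (tx: any) => {
    await tx.execute(sql`SELECT set_config('app.entity_id', ${entityId ?? ""}, true)`);
    return fn(tx);
  });
}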
@@ -9080,7 +9968,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9080
9968
|
if (agentId) {
|
|
9081
9969
|
conditions2.push(eq(memoryTable.agentId, agentId));
|
|
9082
9970
|
}
|
|
9083
|
-
const baseQuery =
|
|
9971
|
+
const baseQuery = tx.select({
|
|
9084
9972
|
memory: {
|
|
9085
9973
|
id: memoryTable.id,
|
|
9086
9974
|
type: memoryTable.type,
|
|
@@ -9242,7 +10130,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9242
10130
|
levenshtein_score: Number(row.levenshtein_score)
|
|
9243
10131
|
})).filter((row) => Array.isArray(row.embedding));
|
|
9244
10132
|
} catch (error) {
|
|
9245
|
-
|
|
10133
|
+
logger10.error({
|
|
10134
|
+
src: "plugin:sql",
|
|
10135
|
+
tableName: opts.query_table_name,
|
|
10136
|
+
fieldName: opts.query_field_name,
|
|
10137
|
+
error: error instanceof Error ? error.message : String(error)
|
|
10138
|
+
}, "Failed to get cached embeddings");
|
|
9246
10139
|
if (error instanceof Error && error.message === "levenshtein argument exceeds maximum length of 255 characters") {
|
|
9247
10140
|
return [];
|
|
9248
10141
|
}
|
|
@@ -9255,7 +10148,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9255
10148
|
try {
|
|
9256
10149
|
const sanitizedBody = this.sanitizeJsonObject(params.body);
|
|
9257
10150
|
const jsonString = JSON.stringify(sanitizedBody);
|
|
9258
|
-
await this.
|
|
10151
|
+
await this.withEntityContext(params.entityId, async (tx) => {
|
|
9259
10152
|
await tx.insert(logTable).values({
|
|
9260
10153
|
body: sql`${jsonString}::jsonb`,
|
|
9261
10154
|
entityId: params.entityId,
|
|
@@ -9264,7 +10157,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9264
10157
|
});
|
|
9265
10158
|
});
|
|
9266
10159
|
} catch (error) {
|
|
9267
|
-
|
|
10160
|
+
logger10.error({
|
|
10161
|
+
src: "plugin:sql",
|
|
10162
|
+
type: params.type,
|
|
10163
|
+
roomId: params.roomId,
|
|
10164
|
+
entityId: params.entityId,
|
|
10165
|
+
error: error instanceof Error ? error.message : String(error)
|
|
10166
|
+
}, "Failed to create log entry");
|
|
9268
10167
|
throw error;
|
|
9269
10168
|
}
|
|
9270
10169
|
});
|
|
@@ -9297,8 +10196,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9297
10196
|
}
|
|
9298
10197
|
async getLogs(params) {
|
|
9299
10198
|
const { entityId, roomId, type, count: count2, offset } = params;
|
|
9300
|
-
return this.
|
|
9301
|
-
const result = await
|
|
10199
|
+
return this.withEntityContext(entityId ?? null, async (tx) => {
|
|
10200
|
+
const result = await tx.select().from(logTable).where(and(roomId ? eq(logTable.roomId, roomId) : undefined, type ? eq(logTable.type, type) : undefined)).orderBy(desc(logTable.createdAt)).limit(count2 ?? 10).offset(offset ?? 0);
|
|
9302
10201
|
const logs = result.map((log) => ({
|
|
9303
10202
|
...log,
|
|
9304
10203
|
id: log.id,
|
|
@@ -9316,7 +10215,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9316
10215
|
const limit = Math.min(Math.max(params.limit ?? 20, 1), 100);
|
|
9317
10216
|
const fromDate = typeof params.from === "number" ? new Date(params.from) : undefined;
|
|
9318
10217
|
const toDate = typeof params.to === "number" ? new Date(params.to) : undefined;
|
|
9319
|
-
return this.
|
|
10218
|
+
return this.withEntityContext(params.entityId ?? null, async (tx) => {
|
|
9320
10219
|
const runMap = new Map;
|
|
9321
10220
|
const conditions2 = [
|
|
9322
10221
|
eq(logTable.type, "run_event"),
|
|
@@ -9334,7 +10233,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9334
10233
|
}
|
|
9335
10234
|
const whereClause = and(...conditions2);
|
|
9336
10235
|
const eventLimit = Math.max(limit * 20, 200);
|
|
9337
|
-
const runEventRows = await
|
|
10236
|
+
const runEventRows = await tx.select({
|
|
9338
10237
|
runId: sql`(${logTable.body} ->> 'runId')`,
|
|
9339
10238
|
status: sql`(${logTable.body} ->> 'status')`,
|
|
9340
10239
|
messageId: sql`(${logTable.body} ->> 'messageId')`,
|
|
@@ -9541,11 +10440,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9541
10440
|
});
|
|
9542
10441
|
}
|
|
9543
10442
|
async createMemory(memory, tableName) {
|
|
9544
|
-
logger8.debug(`DrizzleAdapter createMemory: memoryId: ${memory.id}, embeddingLength: ${memory.embedding?.length}, contentLength: ${memory.content?.text?.length}`);
|
|
9545
10443
|
const memoryId = memory.id ?? v4();
|
|
9546
10444
|
const existing = await this.getMemoryById(memoryId);
|
|
9547
10445
|
if (existing) {
|
|
9548
|
-
logger8.debug(`Memory already exists, skipping creation: ${memoryId}`);
|
|
9549
10446
|
return memoryId;
|
|
9550
10447
|
}
|
|
9551
10448
|
if (memory.unique === undefined) {
|
|
@@ -9564,7 +10461,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9564
10461
|
}
|
|
9565
10462
|
const contentToInsert = typeof memory.content === "string" ? memory.content : JSON.stringify(memory.content ?? {});
|
|
9566
10463
|
const metadataToInsert = typeof memory.metadata === "string" ? memory.metadata : JSON.stringify(memory.metadata ?? {});
|
|
9567
|
-
await this.
|
|
10464
|
+
await this.withEntityContext(memory.entityId, async (tx) => {
|
|
9568
10465
|
await tx.insert(memoryTable).values([
|
|
9569
10466
|
{
|
|
9570
10467
|
id: memoryId,
|
|
@@ -9595,7 +10492,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9595
10492
|
async updateMemory(memory) {
|
|
9596
10493
|
return this.withDatabase(async () => {
|
|
9597
10494
|
try {
|
|
9598
|
-
logger8.debug(`Updating memory: memoryId: ${memory.id}, hasEmbedding: ${!!memory.embedding}`);
|
|
9599
10495
|
await this.db.transaction(async (tx) => {
|
|
9600
10496
|
if (memory.content) {
|
|
9601
10497
|
const contentToUpdate = typeof memory.content === "string" ? memory.content : JSON.stringify(memory.content ?? {});
|
|
@@ -9627,10 +10523,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9627
10523
|
}
|
|
9628
10524
|
}
|
|
9629
10525
|
});
|
|
9630
|
-
logger8.debug(`Memory updated successfully: ${memory.id}`);
|
|
9631
10526
|
return true;
|
|
9632
10527
|
} catch (error) {
|
|
9633
|
-
|
|
10528
|
+
logger10.error({
|
|
10529
|
+
src: "plugin:sql",
|
|
10530
|
+
memoryId: memory.id,
|
|
10531
|
+
error: error instanceof Error ? error.message : String(error)
|
|
10532
|
+
}, "Failed to update memory");
|
|
9634
10533
|
return false;
|
|
9635
10534
|
}
|
|
9636
10535
|
});
|
|
@@ -9642,7 +10541,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9642
10541
|
await tx.delete(embeddingTable).where(eq(embeddingTable.memoryId, memoryId));
|
|
9643
10542
|
await tx.delete(memoryTable).where(eq(memoryTable.id, memoryId));
|
|
9644
10543
|
});
|
|
9645
|
-
logger8.debug(`Memory and related fragments removed successfully: ${memoryId}`);
|
|
9646
10544
|
});
|
|
9647
10545
|
}
|
|
9648
10546
|
async deleteManyMemories(memoryIds) {
|
|
@@ -9661,7 +10559,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9661
10559
|
await tx.delete(memoryTable).where(inArray(memoryTable.id, batch));
|
|
9662
10560
|
}
|
|
9663
10561
|
});
|
|
9664
|
-
logger8.debug(`Batch memory deletion completed successfully: ${memoryIds.length}`);
|
|
9665
10562
|
});
|
|
9666
10563
|
}
|
|
9667
10564
|
async deleteMemoryFragments(tx, documentId) {
|
|
@@ -9670,7 +10567,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9670
10567
|
const fragmentIds = fragmentsToDelete.map((f) => f.id);
|
|
9671
10568
|
await tx.delete(embeddingTable).where(inArray(embeddingTable.memoryId, fragmentIds));
|
|
9672
10569
|
await tx.delete(memoryTable).where(inArray(memoryTable.id, fragmentIds));
|
|
9673
|
-
logger8.debug(`Deleted related fragments: documentId: ${documentId}, fragmentCount: ${fragmentsToDelete.length}`);
|
|
9674
10570
|
}
|
|
9675
10571
|
}
|
|
9676
10572
|
async getMemoryFragments(tx, documentId) {
|
|
@@ -9682,7 +10578,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
|
|
|
9682
10578
|
await this.db.transaction(async (tx) => {
|
|
9683
10579
|
const rows = await tx.select({ id: memoryTable.id }).from(memoryTable).where(and(eq(memoryTable.roomId, roomId), eq(memoryTable.type, tableName)));
|
|
9684
10580
|
const ids = rows.map((r) => r.id);
|
|
9685
|
-
|
|
10581
|
+
logger10.debug({ src: "plugin:sql", roomId, tableName, memoryCount: ids.length }, "Deleting all memories");
|
|
9686
10582
|
if (ids.length === 0) {
|
|
9687
10583
|
return;
|
|
9688
10584
|
}
|
|
@@ -9692,7 +10588,6 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  }));
  await tx.delete(memoryTable).where(and(eq(memoryTable.roomId, roomId), eq(memoryTable.type, tableName)));
  });
- logger8.debug(`All memories removed successfully: roomId: ${roomId}, tableName: ${tableName}`);
  });
  }
  async countMemories(roomId, unique2 = true, tableName = "") {
@@ -9714,7 +10609,7 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  name: roomTable.name,
  channelId: roomTable.channelId,
  agentId: roomTable.agentId,
-
+ messageServerId: roomTable.messageServerId,
  worldId: roomTable.worldId,
  type: roomTable.type,
  source: roomTable.source,
@@ -9725,7 +10620,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  id: room.id,
  name: room.name ?? undefined,
  agentId: room.agentId,
-
+ messageServerId: room.messageServerId,
+ serverId: room.messageServerId,
  worldId: room.worldId,
  channelId: room.channelId,
  type: room.type,
@@ -9742,7 +10638,8 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  id: room.id,
  name: room.name ?? undefined,
  agentId: room.agentId,
-
+ messageServerId: room.messageServerId,
+ serverId: room.messageServerId,
  worldId: room.worldId,
  channelId: room.channelId,
  type: room.type,
@@ -9799,7 +10696,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  }).onConflictDoNothing();
  return true;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ entityId,
+ roomId,
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to add participant to room");
  return false;
  }
  });
@@ -9813,10 +10716,14 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  agentId: this.agentId
  }));
  await this.db.insert(participantTable).values(values).onConflictDoNothing().execute();
- logger8.debug(`${entityIds.length} Entities linked successfully`);
  return true;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ roomId,
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to add participants to room");
  return false;
  }
  });
@@ -9828,10 +10735,14 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return await tx.delete(participantTable).where(and(eq(participantTable.entityId, entityId), eq(participantTable.roomId, roomId))).returning();
  });
  const removed = result.length > 0;
- logger8.debug(`Participant ${removed ? "removed" : "not found"}: entityId: ${entityId}, roomId: ${roomId}, removed: ${removed}`);
  return removed;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ entityId,
+ roomId,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to remove participant from room");
  return false;
  }
  });
@@ -9859,6 +10770,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return result.map((row) => row.entityId);
  });
  }
+ async isRoomParticipant(roomId, entityId) {
+ return this.withDatabase(async () => {
+ const result = await this.db.select().from(participantTable).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId))).limit(1);
+ return result.length > 0;
+ });
+ }
  async getParticipantUserState(roomId, entityId) {
  return this.withDatabase(async () => {
  const result = await this.db.select({ roomState: participantTable.roomState }).from(participantTable).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId), eq(participantTable.agentId, this.agentId))).limit(1);
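The new isRoomParticipant() above uses a LIMIT 1 existence check rather than counting rows. A short sketch of that query pattern with drizzle-orm; the db and participantTable values are assumed to come from the adapter's schema, and the loose typing is deliberate.

import { and, eq } from "drizzle-orm";

async function isParticipant(db: any, participantTable: any, roomId: string, entityId: string): Promise<boolean> {
  const rows = await db
    .select()
    .from(participantTable)
    .where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId)))
    .limit(1); // presence check only, so LIMIT 1 is cheaper than count(*)
  return rows.length > 0;
}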
@@ -9872,7 +10789,13 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  await tx.update(participantTable).set({ roomState: state }).where(and(eq(participantTable.roomId, roomId), eq(participantTable.entityId, entityId), eq(participantTable.agentId, this.agentId)));
  });
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ roomId,
+ entityId,
+ state,
+ error: error instanceof Error ? error.message : String(error)
+ }, "Failed to set participant follow state");
  throw error;
  }
  });
@@ -9892,7 +10815,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  await this.db.insert(relationshipTable).values(saveParams);
  return true;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error),
+ saveParams
+ }, "Error creating relationship");
  return false;
  }
  });
@@ -9905,7 +10833,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  metadata: relationship.metadata || {}
  }).where(eq(relationshipTable.id, relationship.id));
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error),
+ relationshipId: relationship.id
+ }, "Error updating relationship");
  throw error;
  }
  });
@@ -9949,12 +10882,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return result.rows.map((relationship) => ({
  ...relationship,
  id: relationship.id,
- sourceEntityId: relationship.sourceEntityId,
- targetEntityId: relationship.targetEntityId,
- agentId: relationship.agentId,
+ sourceEntityId: relationship.source_entity_id || relationship.sourceEntityId,
+ targetEntityId: relationship.target_entity_id || relationship.targetEntityId,
+ agentId: relationship.agent_id || relationship.agentId,
  tags: relationship.tags ?? [],
  metadata: relationship.metadata ?? {},
- createdAt: relationship.createdAt ? relationship.createdAt instanceof Date ? relationship.createdAt.toISOString() : new Date(relationship.createdAt).toISOString() : new Date().toISOString()
+ createdAt: relationship.created_at || relationship.createdAt ? (relationship.created_at || relationship.createdAt) instanceof Date ? (relationship.created_at || relationship.createdAt).toISOString() : new Date(relationship.created_at || relationship.createdAt).toISOString() : new Date().toISOString()
  }));
  });
  }
@@ -9967,7 +10900,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  }
  return;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error),
+ key
+ }, "Error fetching cache");
  return;
  }
  });
@@ -9987,7 +10925,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  });
  return true;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error),
+ key
+ }, "Error setting cache");
  return false;
  }
  });
@@ -10000,7 +10943,12 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  });
  return true;
  } catch (error) {
-
+ logger10.error({
+ src: "plugin:sql",
+ agentId: this.agentId,
+ error: error instanceof Error ? error.message : String(error),
+ key
+ }, "Error deleting cache");
  return false;
  }
  });
@@ -10163,25 +11111,25 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return this.withDatabase(async () => {
  const rooms = await this.db.select({ id: roomTable.id }).from(roomTable).where(and(eq(roomTable.worldId, worldId), eq(roomTable.agentId, this.agentId)));
  if (rooms.length === 0) {
- logger8.debug(`No rooms found for worldId ${worldId} and agentId ${this.agentId} to delete.`);
  return;
  }
  const roomIds = rooms.map((room) => room.id);
  if (roomIds.length > 0) {
  await this.db.delete(logTable).where(inArray(logTable.roomId, roomIds));
- logger8.debug(`Deleted logs for ${roomIds.length} rooms in world ${worldId}.`);
  await this.db.delete(participantTable).where(inArray(participantTable.roomId, roomIds));
- logger8.debug(`Deleted participants for ${roomIds.length} rooms in world ${worldId}.`);
  const memoriesInRooms = await this.db.select({ id: memoryTable.id }).from(memoryTable).where(inArray(memoryTable.roomId, roomIds));
  const memoryIdsInRooms = memoriesInRooms.map((m) => m.id);
  if (memoryIdsInRooms.length > 0) {
  await this.db.delete(embeddingTable).where(inArray(embeddingTable.memoryId, memoryIdsInRooms));
- logger8.debug(`Deleted embeddings for ${memoryIdsInRooms.length} memories in world ${worldId}.`);
  await this.db.delete(memoryTable).where(inArray(memoryTable.id, memoryIdsInRooms));
- logger8.debug(`Deleted ${memoryIdsInRooms.length} memories in world ${worldId}.`);
  }
  await this.db.delete(roomTable).where(inArray(roomTable.id, roomIds));
-
+ logger10.debug({
+ src: "plugin:sql",
+ worldId,
+ roomsDeleted: roomIds.length,
+ memoriesDeleted: memoryIdsInRooms.length
+ }, "World cleanup completed");
  }
  });
  }
@@ -10245,6 +11193,26 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  } : null;
  });
  }
+ async getMessageServerByRlsServerId(rlsServerId) {
+ return this.withDatabase(async () => {
+ const results = await this.db.execute(sql`
+ SELECT id, name, source_type, source_id, metadata, created_at, updated_at
+ FROM message_servers
+ WHERE server_id = ${rlsServerId}
+ LIMIT 1
+ `);
+ const rows = results.rows || results;
+ return rows.length > 0 ? {
+ id: rows[0].id,
+ name: rows[0].name,
+ sourceType: rows[0].source_type,
+ sourceId: rows[0].source_id || undefined,
+ metadata: rows[0].metadata || undefined,
+ createdAt: new Date(rows[0].created_at),
+ updatedAt: new Date(rows[0].updated_at)
+ } : null;
+ });
+ }
  async createChannel(data, participantIds) {
  return this.withDatabase(async () => {
  const newId = data.id || v4();
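The new getMessageServerByRlsServerId() reads raw snake_case rows and maps them to the camelCase shape the adapter returns elsewhere. A hedged sketch of that mapping step; the row interface below is an assumption inferred from the SELECT column list, not a type shipped by the package.

interface MessageServerRow {
  id: string;
  name: string;
  source_type: string;
  source_id: string | null;
  metadata: Record<string, unknown> | null;
  created_at: string | Date;
  updated_at: string | Date;
}

function mapMessageServerRow(row: MessageServerRow) {
  return {
    id: row.id,
    name: row.name,
    sourceType: row.source_type,
    sourceId: row.source_id || undefined,       // null columns become undefined, as in the bundle
    metadata: row.metadata || undefined,
    createdAt: new Date(row.created_at),        // timestamps are rehydrated into Date objects
    updatedAt: new Date(row.updated_at)
  };
}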
@@ -10264,9 +11232,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  await this.db.transaction(async (tx) => {
  await tx.insert(channelTable).values(channelToInsert);
  if (participantIds && participantIds.length > 0) {
- const participantValues = participantIds.map((
+ const participantValues = participantIds.map((entityId) => ({
  channelId: newId,
-
+ entityId
  }));
  await tx.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
  }
@@ -10274,9 +11242,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  return channelToInsert;
  });
  }
- async
+ async getChannelsForMessageServer(messageServerId) {
  return this.withDatabase(async () => {
- const results = await this.db.select().from(channelTable).where(eq(channelTable.messageServerId,
+ const results = await this.db.select().from(channelTable).where(eq(channelTable.messageServerId, messageServerId));
  return results.map((r) => ({
  id: r.id,
  messageServerId: r.messageServerId,
@@ -10398,9 +11366,9 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  if (updates.participantCentralUserIds !== undefined) {
  await tx.delete(channelParticipantsTable).where(eq(channelParticipantsTable.channelId, channelId));
  if (updates.participantCentralUserIds.length > 0) {
- const participantValues = updates.participantCentralUserIds.map((
+ const participantValues = updates.participantCentralUserIds.map((entityId) => ({
  channelId,
-
+ entityId
  }));
  await tx.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
  }
@@ -10422,40 +11390,46 @@ class BaseDrizzleAdapter extends DatabaseAdapter {
  });
  });
  }
- async addChannelParticipants(channelId,
+ async addChannelParticipants(channelId, entityIds) {
  return this.withDatabase(async () => {
- if (!
+ if (!entityIds || entityIds.length === 0)
  return;
- const participantValues =
+ const participantValues = entityIds.map((entityId) => ({
  channelId,
-
+ entityId
  }));
  await this.db.insert(channelParticipantsTable).values(participantValues).onConflictDoNothing();
  });
  }
  async getChannelParticipants(channelId) {
  return this.withDatabase(async () => {
- const results = await this.db.select({
- return results.map((r) => r.
+ const results = await this.db.select({ entityId: channelParticipantsTable.entityId }).from(channelParticipantsTable).where(eq(channelParticipantsTable.channelId, channelId));
+ return results.map((r) => r.entityId);
+ });
+ }
+ async isChannelParticipant(channelId, entityId) {
+ return this.withDatabase(async () => {
+ const result = await this.db.select().from(channelParticipantsTable).where(and(eq(channelParticipantsTable.channelId, channelId), eq(channelParticipantsTable.entityId, entityId))).limit(1);
+ return result.length > 0;
  });
  }
- async
+ async addAgentToMessageServer(messageServerId, agentId) {
  return this.withDatabase(async () => {
- await this.db.insert(
-
+ await this.db.insert(messageServerAgentsTable).values({
+ messageServerId,
  agentId
  }).onConflictDoNothing();
  });
  }
- async
+ async getAgentsForMessageServer(messageServerId) {
  return this.withDatabase(async () => {
- const results = await this.db.select({ agentId:
+ const results = await this.db.select({ agentId: messageServerAgentsTable.agentId }).from(messageServerAgentsTable).where(eq(messageServerAgentsTable.messageServerId, messageServerId));
  return results.map((r) => r.agentId);
  });
  }
- async
+ async removeAgentFromMessageServer(messageServerId, agentId) {
  return this.withDatabase(async () => {
- await this.db.delete(
+ await this.db.delete(messageServerAgentsTable).where(and(eq(messageServerAgentsTable.messageServerId, messageServerId), eq(messageServerAgentsTable.agentId, agentId)));
  });
  }
  async findOrCreateDmChannel(user1Id, user2Id, messageServerId) {
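This hunk fills in the channel-participant and message-server agent helpers (addChannelParticipants, isChannelParticipant, addAgentToMessageServer, getAgentsForMessageServer, removeAgentFromMessageServer). A usage sketch follows; the method names and argument order are read off the diff, but the adapter type and the rotateAgent scenario are illustrative assumptions, not package API documentation.

type MessageServerAdapter = {
  addAgentToMessageServer(messageServerId: string, agentId: string): Promise<void>;
  getAgentsForMessageServer(messageServerId: string): Promise<string[]>;
  removeAgentFromMessageServer(messageServerId: string, agentId: string): Promise<void>;
  isChannelParticipant(channelId: string, entityId: string): Promise<boolean>;
};

async function rotateAgent(adapter: MessageServerAdapter, messageServerId: string, oldAgentId: string, newAgentId: string) {
  // The underlying insert uses onConflictDoNothing(), so repeating this call is harmless.
  await adapter.addAgentToMessageServer(messageServerId, newAgentId);
  const agents = await adapter.getAgentsForMessageServer(messageServerId);
  if (agents.includes(oldAgentId)) {
    await adapter.removeAgentFromMessageServer(messageServerId, oldAgentId);
  }
}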
@@ -10496,11 +11470,14 @@ class PgliteDatabaseAdapter extends BaseDrizzleAdapter {
  this.manager = manager;
  this.db = drizzle(this.manager.getConnection());
  }
+ async withEntityContext(_entityId, callback) {
+ return this.db.transaction(callback);
+ }
  async getEntityByIds(entityIds) {
  return this.getEntitiesByIds(entityIds);
  }
  async getMemoriesByServerId(_params) {
-
+ logger11.warn({ src: "plugin:sql" }, "getMemoriesByServerId called but not implemented");
  return [];
  }
  async ensureAgentExists(agent) {
@@ -10525,13 +11502,13 @@ class PgliteDatabaseAdapter extends BaseDrizzleAdapter {
  }
  async withDatabase(operation) {
  if (this.manager.isShuttingDown()) {
-
+ logger11.warn({ src: "plugin:sql" }, "Database is shutting down");
  return null;
  }
  return operation();
  }
  async init() {
-
+ logger11.debug({ src: "plugin:sql" }, "PGliteDatabaseAdapter initialized");
  }
  async isReady() {
  return !this.manager.isShuttingDown();
@@ -10576,7 +11553,7 @@ class PGliteClientManager {
  }

  // src/pg/adapter.ts
- import { logger as
+ import { logger as logger12 } from "@elizaos/core";

  // ../../node_modules/drizzle-orm/node-postgres/driver.js
  init_entity();
@@ -10597,14 +11574,15 @@ init_sql();
  init_tracing();
  init_utils();
  var { Pool, types: types3 } = pg;
+ var NativePool = pg.native ? pg.native.Pool : undefined;

  class NodePgPreparedQuery extends PgPreparedQuery {
- constructor(client, queryString, params,
+ constructor(client, queryString, params, logger12, cache, queryMetadata, cacheConfig, fields, name, _isResponseInArrayMode, customResultMapper) {
  super({ sql: queryString, params }, cache, queryMetadata, cacheConfig);
  this.client = client;
  this.queryString = queryString;
  this.params = params;
- this.logger =
+ this.logger = logger12;
  this.fields = fields;
  this._isResponseInArrayMode = _isResponseInArrayMode;
  this.customResultMapper = customResultMapper;
@@ -10754,7 +11732,7 @@ class NodePgSession extends PgSession {
  return new NodePgPreparedQuery(this.client, query.sql, query.params, this.logger, this.cache, queryMetadata, cacheConfig, fields, name, isResponseInArrayMode, customResultMapper);
  }
  async transaction(transaction, config) {
- const session2 = this.client instanceof Pool ? new NodePgSession(await this.client.connect(), this.dialect, this.schema, this.options) : this;
+ const session2 = this.client instanceof Pool || NativePool && this.client instanceof NativePool ? new NodePgSession(await this.client.connect(), this.dialect, this.schema, this.options) : this;
  const tx = new NodePgTransaction(this.dialect, session2, this.schema);
  await tx.execute(sql`begin${config ? sql` ${tx.getTransactionConfigSQL(config)}` : undefined}`);
  try {
@@ -10765,7 +11743,7 @@ class NodePgSession extends PgSession {
  await tx.execute(sql`rollback`);
  throw error;
  } finally {
- if (this.client instanceof Pool) {
+ if (this.client instanceof Pool || NativePool && this.client instanceof NativePool) {
  session2.client.release();
  }
  }
@@ -10814,11 +11792,11 @@ class NodePgDatabase extends PgDatabase {
  }
  function construct2(client, config = {}) {
  const dialect2 = new PgDialect({ casing: config.casing });
- let
+ let logger12;
  if (config.logger === true) {
-
+ logger12 = new DefaultLogger;
  } else if (config.logger !== false) {
-
+ logger12 = config.logger;
  }
  let schema2;
  if (config.schema) {
@@ -10829,7 +11807,7 @@ function construct2(client, config = {}) {
  tableNamesMap: tablesConfig.tableNamesMap
  };
  }
- const driver = new NodePgDriver(client, dialect2, { logger:
+ const driver = new NodePgDriver(client, dialect2, { logger: logger12, cache: config.cache });
  const session2 = driver.createSession(schema2);
  const db2 = new NodePgDatabase(dialect2, session2, schema2);
  db2.$client = client;
@@ -10873,11 +11851,17 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
  this.manager = manager;
  this.db = manager.getDatabase();
  }
+ getManager() {
+ return this.manager;
+ }
+ async withEntityContext(entityId, callback) {
+ return await this.manager.withEntityContext(entityId, callback);
+ }
  async getEntityByIds(entityIds) {
  return this.getEntitiesByIds(entityIds);
  }
  async getMemoriesByServerId(_params) {
-
+ logger12.warn({ src: "plugin:sql" }, "getMemoriesByServerId called but not implemented");
  return [];
  }
  async ensureAgentExists(agent) {
@@ -10913,7 +11897,7 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
  });
  }
  async init() {
-
+ logger12.debug({ src: "plugin:sql" }, "PgDatabaseAdapter initialized");
  }
  async isReady() {
  return this.manager.testConnection();
@@ -10975,20 +11959,21 @@ class PgDatabaseAdapter extends BaseDrizzleAdapter {
  }

  // src/pg/manager.ts
+ init_drizzle_orm();
  import { Pool as Pool2 } from "pg";
- import { logger as
+ import { logger as logger13 } from "@elizaos/core";

  class PostgresConnectionManager {
  pool;
  db;
- constructor(connectionString,
+ constructor(connectionString, rlsServerId) {
  const poolConfig = { connectionString };
- if (
- poolConfig.application_name =
-
+ if (rlsServerId) {
+ poolConfig.application_name = rlsServerId;
+ logger13.debug({ src: "plugin:sql", rlsServerId: rlsServerId.substring(0, 8) }, "Pool configured with RLS server");
  }
  this.pool = new Pool2(poolConfig);
- this.db = drizzle2(this.pool);
+ this.db = drizzle2(this.pool, { casing: "snake_case" });
  }
  getDatabase() {
  return this.db;
@@ -11006,7 +11991,7 @@ class PostgresConnectionManager {
  await client.query("SELECT 1");
  return true;
  } catch (error) {
-
+ logger13.error({ src: "plugin:sql", error: error instanceof Error ? error.message : String(error) }, "Failed to connect to the database");
  return false;
  } finally {
  if (client) {
@@ -11014,6 +11999,27 @@
  }
  }
  }
+ async withEntityContext(entityId, callback) {
+ return await this.db.transaction(async (tx) => {
+ if (entityId) {
+ try {
+ await tx.execute(sql.raw(`SET LOCAL app.entity_id = '${entityId}'`));
+ logger13.debug(`[Entity Context] Set app.entity_id = ${entityId}`);
+ } catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ if (errorMessage.includes("unrecognized configuration parameter") || errorMessage.includes("app.entity_id")) {
+ logger13.debug("[Entity Context] Entity RLS not enabled, executing without entity context");
+ } else {
+ logger13.error({ error, entityId }, "[Entity Context] Critical error setting entity context - this may indicate a configuration issue");
+ logger13.warn("[Entity Context] Continuing without entity context due to error - data isolation may be compromised");
+ }
+ }
+ } else {
+ logger13.debug("[Entity Context] No entity context set (server operation)");
+ }
+ return await callback(tx);
+ });
+ }
  async close() {
  await this.pool.end();
  }
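The withEntityContext() added above wraps a transaction, issues SET LOCAL app.entity_id before running the callback, and degrades gracefully when the parameter is not configured. A sketch of how a caller might rely on it; the RLS policy mentioned in the comment is an assumption (no such policy ships in this bundle), and the memories table name is illustrative.

import { sql } from "drizzle-orm";

async function readScopedRows(
  manager: { withEntityContext<T>(entityId: string | undefined, cb: (tx: any) => Promise<T>): Promise<T> },
  entityId: string
) {
  return manager.withEntityContext(entityId, async (tx) => {
    // SET LOCAL app.entity_id = '<entityId>' has already run inside this transaction, so a
    // policy such as USING (entity_id::text = current_setting('app.entity_id', true)) -- assumed,
    // not shown in this diff -- would scope the rows returned here to the caller's entity.
    return tx.execute(sql`SELECT id FROM memories`);
  });
}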
@@ -11071,246 +12077,7 @@ function resolvePgliteDir(dir, fallbackDir) {

  // src/index.node.ts
  init_migration_service();
-
- // src/rls.ts
- init_drizzle_orm();
- import { logger as logger12, validateUuid } from "@elizaos/core";
- async function installRLSFunctions(adapter) {
- const db2 = adapter.db;
- await db2.execute(sql`
- CREATE TABLE IF NOT EXISTS owners (
- id UUID PRIMARY KEY,
- created_at TIMESTAMPTZ DEFAULT NOW() NOT NULL,
- updated_at TIMESTAMPTZ DEFAULT NOW() NOT NULL
- )
- `);
- await db2.execute(sql`
- CREATE OR REPLACE FUNCTION current_owner_id() RETURNS UUID AS $$
- DECLARE
- app_name TEXT;
- BEGIN
- app_name := NULLIF(current_setting('application_name', TRUE), '');
-
- -- Return NULL if application_name is not set or not a valid UUID
- -- This allows admin queries to work without RLS restrictions
- BEGIN
- RETURN app_name::UUID;
- EXCEPTION WHEN OTHERS THEN
- RETURN NULL;
- END;
- END;
- $$ LANGUAGE plpgsql STABLE;
- `);
- await db2.execute(sql`
- CREATE OR REPLACE FUNCTION add_owner_isolation(
- schema_name text,
- table_name text
- ) RETURNS void AS $$
- DECLARE
- full_table_name text;
- column_exists boolean;
- orphaned_count bigint;
- BEGIN
- full_table_name := schema_name || '.' || table_name;
-
- -- Check if owner_id column already exists
- SELECT EXISTS (
- SELECT 1 FROM information_schema.columns
- WHERE information_schema.columns.table_schema = schema_name
- AND information_schema.columns.table_name = add_owner_isolation.table_name
- AND information_schema.columns.column_name = 'owner_id'
- ) INTO column_exists;
-
- -- Add owner_id column if missing (DEFAULT populates it automatically for new rows)
- IF NOT column_exists THEN
- EXECUTE format('ALTER TABLE %I.%I ADD COLUMN owner_id UUID DEFAULT current_owner_id()', schema_name, table_name);
-
- -- Backfill existing rows with current owner_id
- -- This ensures all existing data belongs to the tenant that is enabling RLS
- EXECUTE format('UPDATE %I.%I SET owner_id = current_owner_id() WHERE owner_id IS NULL', schema_name, table_name);
- ELSE
- -- Column already exists (RLS was previously enabled then disabled)
- -- Restore the DEFAULT clause (may have been removed during uninstallRLS)
- EXECUTE format('ALTER TABLE %I.%I ALTER COLUMN owner_id SET DEFAULT current_owner_id()', schema_name, table_name);
-
- -- Only backfill NULL owner_id rows, do NOT steal data from other owners
- EXECUTE format('SELECT COUNT(*) FROM %I.%I WHERE owner_id IS NULL', schema_name, table_name) INTO orphaned_count;
-
- IF orphaned_count > 0 THEN
- RAISE NOTICE 'Backfilling % rows with NULL owner_id in %.%', orphaned_count, schema_name, table_name;
- EXECUTE format('UPDATE %I.%I SET owner_id = current_owner_id() WHERE owner_id IS NULL', schema_name, table_name);
- END IF;
- END IF;
-
- -- Create index for efficient owner_id filtering
- EXECUTE format('CREATE INDEX IF NOT EXISTS idx_%I_owner_id ON %I.%I(owner_id)', table_name, schema_name, table_name);
-
- -- Enable RLS on the table
- EXECUTE format('ALTER TABLE %I.%I ENABLE ROW LEVEL SECURITY', schema_name, table_name);
-
- -- FORCE RLS even for table owners (critical for security)
- EXECUTE format('ALTER TABLE %I.%I FORCE ROW LEVEL SECURITY', schema_name, table_name);
-
- -- Drop existing policy if present
- EXECUTE format('DROP POLICY IF EXISTS owner_isolation_policy ON %I.%I', schema_name, table_name);
-
- -- Create isolation policy: users can only see/modify rows where owner_id matches current tenant
- -- No NULL clause - all rows must have a valid owner_id (backfilled during column addition)
- EXECUTE format('
- CREATE POLICY owner_isolation_policy ON %I.%I
- USING (owner_id = current_owner_id())
- WITH CHECK (owner_id = current_owner_id())
- ', schema_name, table_name);
- END;
- $$ LANGUAGE plpgsql;
- `);
- await db2.execute(sql`
- CREATE OR REPLACE FUNCTION apply_rls_to_all_tables() RETURNS void AS $$
- DECLARE
- tbl record;
- BEGIN
- FOR tbl IN
- SELECT schemaname, tablename
- FROM pg_tables
- WHERE schemaname = 'public'
- AND tablename NOT IN (
- 'owners',
- 'drizzle_migrations',
- '__drizzle_migrations',
- 'server_agents'
- )
- LOOP
- BEGIN
- PERFORM add_owner_isolation(tbl.schemaname, tbl.tablename);
- EXCEPTION WHEN OTHERS THEN
- RAISE WARNING 'Failed to apply RLS to %.%: %', tbl.schemaname, tbl.tablename, SQLERRM;
- END;
- END LOOP;
- END;
- $$ LANGUAGE plpgsql;
- `);
- logger12.info("[RLS] PostgreSQL functions installed");
- }
- async function getOrCreateRlsOwner(adapter, ownerId) {
- const db2 = adapter.db;
- await db2.insert(ownersTable).values({
- id: ownerId
- }).onConflictDoNothing();
- logger12.info(`[RLS] Owner: ${ownerId.slice(0, 8)}…`);
- return ownerId;
- }
- async function setOwnerContext(adapter, ownerId) {
- if (!validateUuid(ownerId)) {
- throw new Error(`Invalid owner ID format: ${ownerId}. Must be a valid UUID.`);
- }
- const db2 = adapter.db;
- const owners = await db2.select().from(ownersTable).where(eq(ownersTable.id, ownerId));
- if (owners.length === 0) {
- throw new Error(`Owner ${ownerId} does not exist`);
- }
- logger12.info(`[RLS] Owner: ${ownerId.slice(0, 8)}…`);
- logger12.info("[RLS] Context configured successfully (using application_name)");
- }
- async function assignAgentToOwner(adapter, agentId, ownerId) {
- const db2 = adapter.db;
- const agents = await db2.select().from(agentTable).where(eq(agentTable.id, agentId));
- if (agents.length > 0) {
- const agent = agents[0];
- const currentOwnerId = agent.owner_id;
- if (currentOwnerId === ownerId) {
- logger12.debug(`[RLS] Agent ${agent.name} already owned by correct owner`);
- } else {
- await db2.update(agentTable).set({ owner_id: ownerId }).where(eq(agentTable.id, agentId));
- if (currentOwnerId === null) {
- logger12.info(`[RLS] Agent ${agent.name} assigned to owner`);
- } else {
- logger12.warn(`[RLS] Agent ${agent.name} owner changed`);
- }
- }
- } else {
- logger12.debug(`[RLS] Agent ${agentId} doesn't exist yet`);
- }
- }
- async function applyRLSToNewTables(adapter) {
- const db2 = adapter.db;
- try {
- await db2.execute(sql`SELECT apply_rls_to_all_tables()`);
- logger12.info("[RLS] Applied to all tables");
- } catch (error) {
- logger12.warn("[RLS] Failed to apply to some tables:", String(error));
- }
- }
- async function uninstallRLS(adapter) {
- const db2 = adapter.db;
- try {
- const checkResult = await db2.execute(sql`
- SELECT EXISTS (
- SELECT FROM pg_tables
- WHERE schemaname = 'public' AND tablename = 'owners'
- ) as rls_enabled
- `);
- const rlsEnabled = checkResult.rows?.[0]?.rls_enabled;
- if (!rlsEnabled) {
- logger12.debug("[RLS] RLS not installed, skipping cleanup");
- return;
- }
- logger12.info("[RLS] Disabling RLS globally (keeping owner_id columns for schema compatibility)...");
- await db2.execute(sql`
- CREATE OR REPLACE FUNCTION _temp_disable_rls_on_table(
- p_schema_name text,
- p_table_name text
- ) RETURNS void AS $$
- DECLARE
- policy_rec record;
- BEGIN
- -- Drop all policies on this table
- FOR policy_rec IN
- SELECT policyname
- FROM pg_policies
- WHERE schemaname = p_schema_name AND tablename = p_table_name
- LOOP
- EXECUTE format('DROP POLICY IF EXISTS %I ON %I.%I',
- policy_rec.policyname, p_schema_name, p_table_name);
- END LOOP;
-
- -- Disable RLS
- EXECUTE format('ALTER TABLE %I.%I NO FORCE ROW LEVEL SECURITY', p_schema_name, p_table_name);
- EXECUTE format('ALTER TABLE %I.%I DISABLE ROW LEVEL SECURITY', p_schema_name, p_table_name);
- END;
- $$ LANGUAGE plpgsql;
- `);
- const tablesResult = await db2.execute(sql`
- SELECT schemaname, tablename
- FROM pg_tables
- WHERE schemaname = 'public'
- AND tablename NOT IN ('drizzle_migrations', '__drizzle_migrations')
- `);
- for (const row of tablesResult.rows || []) {
- const schemaName = row.schemaname;
- const tableName = row.tablename;
- try {
- await db2.execute(sql`SELECT _temp_disable_rls_on_table(${schemaName}, ${tableName})`);
- logger12.debug(`[RLS] Disabled RLS on table: ${schemaName}.${tableName}`);
- } catch (error) {
- logger12.warn(`[RLS] Failed to disable RLS on table ${schemaName}.${tableName}:`, String(error));
- }
- }
- await db2.execute(sql`DROP FUNCTION IF EXISTS _temp_disable_rls_on_table(text, text)`);
- logger12.info("[RLS] Keeping owner_id values intact (prevents data theft on re-enable)");
- logger12.info("[RLS] Clearing owners table...");
- await db2.execute(sql`TRUNCATE TABLE owners`);
- await db2.execute(sql`DROP FUNCTION IF EXISTS apply_rls_to_all_tables() CASCADE`);
- await db2.execute(sql`DROP FUNCTION IF EXISTS add_owner_isolation(text, text) CASCADE`);
- await db2.execute(sql`DROP FUNCTION IF EXISTS current_owner_id() CASCADE`);
- logger12.info("[RLS] Dropped all RLS functions");
- logger12.success("[RLS] RLS disabled successfully (owner_id columns preserved)");
- } catch (error) {
- logger12.error("[RLS] Failed to disable RLS:", String(error));
- throw error;
- }
- }
-
- // src/index.node.ts
+ init_rls();
  var GLOBAL_SINGLETONS = Symbol.for("@elizaos/plugin-sql/global-singletons");
  var globalSymbols = globalThis;
  if (!globalSymbols[GLOBAL_SINGLETONS]) {
@@ -11319,30 +12086,37 @@ if (!globalSymbols[GLOBAL_SINGLETONS]) {
  var globalSingletons = globalSymbols[GLOBAL_SINGLETONS];
  function createDatabaseAdapter(config, agentId) {
  if (config.postgresUrl) {
- const
- let
+ const dataIsolationEnabled = process.env.ENABLE_DATA_ISOLATION === "true";
+ let rlsServerId;
  let managerKey = "default";
- if (
- const
- if (!
- throw new Error("[
- }
-
- managerKey =
-
+ if (dataIsolationEnabled) {
+ const rlsServerIdString = process.env.ELIZA_SERVER_ID;
+ if (!rlsServerIdString) {
+ throw new Error("[Data Isolation] ENABLE_DATA_ISOLATION=true requires ELIZA_SERVER_ID environment variable");
+ }
+ rlsServerId = stringToUuid(rlsServerIdString);
+ managerKey = rlsServerId;
+ logger14.debug({
+ src: "plugin:sql",
+ rlsServerId: rlsServerId.slice(0, 8),
+ serverIdString: rlsServerIdString
+ }, "Using connection pool for RLS server");
  }
  if (!globalSingletons.postgresConnectionManagers) {
  globalSingletons.postgresConnectionManagers = new Map;
  }
  let manager = globalSingletons.postgresConnectionManagers.get(managerKey);
  if (!manager) {
-
- manager = new PostgresConnectionManager(config.postgresUrl,
+ logger14.debug({ src: "plugin:sql", managerKey: managerKey.slice(0, 8) }, "Creating new connection pool");
+ manager = new PostgresConnectionManager(config.postgresUrl, rlsServerId);
  globalSingletons.postgresConnectionManagers.set(managerKey, manager);
  }
  return new PgDatabaseAdapter(agentId, manager);
  }
  const dataDir = resolvePgliteDir(config.dataDir);
+ if (dataDir && !dataDir.includes("://")) {
+ mkdirSync(dataDir, { recursive: true });
+ }
  if (!globalSingletons.pgLiteClientManager) {
  globalSingletons.pgLiteClientManager = new PGliteClientManager({ dataDir });
  }
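createDatabaseAdapter() now gates per-server RLS pooling behind ENABLE_DATA_ISOLATION and derives the pool key from ELIZA_SERVER_ID via stringToUuid. A condensed sketch of that gating, using only the environment variables and the @elizaos/core helper visible above; the function name below is hypothetical.

import { stringToUuid } from "@elizaos/core";

function resolveRlsServerId(): string | undefined {
  if (process.env.ENABLE_DATA_ISOLATION !== "true") return undefined;
  const raw = process.env.ELIZA_SERVER_ID;
  if (!raw) {
    throw new Error("[Data Isolation] ENABLE_DATA_ISOLATION=true requires ELIZA_SERVER_ID environment variable");
  }
  // The normalized UUID doubles as the connection-pool cache key and, per the
  // PostgresConnectionManager hunk above, as application_name on the pg pool.
  return stringToUuid(raw);
}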
@@ -11354,18 +12128,18 @@ var plugin = {
  priority: 0,
  schema: exports_schema,
  init: async (_config, runtime) => {
-
+ runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "plugin-sql (node) init starting");
  const adapterRegistered = await runtime.isReady().then(() => true).catch((error) => {
  const message = error instanceof Error ? error.message : String(error);
  if (message.includes("Database adapter not registered")) {
-
+ runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "No pre-registered database adapter detected; registering adapter");
  } else {
-
+ runtime.logger.warn({ src: "plugin:sql", agentId: runtime.agentId, error: message }, "Database adapter readiness check error; proceeding to register adapter");
  }
  return false;
  });
  if (adapterRegistered) {
-
+ runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "Database adapter already registered, skipping creation");
  return;
  }
  const postgresUrl = runtime.getSetting("POSTGRES_URL");
@@ -11375,22 +12149,22 @@ var plugin = {
  postgresUrl
  }, runtime.agentId);
  runtime.registerDatabaseAdapter(dbAdapter);
-
+ runtime.logger.info({ src: "plugin:sql", agentId: runtime.agentId }, "Database adapter created and registered");
  }
  };
  var index_node_default = plugin;
  export {
  uninstallRLS,
-
+ setServerContext,
  plugin,
  installRLSFunctions,
-
+ getOrCreateRlsServer,
  index_node_default as default,
  createDatabaseAdapter,
-
+ assignAgentToServer,
  applyRLSToNewTables,
  DatabaseMigrationService
  };

- //# debugId=
+ //# debugId=EA546D2333E8154064756E2164756E21
  //# sourceMappingURL=index.node.js.map