pgrift 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.env.example ADDED
@@ -0,0 +1,38 @@
1
+ # Connection (URL preferred)
2
+ SOURCE_URL=postgresql://postgres:password@localhost:5432
3
+ TARGET_URL=postgresql://postgres:password@localhost:5432/tenants
4
+
5
+ # OR individual fields (used only if URL not set)
6
+ # SOURCE_HOST=localhost
7
+ # SOURCE_PORT=5432
8
+ # SOURCE_USER=postgres
9
+ # SOURCE_PASSWORD=
10
+ # TARGET_HOST=localhost
11
+ # TARGET_PORT=5432
12
+ # TARGET_USER=postgres
13
+ # TARGET_PASSWORD=
14
+ # TARGET_DATABASE=tenants
15
+
16
+ # Migration
17
+ FILTER_PREFIX=
18
+ DUMP_DIR=/tmp/pg_migration_dumps
19
+ STATE_FILE=./migration-state.json
20
+ CONCURRENCY=10
21
+ EXEC_TIMEOUT_MS=600000
22
+ # Skip checksum verification for tables with more than N rows (optional)
23
+ # SKIP_CHECKSUM_ABOVE_ROWS=100000
24
+
25
+ # Benchmark / script helpers
26
+ # Path to JSON array of DB or schema names (used by comparison, manyBases, manySchemas)
27
+ DB_LIST_PATH=./scripts/db-list.json
28
+ # Table name used by benchmark scripts (e.g. users_data)
29
+ BENCH_TABLE=users_data
30
+ # Column used as synthetic ID in benchmarks (e.g. dbId)
31
+ BENCH_ID_COLUMN=dbId
32
+ # Column used as name field in benchmarks (e.g. name)
33
+ BENCH_NAME_COLUMN=name
34
+
35
+ # createRandomDb script only
36
+ # CREATE_DB_COUNT=1000
37
+ # CREATE_DB_PREFIX=bench_db_
38
+ # CREATE_DB_PROJECT_PATH=
package/README.md ADDED
@@ -0,0 +1,63 @@
1
+ # pgrift
2
+
3
+ Moves many PostgreSQL databases (one per tenant) into a single DB: each tenant becomes a schema. Uses in-place rename on source (`public` → tenant name), `pg_dump`, then `psql` into the target. State is saved so you can resume after a crash.
4
+
5
+ **Requires:** Node 18+, `pg_dump` and `psql` in PATH.
6
+
7
+ ## Setup
8
+
9
+ ```bash
10
+ cp .env.example .env
11
+ # edit .env: SOURCE_URL, TARGET_URL (and optionally DUMP_DIR, STATE_FILE, CONCURRENCY, FILTER_PREFIX)
12
+ npm install
13
+ ```
14
+
15
+ Config is read from env (see `.env.example`). Main options: `SOURCE_URL`, `TARGET_URL`, `TARGET_DATABASE` (default `tenants`), `DUMP_DIR`, `STATE_FILE`, `CONCURRENCY`, `FILTER_PREFIX` (e.g. `bench_db_` to only migrate those DBs).
16
+
17
+ ## Commands
18
+
19
+ | Command | What it does |
20
+ |--------|----------------|
21
+ | `pgrift` / `npx pgrift` / `npm run dev` | Run migration |
22
+ | `npm run verify` | Compare source DBs vs target schemas: table list, row counts, optional checksums |
23
+ | `npm run cleanup` | Remove all tenant schemas from target, state file, dump dir contents, and `migration-report.json`. Asks for `DELETE ALL` before dropping schemas |
24
+ | `npm run lint` | Run Biome linter (`just lint`) |
25
+ | `npm run lint:fix` | Lint and apply safe fixes |
26
+ | `npm run format` | Format code with Biome |
27
+
28
+ After an interrupt, run `npm run dev` again; completed tenants are skipped.
29
+
30
+ ## What the migration does per tenant
31
+
32
+ 1. Terminate connections to the source DB.
33
+ 2. In source: `ALTER SCHEMA public RENAME TO "<tenant>"`, create new `public`, set DB `search_path`.
34
+ 3. `pg_dump -n "<tenant>"` to a file (fixes `gin_trgm_ops` schema in dump if present).
35
+ 4. Rollback source: restore `public`, reset `search_path`.
36
+ 5. In target: create schema `"<tenant>"`, apply dump with `psql -f`.
37
+
38
+ Extensions from the source DB are created in target `public` if possible; no custom format or `pg_restore`.
39
+
40
+ ## Verification
41
+
42
+ Built-in (after each tenant in the migration): same tables, row counts, and MD5 checksums per table (optional skip for large tables via `SKIP_CHECKSUM_ABOVE_ROWS`).
43
+
44
+ Standalone: `npm run verify [db1 db2 ...]`. With no args, uses all tenant DBs from source.
45
+
46
+ ## Other scripts
47
+
48
+ - `npm run comparison` — load test: separate DBs vs single-DB schemas (writes `heavy-load-results.json`).
49
+ - `npm run many-schemas` / `many-bases` — benchmark helpers.
50
+ - `npm run create-db` — create random DBs for testing.
51
+
52
+ ## Caveats
53
+
54
+ - Source must have only `public` (or you’ll need to handle other schemas yourself). Test on one tenant first.
55
+ - Extensions are re-created in target from the list in source; some may fail if already present or incompatible.
56
+ - Cleanup wipes all non-system schemas in the target and local migration state; use for dev/test only.
57
+
58
+ ## Publishing to npm
59
+
60
+ 1. Create an account at [npmjs.com](https://www.npmjs.com) and run `npm login`.
61
+ 2. Bump version if needed: `npm version patch` (or `minor` / `major`).
62
+ 3. Build and publish: `npm run build && npm publish`.
63
+ 4. For scoped packages (e.g. `@username/pgrift`): use `npm publish --access public`.
@@ -0,0 +1,9 @@
1
+ #!/usr/bin/env ts-node
2
+ /**
3
+ * Cleanup script - removes all custom schemas from target database,
4
+ * state file, dump directory contents, and migration report.
5
+ * Use only for testing!
6
+ *
7
+ * Usage: npm run cleanup (or ts-node cleanup.ts)
8
+ */
9
+ export {};
@@ -0,0 +1,114 @@
1
+ #!/usr/bin/env ts-node
2
+ "use strict";
3
+ /**
4
+ * Cleanup script - removes all custom schemas from target database,
5
+ * state file, dump directory contents, and migration report.
6
+ * Use only for testing!
7
+ *
8
+ * Usage: npm run cleanup (or ts-node cleanup.ts)
9
+ */
10
+ var __importDefault = (this && this.__importDefault) || function (mod) {
11
+ return (mod && mod.__esModule) ? mod : { "default": mod };
12
+ };
13
+ Object.defineProperty(exports, "__esModule", { value: true });
14
+ const node_fs_1 = __importDefault(require("node:fs"));
15
+ const node_path_1 = __importDefault(require("node:path"));
16
+ const pg_1 = require("pg");
17
+ const config_1 = require("./src/config");
18
/** Remove local migration artifacts: state file, report, and dump-dir files. */
function cleanLocalFiles() {
    // State file
    const stateFile = config_1.CONFIG.stateFile;
    if (node_fs_1.default.existsSync(stateFile)) {
        node_fs_1.default.unlinkSync(stateFile);
        console.log("Deleted state file:", stateFile);
    }
    // Migration report
    const reportPath = "./migration-report.json";
    if (node_fs_1.default.existsSync(reportPath)) {
        node_fs_1.default.unlinkSync(reportPath);
        console.log("Deleted:", reportPath);
    }
    // Dump directory: delete plain files only (e.g. *.final.dump), keep subdirs
    const dumpDir = config_1.CONFIG.dumpDir;
    if (!node_fs_1.default.existsSync(dumpDir))
        return;
    const files = node_fs_1.default
        .readdirSync(dumpDir, { withFileTypes: true })
        .filter((ent) => ent.isFile());
    for (const ent of files) {
        node_fs_1.default.unlinkSync(node_path_1.default.join(dumpDir, ent.name));
    }
    if (files.length > 0) {
        console.log(`Deleted ${files.length} file(s) in dump dir: ${dumpDir}`);
    }
}
46
/**
 * Drop every non-system schema in the target DB after interactive
 * confirmation ("DELETE ALL"). Local artifacts are removed first via
 * cleanLocalFiles(). Failures on individual schemas are logged, not fatal.
 */
async function cleanupSchemas() {
    cleanLocalFiles();
    const client = new pg_1.Client(config_1.CONFIG.target);
    try {
        await client.connect();
        console.log("Connected to target DB:", config_1.CONFIG.target.database);
        // Get all custom schemas (exclude system schemas)
        const { rows } = await client.query(`
      SELECT schema_name
      FROM information_schema.schemata
      WHERE schema_name NOT IN (
        'information_schema',
        'pg_catalog',
        'pg_toast',
        'public'
      )
      ORDER BY schema_name
    `);
        const schemas = rows.map((r) => r.schema_name);
        if (schemas.length === 0) {
            console.log("No custom schemas found. Cleanup complete.");
            return;
        }
        console.log(`Found ${schemas.length} schemas to delete:`);
        for (const schema of schemas)
            console.log(`  ${schema}`);
        // Confirmation
        const readline = require("node:readline").createInterface({
            input: process.stdin,
            output: process.stdout,
        });
        const response = await new Promise((resolve) => {
            readline.question("Type 'DELETE ALL' to continue: ", (answer) => {
                readline.close();
                resolve(answer);
            });
        });
        if (response !== "DELETE ALL") {
            console.log("Aborted");
            return;
        }
        // Delete schemas
        let deleted = 0;
        for (const schema of schemas) {
            try {
                // NOTE(review): heuristic — terminates any backend in the target
                // DB whose current query text merely mentions the schema name;
                // confirm this cannot kill unrelated sessions.
                await client.query(`
          SELECT pg_terminate_backend(pid)
          FROM pg_stat_activity
          WHERE datname = $1
            AND pid <> pg_backend_pid()
            AND query LIKE $2
        `, [config_1.CONFIG.target.database, `%${schema}%`]);
                // Drop schema. Identifiers cannot be bound as $n parameters, so
                // escape embedded double quotes ("" inside "..." per SQL) so an
                // unusual schema name cannot break out of the quoted identifier.
                const schemaEsc = schema.replace(/"/g, '""');
                await client.query(`DROP SCHEMA IF EXISTS "${schemaEsc}" CASCADE`);
                console.log(`Deleted schema: ${schema}`);
                deleted++;
            }
            catch (error) {
                console.error(`Failed to delete ${schema}:`, error);
            }
        }
        console.log(`Cleanup complete. Deleted ${deleted}/${schemas.length} schemas`);
    }
    finally {
        await client.end();
    }
}
114
+ cleanupSchemas().catch(console.error);
@@ -0,0 +1 @@
1
+ export * from "./src/index";
package/dist/index.js ADDED
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
+ };
16
+ Object.defineProperty(exports, "__esModule", { value: true });
17
+ __exportStar(require("./src/index"), exports);
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ export {};
@@ -0,0 +1,9 @@
1
#!/usr/bin/env node
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const runner_1 = require("./src/runner");
const utils_1 = require("./src/utils");
// CLI entry point: run the migration; log the failure and exit(1) on any
// fatal (unhandled) error so shell callers see a non-zero status.
(0, runner_1.runMigration)().catch((err) => {
    (0, utils_1.log)("error", `fatal: ${err instanceof Error ? (err.stack ?? err.message) : String(err)}`);
    process.exit(1);
});
@@ -0,0 +1,29 @@
1
+ import "dotenv/config";
2
+ import type { ClientConfig } from "pg";
3
+ /** Build a connection URL from config and optional database name. */
4
+ export declare function buildConnectionString(cfg: ClientConfig, database?: string): string;
5
+ export declare const CONFIG: {
6
+ source: ClientConfig;
7
+ target: ClientConfig;
8
+ dumpDir: string;
9
+ stateFile: string;
10
+ concurrency: number;
11
+ excludeDatabases: string[];
12
+ filterPrefix: string | null;
13
+ execTimeoutMs: number;
14
+ skipChecksumAboveRows: number | undefined;
15
+ /** Path to JSON array of DB/schema names (benchmark scripts). Default: ./scripts/db-list.json */
16
+ dbListPath: string;
17
+ /** Table name used by benchmark scripts (e.g. comparison, manyBases, manySchemas). Default: users_data */
18
+ benchTable: string;
19
+ /** Column used as synthetic ID in benchmarks. Default: dbId */
20
+ benchIdColumn: string;
21
+ /** Column used as name field in benchmarks. Default: name */
22
+ benchNameColumn: string;
23
+ /** createRandomDb script: number of DBs to create. Default: 1000 */
24
+ createDbCount: number;
25
+ /** createRandomDb script: DB name prefix. Default: bench_db_ */
26
+ createDbPrefix: string;
27
+ /** createRandomDb script: path to project that runs postinstall-dev for each DB. Optional. */
28
+ createDbProjectPath: string | null;
29
+ };
@@ -0,0 +1,69 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.CONFIG = void 0;
4
+ exports.buildConnectionString = buildConnectionString;
5
+ require("dotenv/config");
6
/**
 * Parse a postgres:// / postgresql:// URL into pg ClientConfig fields.
 * Percent-encoded credentials and database names are decoded so that e.g.
 * `user%40x` round-trips to `user@x` — the WHATWG URL parser leaves
 * username and pathname percent-encoded (only password was decoded
 * before, which was inconsistent).
 */
function parseConnectionString(url) {
    const u = new URL(url);
    const rawDb = u.pathname.slice(1);
    return {
        host: u.hostname,
        port: u.port ? parseInt(u.port, 10) : 5432,
        user: u.username ? decodeURIComponent(u.username) : undefined,
        password: u.password ? decodeURIComponent(u.password) : undefined,
        database: rawDb ? decodeURIComponent(rawDb) : undefined,
    };
}
16
/**
 * Build a pg ClientConfig from env values. A connection URL wins; the
 * individual host/port/user/password fields are used only as fallback.
 * `dbEnv`, when set, overrides the database in either case.
 */
function resolveConfig(urlEnv, hostEnv, portEnv, userEnv, passEnv, dbEnv) {
    if (urlEnv) {
        const fromUrl = parseConnectionString(urlEnv);
        if (dbEnv)
            return { ...fromUrl, database: dbEnv };
        return fromUrl;
    }
    const config = {
        host: hostEnv ?? "localhost",
        port: parseInt(portEnv ?? "5432", 10),
        user: userEnv ?? "postgres",
        password: passEnv ?? "",
    };
    // Only add the key when a database was actually given.
    if (dbEnv)
        config.database = dbEnv;
    return config;
}
29
/**
 * Build a connection URL from config and optional database name.
 * User, password and database are percent-encoded so values containing
 * reserved characters (@, :, /, spaces) still yield a valid URL
 * (previously only the password was encoded). A missing host falls back
 * to "localhost", mirroring resolveConfig(), instead of the literal
 * string "undefined".
 */
function buildConnectionString(cfg, database) {
    const db = database ?? cfg.database ?? "postgres";
    const user = cfg.user ?? "postgres";
    const pwRaw = typeof cfg.password === "string" ? cfg.password : "";
    const pw = pwRaw ? `:${encodeURIComponent(pwRaw)}` : "";
    const host = cfg.host ?? "localhost";
    const port = cfg.port ?? 5432;
    return `postgresql://${encodeURIComponent(user)}${pw}@${host}:${port}/${encodeURIComponent(db)}`;
}
38
// Runtime configuration, resolved once at module load from process.env
// (dotenv is loaded above, so .env values are visible here).
exports.CONFIG = {
    // Source/target connections: URL env vars win; individual fields are fallback.
    source: resolveConfig(process.env.SOURCE_URL, process.env.SOURCE_HOST, process.env.SOURCE_PORT, process.env.SOURCE_USER, process.env.SOURCE_PASSWORD),
    // Target always gets a database name (default: "tenants").
    target: resolveConfig(process.env.TARGET_URL, process.env.TARGET_HOST, process.env.TARGET_PORT, process.env.TARGET_USER, process.env.TARGET_PASSWORD, process.env.TARGET_DATABASE ?? "tenants"),
    dumpDir: process.env.DUMP_DIR ?? "/tmp/pg_migration_dumps",
    stateFile: process.env.STATE_FILE ?? "./migration-state.json",
    // Number of tenants migrated concurrently per batch.
    concurrency: parseInt(process.env.CONCURRENCY ?? "10", 10),
    // Databases never treated as tenants; includes the target DB itself.
    // NOTE(review): if the target DB name comes only from TARGET_URL's path
    // (TARGET_DATABASE unset), "tenants" is excluded here regardless — confirm.
    excludeDatabases: [
        "postgres",
        "template0",
        "template1",
        process.env.TARGET_DATABASE ?? "tenants",
    ],
    filterPrefix: process.env.FILTER_PREFIX ?? null,
    execTimeoutMs: parseInt(process.env.EXEC_TIMEOUT_MS ?? "600000", 10),
    // Optional: skip checksum verification for tables larger than this row count.
    skipChecksumAboveRows: process.env.SKIP_CHECKSUM_ABOVE_ROWS
        ? parseInt(process.env.SKIP_CHECKSUM_ABOVE_ROWS, 10)
        : undefined,
    /** Path to JSON array of DB/schema names (benchmark scripts). Default: ./scripts/db-list.json */
    dbListPath: process.env.DB_LIST_PATH ?? "./scripts/db-list.json",
    /** Table name used by benchmark scripts (e.g. comparison, manyBases, manySchemas). Default: users_data */
    benchTable: process.env.BENCH_TABLE ?? "users_data",
    /** Column used as synthetic ID in benchmarks. Default: dbId */
    benchIdColumn: process.env.BENCH_ID_COLUMN ?? "dbId",
    /** Column used as name field in benchmarks. Default: name */
    benchNameColumn: process.env.BENCH_NAME_COLUMN ?? "name",
    /** createRandomDb script: number of DBs to create. Default: 1000 */
    createDbCount: parseInt(process.env.CREATE_DB_COUNT ?? "1000", 10),
    /** createRandomDb script: DB name prefix. Default: bench_db_ */
    createDbPrefix: process.env.CREATE_DB_PREFIX ?? "bench_db_",
    /** createRandomDb script: path to project that runs postinstall-dev for each DB. Optional. */
    createDbProjectPath: process.env.CREATE_DB_PROJECT_PATH ?? null,
};
@@ -0,0 +1,4 @@
1
+ import { Client } from "pg";
2
+ export declare function withClient<T>(config: object, fn: (client: Client) => Promise<T>): Promise<T>;
3
+ export declare function ensureTargetDatabase(): Promise<void>;
4
+ export declare function getTenants(): Promise<string[]>;
package/dist/src/db.js ADDED
@@ -0,0 +1,43 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.withClient = withClient;
4
+ exports.ensureTargetDatabase = ensureTargetDatabase;
5
+ exports.getTenants = getTenants;
6
+ const pg_1 = require("pg");
7
+ const config_1 = require("./config");
8
+ const utils_1 = require("./utils");
9
/**
 * Open a pg Client for `config`, run `fn` with it, and always close the
 * connection afterwards. Resolves/rejects with `fn`'s outcome.
 */
async function withClient(config, fn) {
    const conn = new pg_1.Client(config);
    await conn.connect();
    try {
        const result = await fn(conn);
        return result;
    }
    finally {
        await conn.end();
    }
}
19
/**
 * Create the target database if it does not exist yet. Connects to the
 * maintenance DB ("postgres") because CREATE DATABASE cannot target the
 * database being created.
 */
async function ensureTargetDatabase() {
    await withClient({ ...config_1.CONFIG.target, database: "postgres" }, async (client) => {
        const { rows } = await client.query("SELECT 1 FROM pg_database WHERE datname = $1", [
            config_1.CONFIG.target.database,
        ]);
        if (rows.length === 0) {
            // Identifiers cannot be bound as $n parameters; escape embedded
            // double quotes ("" inside "...") so an unusual DB name cannot
            // break the statement.
            const dbEsc = String(config_1.CONFIG.target.database).replace(/"/g, '""');
            await client.query(`CREATE DATABASE "${dbEsc}"`);
            (0, utils_1.log)("info", `created database: ${config_1.CONFIG.target.database}`);
        }
    });
}
30
/**
 * List candidate tenant databases on the source server: every database
 * except the configured exclusions and pg_* internals, optionally
 * narrowed to names starting with CONFIG.filterPrefix. Sorted by name.
 */
async function getTenants() {
    const { excludeDatabases, filterPrefix, source } = config_1.CONFIG;
    return withClient({ ...source, database: "postgres" }, async (client) => {
        // One $n placeholder per excluded name.
        const placeholders = excludeDatabases.map((_, i) => `$${i + 1}`).join(", ");
        const result = await client.query(`SELECT datname FROM pg_database
       WHERE datname NOT IN (${placeholders})
         AND datname NOT LIKE 'pg_%'
       ORDER BY datname`, excludeDatabases);
        const names = result.rows.map((row) => row.datname);
        return filterPrefix ? names.filter((name) => name.startsWith(filterPrefix)) : names;
    });
}
@@ -0,0 +1,2 @@
1
+ export declare function getFreeBytesOnDir(dir: string): number;
2
+ export declare function assertDiskSpace(dir: string, minBytes?: number): void;
@@ -0,0 +1,17 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.getFreeBytesOnDir = getFreeBytesOnDir;
4
+ exports.assertDiskSpace = assertDiskSpace;
5
+ const node_child_process_1 = require("node:child_process");
6
/**
 * Return the number of free bytes on the filesystem containing `dir`,
 * parsed from `df` output.
 * -P (POSIX portable format) guarantees one output line per filesystem,
 * so the "Available" column is always field 4 of line 2; plain `df -k`
 * may wrap a long device name onto a second line and break this parse.
 */
function getFreeBytesOnDir(dir) {
    const out = (0, node_child_process_1.execSync)(`df -Pk "${dir}"`, { encoding: "utf-8" });
    const line = out.trim().split("\n")[1];
    const available = parseInt(line.trim().split(/\s+/)[3], 10);
    return available * 1024; // -k reports 1024-byte blocks
}
12
/**
 * Throw if the filesystem holding `dir` has less than `minBytes` free
 * (default 512 MB); no-op when there is enough space.
 */
function assertDiskSpace(dir, minBytes = 512 * 1024 * 1024) {
    const freeBytes = getFreeBytesOnDir(dir);
    if (freeBytes >= minBytes)
        return;
    const toMb = (bytes) => Math.round(bytes / 1024 / 1024);
    throw new Error(`Not enough disk space in ${dir}: ${toMb(freeBytes)} MB free, need at least ${toMb(minBytes)} MB`);
}
@@ -0,0 +1,16 @@
1
+ /**
2
+ * pgrift – PostgreSQL multi-tenant migration (separate DBs → single DB with schemas)
3
+ *
4
+ * Entry points:
5
+ * - migrate.ts → runMigration() from ./src/runner
6
+ * - verify.ts → uses getTenants from ./src/db, CONFIG from ./src/config
7
+ * - cleanup.ts → uses CONFIG from ./src/config
8
+ */
9
+ export { buildConnectionString, CONFIG } from "./config";
10
+ export { ensureTargetDatabase, getTenants, withClient } from "./db";
11
+ export { migrateTenant } from "./migrate-tenant";
12
+ export { runMigration } from "./runner";
13
+ export { loadState, saveState } from "./state";
14
+ export { FailedEntry, State } from "./types";
15
+ export { atomicWrite, exec, log } from "./utils";
16
+ export { tableChecksum, verifyMigration } from "./verify-migration";
@@ -0,0 +1,32 @@
1
+ "use strict";
2
+ /**
3
+ * pgrift – PostgreSQL multi-tenant migration (separate DBs → single DB with schemas)
4
+ *
5
+ * Entry points:
6
+ * - migrate.ts → runMigration() from ./src/runner
7
+ * - verify.ts → uses getTenants from ./src/db, CONFIG from ./src/config
8
+ * - cleanup.ts → uses CONFIG from ./src/config
9
+ */
10
+ Object.defineProperty(exports, "__esModule", { value: true });
11
+ exports.verifyMigration = exports.tableChecksum = exports.log = exports.exec = exports.atomicWrite = exports.saveState = exports.loadState = exports.runMigration = exports.migrateTenant = exports.withClient = exports.getTenants = exports.ensureTargetDatabase = exports.CONFIG = exports.buildConnectionString = void 0;
12
+ var config_1 = require("./config");
13
+ Object.defineProperty(exports, "buildConnectionString", { enumerable: true, get: function () { return config_1.buildConnectionString; } });
14
+ Object.defineProperty(exports, "CONFIG", { enumerable: true, get: function () { return config_1.CONFIG; } });
15
+ var db_1 = require("./db");
16
+ Object.defineProperty(exports, "ensureTargetDatabase", { enumerable: true, get: function () { return db_1.ensureTargetDatabase; } });
17
+ Object.defineProperty(exports, "getTenants", { enumerable: true, get: function () { return db_1.getTenants; } });
18
+ Object.defineProperty(exports, "withClient", { enumerable: true, get: function () { return db_1.withClient; } });
19
+ var migrate_tenant_1 = require("./migrate-tenant");
20
+ Object.defineProperty(exports, "migrateTenant", { enumerable: true, get: function () { return migrate_tenant_1.migrateTenant; } });
21
+ var runner_1 = require("./runner");
22
+ Object.defineProperty(exports, "runMigration", { enumerable: true, get: function () { return runner_1.runMigration; } });
23
+ var state_1 = require("./state");
24
+ Object.defineProperty(exports, "loadState", { enumerable: true, get: function () { return state_1.loadState; } });
25
+ Object.defineProperty(exports, "saveState", { enumerable: true, get: function () { return state_1.saveState; } });
26
+ var utils_1 = require("./utils");
27
+ Object.defineProperty(exports, "atomicWrite", { enumerable: true, get: function () { return utils_1.atomicWrite; } });
28
+ Object.defineProperty(exports, "exec", { enumerable: true, get: function () { return utils_1.exec; } });
29
+ Object.defineProperty(exports, "log", { enumerable: true, get: function () { return utils_1.log; } });
30
+ var verify_migration_1 = require("./verify-migration");
31
+ Object.defineProperty(exports, "tableChecksum", { enumerable: true, get: function () { return verify_migration_1.tableChecksum; } });
32
+ Object.defineProperty(exports, "verifyMigration", { enumerable: true, get: function () { return verify_migration_1.verifyMigration; } });
@@ -0,0 +1 @@
1
+ export declare function migrateTenant(dbName: string): Promise<void>;
@@ -0,0 +1,105 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.migrateTenant = migrateTenant;
7
+ const node_fs_1 = __importDefault(require("node:fs"));
8
+ const node_path_1 = __importDefault(require("node:path"));
9
+ const config_1 = require("./config");
10
+ const db_1 = require("./db");
11
+ const disk_1 = require("./disk");
12
+ const utils_1 = require("./utils");
13
/**
 * Migrate one tenant database into the target DB as a schema.
 *
 * Flow: terminate source connections → rename source `public` to the
 * tenant name (and create a fresh empty `public`) → pg_dump only that
 * schema → undo the rename on the source → drop/recreate the schema in
 * the target and apply the dump with psql.
 *
 * The dump file is always deleted in `finally`; if an error occurs while
 * the source schema is renamed, `rollbackNeeded` triggers a best-effort
 * restore of the source's original `public` schema.
 */
async function migrateTenant(dbName) {
    const finalDumpFile = node_path_1.default.join(config_1.CONFIG.dumpDir, `${dbName}.final.dump`);
    const src = config_1.CONFIG.source;
    const tgt = config_1.CONFIG.target;
    const srcPw = String(src.password ?? "");
    const tgtPw = String(tgt.password ?? "");
    // Escaped for use inside double-quoted SQL identifiers only.
    // NOTE(review): dbName is also interpolated raw into the pg_dump/psql
    // shell commands below — confirm exec() quoting is safe for DB names
    // containing shell metacharacters.
    const dbNameEsc = dbName.replace(/"/g, '""');
    (0, disk_1.assertDiskSpace)(config_1.CONFIG.dumpDir);
    // Snapshot the source DB's extensions so they can be re-created in target.
    const extensions = await (0, db_1.withClient)({ ...src, database: dbName }, async (client) => {
        const { rows } = await client.query(`SELECT extname FROM pg_extension WHERE extname != 'plpgsql' ORDER BY extname`);
        return rows.map((r) => r.extname);
    });
    // True while the source schema is renamed and must be restored on error.
    let rollbackNeeded = false;
    try {
        (0, utils_1.log)("info", ` [${dbName}] terminating connections...`);
        await (0, db_1.withClient)({ ...src, database: "postgres" }, async (client) => {
            await client.query(`SELECT pg_terminate_backend(pid)
         FROM pg_stat_activity
         WHERE datname = $1 AND pid <> pg_backend_pid()`, [dbName]);
        });
        (0, utils_1.log)("info", ` [${dbName}] renaming schema in source...`);
        await (0, db_1.withClient)({ ...src, database: dbName }, async (client) => {
            await client.query(`ALTER SCHEMA public RENAME TO "${dbNameEsc}"`);
            await client.query(`CREATE SCHEMA public`);
            // Single-quoted value: search_path becomes the quoted identifier.
            await client.query(`ALTER DATABASE "${dbNameEsc}" SET search_path = '"${dbNameEsc}"'`);
        });
        rollbackNeeded = true;
        (0, utils_1.log)("info", ` [${dbName}] dumping renamed schema...`);
        (0, utils_1.exec)([
            "pg_dump",
            `-h "${src.host}"`,
            `-p ${src.port}`,
            `-U "${src.user}"`,
            `-d "${dbName}"`,
            `-n "${dbName}"`, // dump only the renamed tenant schema
            `--no-owner`,
            `--no-acl`,
            `-f "${finalDumpFile}"`,
        ].join(" "), srcPw);
        (0, utils_1.log)("info", ` [${dbName}] rolling back source schema...`);
        await (0, db_1.withClient)({ ...src, database: dbName }, async (client) => {
            await client.query(`ALTER DATABASE "${dbNameEsc}" RESET search_path`);
            await client.query(`DROP SCHEMA public`); // the empty one created above
            await client.query(`ALTER SCHEMA "${dbNameEsc}" RENAME TO public`);
        });
        rollbackNeeded = false;
        (0, utils_1.log)("info", ` [${dbName}] restoring to target database...`);
        await (0, db_1.withClient)(tgt, async (client) => {
            await client.query(`DROP SCHEMA IF EXISTS "${dbNameEsc}" CASCADE`);
            for (const extname of extensions) {
                try {
                    await client.query(`CREATE EXTENSION IF NOT EXISTS "${extname}" WITH SCHEMA public`);
                }
                catch {
                    // extension may already exist
                }
            }
        });
        // The dump qualifies gin_trgm_ops with the tenant schema; in the target
        // the extension lives in public, so rewrite the qualifier if present.
        let dumpSql = node_fs_1.default.readFileSync(finalDumpFile, "utf-8");
        const opclassSchema = `${dbName}.gin_trgm_ops`;
        if (dumpSql.includes(opclassSchema)) {
            dumpSql = dumpSql.split(opclassSchema).join("public.gin_trgm_ops");
        }
        node_fs_1.default.writeFileSync(finalDumpFile, dumpSql, "utf-8");
        (0, utils_1.exec)([
            "psql",
            `-h "${tgt.host}"`,
            `-p ${tgt.port}`,
            `-U "${tgt.user}"`,
            `-d "${tgt.database}"`,
            `-v ON_ERROR_STOP=1`, // abort restore on first SQL error
            `-f "${finalDumpFile}"`,
        ].join(" "), tgtPw);
        (0, utils_1.log)("info", ` [${dbName}] migration completed`);
    }
    finally {
        if (rollbackNeeded) {
            (0, utils_1.log)("warn", ` [${dbName}] rollback after error...`);
            try {
                await (0, db_1.withClient)({ ...src, database: dbName }, async (client) => {
                    await client.query(`ALTER DATABASE "${dbNameEsc}" RESET search_path`);
                    await client.query(`DROP SCHEMA IF EXISTS public`);
                    await client.query(`ALTER SCHEMA "${dbNameEsc}" RENAME TO public`);
                });
            }
            catch (err) {
                (0, utils_1.log)("error", ` [${dbName}] rollback failed: ${err}`);
            }
        }
        // Dump file is temporary in both success and failure paths.
        if (node_fs_1.default.existsSync(finalDumpFile))
            node_fs_1.default.unlinkSync(finalDumpFile);
    }
}
@@ -0,0 +1 @@
1
+ export declare function runMigration(): Promise<void>;
@@ -0,0 +1,133 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
18
+ var __importStar = (this && this.__importStar) || (function () {
19
+ var ownKeys = function(o) {
20
+ ownKeys = Object.getOwnPropertyNames || function (o) {
21
+ var ar = [];
22
+ for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
23
+ return ar;
24
+ };
25
+ return ownKeys(o);
26
+ };
27
+ return function (mod) {
28
+ if (mod && mod.__esModule) return mod;
29
+ var result = {};
30
+ if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
31
+ __setModuleDefault(result, mod);
32
+ return result;
33
+ };
34
+ })();
35
+ var __importDefault = (this && this.__importDefault) || function (mod) {
36
+ return (mod && mod.__esModule) ? mod : { "default": mod };
37
+ };
38
+ Object.defineProperty(exports, "__esModule", { value: true });
39
+ exports.runMigration = runMigration;
40
+ const node_fs_1 = __importDefault(require("node:fs"));
41
+ const readline = __importStar(require("node:readline"));
42
+ const config_1 = require("./config");
43
+ const db_1 = require("./db");
44
+ const migrate_tenant_1 = require("./migrate-tenant");
45
+ const state_1 = require("./state");
46
+ const utils_1 = require("./utils");
47
+ const verify_migration_1 = require("./verify-migration");
48
+ async function runBatch(tenants, state) {
49
+ const results = await Promise.allSettled(tenants.map(async (db) => {
50
+ await (0, migrate_tenant_1.migrateTenant)(db);
51
+ const { ok, reasons } = await (0, verify_migration_1.verifyMigration)(db);
52
+ if (!ok) {
53
+ throw new Error(`verification failed: ${reasons.join(" | ")}`);
54
+ }
55
+ return db;
56
+ }));
57
+ for (let i = 0; i < results.length; i++) {
58
+ const db = tenants[i];
59
+ const result = results[i];
60
+ if (result.status === "fulfilled") {
61
+ state.completed.push(db);
62
+ (0, utils_1.log)("done", `${db} (${state.completed.length} total)`);
63
+ }
64
+ else {
65
+ const message = result.reason instanceof Error ? result.reason.message : String(result.reason);
66
+ const existing = state.failed.find((f) => f.db === db);
67
+ if (existing) {
68
+ existing.attempts++;
69
+ existing.error = message;
70
+ }
71
+ else {
72
+ state.failed.push({ db, error: message, attempts: 1 });
73
+ }
74
+ (0, utils_1.log)("fail", `${db}: ${message.slice(0, 200)}`);
75
+ }
76
+ }
77
+ }
78
/**
 * Ask a yes/no question on stdin; resolves true only for "y"/"Y"
 * (surrounding whitespace ignored), false for anything else.
 */
function askConfirm(prompt) {
    const iface = readline.createInterface({ input: process.stdin, output: process.stdout });
    return new Promise((resolve) => {
        iface.question(prompt, (answer) => {
            iface.close();
            const normalized = answer.trim().toUpperCase();
            resolve(normalized === "Y");
        });
    });
}
87
/**
 * Orchestrate the full migration run.
 *
 * Asks for confirmation, ensures the target DB and dump dir exist, then
 * migrates the not-yet-completed tenants in batches of CONFIG.concurrency.
 * State is persisted after every batch, so a crash loses at most one
 * batch of progress and a re-run skips completed tenants. Finishes by
 * writing ./migration-report.json.
 */
async function runMigration() {
    const confirmed = await askConfirm("Run migration? (Y/N): ");
    if (!confirmed) {
        (0, utils_1.log)("info", "Aborted.");
        return;
    }
    node_fs_1.default.mkdirSync(config_1.CONFIG.dumpDir, { recursive: true });
    await (0, db_1.ensureTargetDatabase)();
    const state = (0, state_1.loadState)();
    const allTenants = await (0, db_1.getTenants)();
    // Resume support: skip tenants already recorded as completed.
    const completed = new Set(state.completed);
    const remaining = allTenants.filter((db) => !completed.has(db));
    (0, utils_1.log)("info", `total: ${allTenants.length} | done: ${state.completed.length} | ` +
        `remaining: ${remaining.length} | failed: ${state.failed.length}`);
    if (remaining.length === 0) {
        (0, utils_1.log)("info", "nothing to migrate");
        return;
    }
    // Fixed-size batches; tenants within a batch run concurrently.
    const batches = [];
    for (let i = 0; i < remaining.length; i += config_1.CONFIG.concurrency) {
        batches.push(remaining.slice(i, i + config_1.CONFIG.concurrency));
    }
    const startTime = Date.now();
    const initialDone = state.completed.length;
    for (let i = 0; i < batches.length; i++) {
        // Naive linear ETA based on this run's throughput so far.
        const elapsed = Math.round((Date.now() - startTime) / 1000);
        const migratedSoFar = state.completed.length - initialDone;
        const pct = Math.round((state.completed.length / allTenants.length) * 100);
        const remainingCount = remaining.length - migratedSoFar;
        const eta = migratedSoFar > 0 ? Math.round((elapsed / migratedSoFar) * remainingCount) : "?";
        (0, utils_1.log)("info", `batch ${i + 1}/${batches.length} | ${pct}% | ${elapsed}s elapsed | ETA ~${eta}s`);
        await runBatch(batches[i], state);
        // Persist after each batch so an interrupt can resume here.
        (0, state_1.saveState)(state);
    }
    const totalTime = Math.round((Date.now() - startTime) / 1000);
    (0, utils_1.log)("info", `completed: ${state.completed.length} | failed: ${state.failed.length} | time: ${totalTime}s`);
    if (state.failed.length > 0) {
        (0, utils_1.log)("warn", "failed tenants:");
        for (const f of state.failed)
            (0, utils_1.log)("fail", ` ${f.db} (${f.attempts} attempts): ${f.error.slice(0, 200)}`);
    }
    // Final report: full state plus run totals.
    (0, utils_1.atomicWrite)("./migration-report.json", JSON.stringify({
        ...state,
        totalDatabases: allTenants.length,
        totalTimeSeconds: totalTime,
    }, null, 2));
}
@@ -0,0 +1,3 @@
1
+ import type { State } from "./types";
2
+ export declare function loadState(): State;
3
+ export declare function saveState(state: State): void;
@@ -0,0 +1,30 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.loadState = loadState;
7
+ exports.saveState = saveState;
8
+ const node_fs_1 = __importDefault(require("node:fs"));
9
+ const config_1 = require("./config");
10
+ const utils_1 = require("./utils");
11
+ function loadState() {
12
+ if (node_fs_1.default.existsSync(config_1.CONFIG.stateFile)) {
13
+ try {
14
+ return JSON.parse(node_fs_1.default.readFileSync(config_1.CONFIG.stateFile, "utf-8"));
15
+ }
16
+ catch {
17
+ (0, utils_1.log)("warn", "state file is corrupted, starting fresh");
18
+ }
19
+ }
20
+ return {
21
+ completed: [],
22
+ failed: [],
23
+ startedAt: new Date().toISOString(),
24
+ lastUpdated: new Date().toISOString(),
25
+ };
26
+ }
27
+ function saveState(state) {
28
+ state.lastUpdated = new Date().toISOString();
29
+ (0, utils_1.atomicWrite)(config_1.CONFIG.stateFile, JSON.stringify(state, null, 2));
30
+ }
@@ -0,0 +1,11 @@
1
+ export interface FailedEntry {
2
+ db: string;
3
+ error: string;
4
+ attempts: number;
5
+ }
6
+ export interface State {
7
+ completed: string[];
8
+ failed: FailedEntry[];
9
+ startedAt: string;
10
+ lastUpdated: string;
11
+ }
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,5 @@
1
+ export type LogLevel = "info" | "warn" | "error" | "done" | "fail";
2
+ export declare function log(level: LogLevel, msg: string): void;
3
+ /** Atomic file write: write to temp file then rename. */
4
+ export declare function atomicWrite(filePath: string, data: string): void;
5
+ export declare function exec(cmd: string, password: string): void;
@@ -0,0 +1,36 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.log = log;
7
+ exports.atomicWrite = atomicWrite;
8
+ exports.exec = exec;
9
+ const node_child_process_1 = require("node:child_process");
10
+ const node_fs_1 = __importDefault(require("node:fs"));
11
+ const config_1 = require("./config");
12
+ function log(level, msg) {
13
+ const prefix = {
14
+ info: "[info]",
15
+ warn: "[warn]",
16
+ error: "[error]",
17
+ done: "[done]",
18
+ fail: "[fail]",
19
+ };
20
+ const out = level === "error" || level === "fail" ? process.stderr : process.stdout;
21
+ out.write(`${prefix[level]} ${msg}\n`);
22
+ }
23
+ /** Atomic file write: write to temp file then rename. */
24
+ function atomicWrite(filePath, data) {
25
+ const tmp = `${filePath}.tmp`;
26
+ node_fs_1.default.writeFileSync(tmp, data, "utf-8");
27
+ node_fs_1.default.renameSync(tmp, filePath);
28
+ }
29
+ function exec(cmd, password) {
30
+ const opts = {
31
+ env: { ...process.env, PGPASSWORD: password },
32
+ stdio: "pipe",
33
+ timeout: config_1.CONFIG.execTimeoutMs ?? 10 * 60 * 1000,
34
+ };
35
+ (0, node_child_process_1.execSync)(cmd, opts);
36
+ }
@@ -0,0 +1,6 @@
1
+ import type { Client } from "pg";
2
+ export declare function tableChecksum(client: Client, schema: string, table: string): Promise<string>;
3
+ export declare function verifyMigration(dbName: string): Promise<{
4
+ ok: boolean;
5
+ reasons: string[];
6
+ }>;
@@ -0,0 +1,75 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.tableChecksum = tableChecksum;
4
+ exports.verifyMigration = verifyMigration;
5
+ const config_1 = require("./config");
6
+ const db_1 = require("./db");
7
+ const utils_1 = require("./utils");
8
+ async function tableChecksum(client, schema, table) {
9
+ const { rows: cols } = await client.query(`SELECT column_name, data_type
10
+ FROM information_schema.columns
11
+ WHERE table_schema = $1 AND table_name = $2
12
+ ORDER BY ordinal_position`, [schema, table]);
13
+ if (cols.length === 0)
14
+ return "empty";
15
+ const orderBy = cols
16
+ .map((c) => {
17
+ const colExpr = c.data_type === "json" || c.data_type === "jsonb"
18
+ ? `"${c.column_name}"::text`
19
+ : `"${c.column_name}"`;
20
+ return `${colExpr} NULLS FIRST`;
21
+ })
22
+ .join(", ");
23
+ const { rows } = await client.query(`SELECT md5(string_agg(row_md5, ',' ORDER BY rn)) AS checksum
24
+ FROM (
25
+ SELECT ROW_NUMBER() OVER (ORDER BY ${orderBy}) AS rn,
26
+ md5(ROW(${cols.map((c) => `"${c.column_name}"`).join(", ")})::text) AS row_md5
27
+ FROM ${schema === "public" ? "public" : `"${schema}"`}."${table}"
28
+ ) sub`);
29
+ return rows[0]?.checksum ?? "null";
30
+ }
31
+ async function verifyMigration(dbName) {
32
+ const reasons = [];
33
+ await (0, db_1.withClient)({ ...config_1.CONFIG.source, database: dbName }, async (srcClient) => {
34
+ await (0, db_1.withClient)(config_1.CONFIG.target, async (tgtClient) => {
35
+ const { rows: srcTables } = await srcClient.query(`SELECT table_name FROM information_schema.tables
36
+ WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
37
+ ORDER BY table_name`);
38
+ const { rows: tgtTables } = await tgtClient.query(`SELECT table_name FROM information_schema.tables
39
+ WHERE table_schema = $1 AND table_type = 'BASE TABLE'
40
+ ORDER BY table_name`, [dbName]);
41
+ const srcTableNames = srcTables.map((r) => r.table_name);
42
+ const tgtTableNames = new Set(tgtTables.map((r) => r.table_name));
43
+ const missingTables = srcTableNames.filter((t) => !tgtTableNames.has(t));
44
+ if (missingTables.length > 0) {
45
+ reasons.push(`missing tables: ${missingTables.join(", ")}`);
46
+ }
47
+ for (const table of srcTableNames) {
48
+ if (!tgtTableNames.has(table))
49
+ continue;
50
+ const { rows: srcCnt } = await srcClient.query(`SELECT COUNT(*)::text AS cnt FROM public."${table}"`);
51
+ const { rows: tgtCnt } = await tgtClient.query(`SELECT COUNT(*)::text AS cnt FROM "${dbName}"."${table}"`);
52
+ const srcCount = srcCnt[0].cnt;
53
+ const tgtCount = tgtCnt[0].cnt;
54
+ if (srcCount !== tgtCount) {
55
+ reasons.push(`${table}: row count mismatch (src: ${srcCount}, tgt: ${tgtCount})`);
56
+ continue;
57
+ }
58
+ const rowCount = parseInt(srcCount, 10);
59
+ const skipAbove = config_1.CONFIG.skipChecksumAboveRows;
60
+ if (skipAbove !== undefined && rowCount > skipAbove) {
61
+ (0, utils_1.log)("warn", ` ${table}: checksum skipped (${rowCount} rows > threshold ${skipAbove})`);
62
+ continue;
63
+ }
64
+ const [srcChecksum, tgtChecksum] = await Promise.all([
65
+ tableChecksum(srcClient, "public", table),
66
+ tableChecksum(tgtClient, dbName, table),
67
+ ]);
68
+ if (srcChecksum !== tgtChecksum) {
69
+ reasons.push(`${table}: checksum mismatch (src: ${srcChecksum}, tgt: ${tgtChecksum})`);
70
+ }
71
+ }
72
+ });
73
+ });
74
+ return { ok: reasons.length === 0, reasons };
75
+ }
@@ -0,0 +1 @@
1
+ export {};
package/dist/verify.js ADDED
@@ -0,0 +1,75 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const pg_1 = require("pg");
4
+ const config_1 = require("./src/config");
5
+ const db_1 = require("./src/db");
6
+ async function verifyTenant(dbName) {
7
+ const sourceClient = new pg_1.Client({ ...config_1.CONFIG.source, database: dbName });
8
+ const targetClient = new pg_1.Client(config_1.CONFIG.target);
9
+ await sourceClient.connect();
10
+ await targetClient.connect();
11
+ try {
12
+ // 1. Compare table lists
13
+ const { rows: srcTables } = await sourceClient.query(`SELECT table_name FROM information_schema.tables
14
+ WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
15
+ ORDER BY table_name`);
16
+ const { rows: tgtTables } = await targetClient.query(`SELECT table_name FROM information_schema.tables
17
+ WHERE table_schema = $1 AND table_type = 'BASE TABLE'
18
+ ORDER BY table_name`, [dbName]);
19
+ const srcNames = srcTables.map((r) => r.table_name);
20
+ const tgtNames = tgtTables.map((r) => r.table_name);
21
+ const missing = srcNames.filter((t) => !tgtNames.includes(t));
22
+ if (missing.length > 0) {
23
+ return { db: dbName, status: "fail", reason: `missing tables: ${missing.join(", ")}` };
24
+ }
25
+ const extra = tgtNames.filter((t) => !srcNames.includes(t));
26
+ if (extra.length > 0) {
27
+ return { db: dbName, status: "fail", reason: `unexpected tables: ${extra.join(", ")}` };
28
+ }
29
+ // 2. Compare row counts per table
30
+ for (const table of srcNames) {
31
+ const { rows: s } = await sourceClient.query(`SELECT COUNT(*) AS cnt FROM public."${table}"`);
32
+ const { rows: t } = await targetClient.query(`SELECT COUNT(*) AS cnt FROM "${dbName}"."${table}"`);
33
+ const srcCnt = parseInt(s[0].cnt, 10);
34
+ const tgtCnt = parseInt(t[0].cnt, 10);
35
+ if (srcCnt !== tgtCnt) {
36
+ return {
37
+ db: dbName,
38
+ status: "fail",
39
+ reason: `${table}: row count mismatch (src: ${srcCnt}, tgt: ${tgtCnt})`,
40
+ };
41
+ }
42
+ }
43
+ return { db: dbName, status: "ok" };
44
+ }
45
+ finally {
46
+ await sourceClient.end();
47
+ await targetClient.end();
48
+ }
49
+ }
50
+ async function main() {
51
+ // Optional: pass specific DBs as args: npx ts-node verify.ts db-company1 db-company2
52
+ const args = process.argv.slice(2);
53
+ const tenants = args.length > 0 ? args : await (0, db_1.getTenants)();
54
+ console.log(`Verifying ${tenants.length} tenant(s)...\n`);
55
+ const results = [];
56
+ // Run sequentially to avoid overloading the database
57
+ for (const db of tenants) {
58
+ process.stdout.write(` ${db} ... `);
59
+ const result = await verifyTenant(db);
60
+ results.push(result);
61
+ console.log(result.status === "ok" ? "✓" : `✗ ${result.reason}`);
62
+ }
63
+ const failed = results.filter((r) => r.status === "fail");
64
+ console.log(`\n${results.length - failed.length}/${results.length} passed`);
65
+ if (failed.length > 0) {
66
+ console.log("\nFailed:");
67
+ for (const r of failed)
68
+ console.log(` ${r.db}: ${r.reason}`);
69
+ process.exit(1);
70
+ }
71
+ }
72
+ main().catch((err) => {
73
+ console.error("fatal:", err);
74
+ process.exit(1);
75
+ });
package/package.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "name": "pgrift",
3
+ "version": "1.0.0",
4
+ "description": "Migrate many PostgreSQL databases (one per tenant) into a single DB with one schema per tenant. Uses pg_dump and psql with resumable state.",
5
+ "license": "ISC",
6
+ "author": "Satoru Fujinuma",
7
+ "type": "commonjs",
8
+ "main": "dist/index.js",
9
+ "bin": { "pgrift": "./dist/migrate.js" },
10
+ "repository": { "type": "git", "url": "https://github.com/SatoruFF/pgrift.git" },
11
+ "keywords": ["postgresql", "migration", "multi-tenant", "pg_dump", "schema"],
12
+ "scripts": {
13
+ "dev": "node dist/migrate.js",
14
+ "migrate": "node dist/migrate.js",
15
+ "cleanup": "node dist/cleanup.js",
16
+ "verify": "node dist/verify.js",
17
+ "create-db": "npx tsx scripts/createRandomDb.test.ts",
18
+ "many-bases": "npx tsx scripts/manyBases.test.ts",
19
+ "many-schemas": "npx tsx scripts/manySchemas.test.ts",
20
+ "comparison": "npx tsx scripts/comparison.test.ts",
21
+ "lint": "biome check .",
22
+ "lint:fix": "biome check --write .",
23
+ "format": "biome format --write .",
24
+ "build": "tsc -p tsconfig.json",
25
+ "prepare": "npm run build"
26
+ },
27
+ "dependencies": {
28
+ "dotenv": "^17.3.1",
29
+ "p-map": "^7.0.4",
30
+ "pg": "^8.18.0"
31
+ },
32
+ "devDependencies": {
33
+ "@biomejs/biome": "^2.4.4",
34
+ "@types/pg": "^8.16.0",
35
+ "ts-node": "^10.9.2",
36
+ "tsx": "^4.21.0",
37
+ "typescript": "^5.9.3"
38
+ },
39
+ "types": "dist/index.d.ts",
40
+ "files": [
41
+ "dist",
42
+ "README.md",
43
+ ".env.example"
44
+ ],
45
+ "engines": { "node": ">=18" }
46
+ }