@vorplex/database 0.0.4 → 0.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
export type DatabaseProviderName = 'postgres' | 'sql';
|
|
2
|
+
export interface DatabaseDefinition {
|
|
3
|
+
name: string;
|
|
4
|
+
provider: DatabaseProviderName;
|
|
5
|
+
version: string;
|
|
6
|
+
tables: Record<string, TableDefinition>;
|
|
7
|
+
}
|
|
8
|
+
export interface IndexDefinition {
|
|
9
|
+
name: string;
|
|
10
|
+
columns: string[];
|
|
11
|
+
unique?: boolean;
|
|
12
|
+
}
|
|
13
|
+
export interface TableDefinition {
|
|
14
|
+
name: string;
|
|
15
|
+
columns: Record<string, ColumnDefinition>;
|
|
16
|
+
constraints?: Record<string, ConstraintDefinitions>;
|
|
17
|
+
indexes?: Record<string, IndexDefinition>;
|
|
18
|
+
}
|
|
19
|
+
export interface ConstraintDefinition {
|
|
20
|
+
name: string;
|
|
21
|
+
type: string;
|
|
22
|
+
}
|
|
23
|
+
export interface PrimaryKeyConstraintDefinition extends ConstraintDefinition {
|
|
24
|
+
type: 'primary-key';
|
|
25
|
+
column: string;
|
|
26
|
+
}
|
|
27
|
+
export interface CompositePrimaryKeyConstraintDefinition extends ConstraintDefinition {
|
|
28
|
+
type: 'composite-primary-key';
|
|
29
|
+
columns: string[];
|
|
30
|
+
}
|
|
31
|
+
export interface UniqueConstraintDefinition extends ConstraintDefinition {
|
|
32
|
+
type: 'unique';
|
|
33
|
+
column: string;
|
|
34
|
+
}
|
|
35
|
+
export type ConstraintDefinitions = PrimaryKeyConstraintDefinition | UniqueConstraintDefinition | CompositePrimaryKeyConstraintDefinition;
|
|
36
|
+
export type ColumnType = 'string' | 'number' | 'boolean' | 'json';
|
|
37
|
+
export interface ColumnDefinition {
|
|
38
|
+
name: string;
|
|
39
|
+
type: ColumnType;
|
|
40
|
+
nullable?: boolean;
|
|
41
|
+
overrides?: Partial<Record<DatabaseProviderName, ColumnOverrideDefinition>>;
|
|
42
|
+
}
|
|
43
|
+
export interface ColumnOverrideDefinition {
|
|
44
|
+
type: string;
|
|
45
|
+
default: string;
|
|
46
|
+
}
|
|
47
|
+
export interface Migration {
|
|
48
|
+
hash: string;
|
|
49
|
+
/**
|
|
50
|
+
* Actions that must be executed before connecting to the target database (e.g., CREATE DATABASE)
|
|
51
|
+
*/
|
|
52
|
+
preDatabaseActions: MigrationActions[];
|
|
53
|
+
/**
|
|
54
|
+
* Actions that must be executed after connecting to the target database (e.g., CREATE TABLE, CREATE INDEX)
|
|
55
|
+
*/
|
|
56
|
+
postDatabaseActions: MigrationActions[];
|
|
57
|
+
}
|
|
58
|
+
export interface MigrationAction {
|
|
59
|
+
type: string;
|
|
60
|
+
}
|
|
61
|
+
export interface CreateTableMigrationAction extends MigrationAction {
|
|
62
|
+
type: 'create-table';
|
|
63
|
+
definition: TableDefinition;
|
|
64
|
+
}
|
|
65
|
+
export interface AddColumnMigrationAction extends MigrationAction {
|
|
66
|
+
type: 'add-column';
|
|
67
|
+
table: string;
|
|
68
|
+
definition: ColumnDefinition;
|
|
69
|
+
}
|
|
70
|
+
export interface CreateDatabaseMigrationAction extends MigrationAction {
|
|
71
|
+
type: 'create-database';
|
|
72
|
+
name: string;
|
|
73
|
+
}
|
|
74
|
+
export interface RenameTableMigrationAction extends MigrationAction {
|
|
75
|
+
type: 'rename-table';
|
|
76
|
+
from: string;
|
|
77
|
+
to: string;
|
|
78
|
+
}
|
|
79
|
+
export interface RenameColumnMigrationAction extends MigrationAction {
|
|
80
|
+
type: 'rename-column';
|
|
81
|
+
table: string;
|
|
82
|
+
from: string;
|
|
83
|
+
to: string;
|
|
84
|
+
}
|
|
85
|
+
export type MigrationActionPolicyStrategy = 'none' | 'archive';
|
|
86
|
+
export interface MigrationActionPolicy {
|
|
87
|
+
strategy: MigrationActionPolicyStrategy;
|
|
88
|
+
}
|
|
89
|
+
export interface ArchiveTableMigrationActionPolicy extends MigrationActionPolicy {
|
|
90
|
+
strategy: 'archive';
|
|
91
|
+
table: string;
|
|
92
|
+
}
|
|
93
|
+
export interface DropTableMigrationAction extends MigrationAction {
|
|
94
|
+
type: 'drop-table';
|
|
95
|
+
name: string;
|
|
96
|
+
policy?: ArchiveTableMigrationActionPolicy;
|
|
97
|
+
}
|
|
98
|
+
export interface DropColumnMigrationAction extends MigrationAction {
|
|
99
|
+
type: 'drop-column';
|
|
100
|
+
table: string;
|
|
101
|
+
name: string;
|
|
102
|
+
}
|
|
103
|
+
/**
|
|
104
|
+
* Recreates a table by:
|
|
105
|
+
* 1. Creating new table with target schema
|
|
106
|
+
* 2. Copying data from old table using bulk copy (INSERT INTO ... SELECT)
|
|
107
|
+
* 3. Creating indexes (deferred until after data load)
|
|
108
|
+
* 4. Dropping old table
|
|
109
|
+
* 5. Renaming new table to original name
|
|
110
|
+
*
|
|
111
|
+
* This approach is faster and more reliable than in-place ALTER operations for:
|
|
112
|
+
* - Column type changes
|
|
113
|
+
* - Nullability changes
|
|
114
|
+
* - Constraint modifications
|
|
115
|
+
* - Index modifications
|
|
116
|
+
* - Any combination of schema changes
|
|
117
|
+
*/
|
|
118
|
+
export interface RecreateTableMigrationAction extends MigrationAction {
|
|
119
|
+
type: 'recreate-table';
|
|
120
|
+
oldTable: TableDefinition;
|
|
121
|
+
newTable: TableDefinition;
|
|
122
|
+
policy?: ArchiveTableMigrationActionPolicy;
|
|
123
|
+
}
|
|
124
|
+
export type MigrationActions = CreateTableMigrationAction | AddColumnMigrationAction | CreateDatabaseMigrationAction | RenameTableMigrationAction | RenameColumnMigrationAction | DropTableMigrationAction | DropColumnMigrationAction | RecreateTableMigrationAction;
|
|
File without changes
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
import { ColumnDefinition, ColumnType, DatabaseDefinition, DatabaseProviderName, Migration, MigrationActionPolicyStrategy, MigrationActions } from './database-definition.interface';
/** Connection settings for a target database server. */
export interface DatabaseProviderConfig {
    provider: DatabaseProviderName;
    host: string;
    /** Defaults to the provider's standard port (5432 for postgres, 1433 for sql) when omitted. */
    port?: number;
    user: string;
    password: string;
    /** Target database name; required for introspection and post-database actions. */
    database?: string;
}
/**
 * Static facade for generating, translating, and executing schema migrations.
 */
export declare class $DatabaseMigrator {
    /**
     * Diffs `from` (pass null when the database does not exist yet) against
     * `to` and returns the migration actions needed to reconcile them.
     */
    static generateMigration(from: DatabaseDefinition | null, to: DatabaseDefinition, options?: {
        strategy: MigrationActionPolicyStrategy;
    }): Promise<Migration>;
    /**
     * Determines if a table needs to be recreated based on schema changes.
     * Returns true for any changes beyond simple metadata operations.
     *
     * Note: This compares tables from the SAME provider (e.g., comparing current postgres DB to desired postgres schema).
     * Overrides are compared only for the provider being used, not all providers.
     */
    private static tableNeedsRecreation;
    /**
     * Compares constraints semantically, ignoring database-generated names.
     * Constraints are considered equal if they have the same semantic keys and types.
     */
    private static constraintsMatch;
    /**
     * Compares indexes semantically, ignoring database-generated names.
     * Indexes are considered equal if they have the same semantic keys, columns, and uniqueness.
     */
    private static indexesMatch;
    /**
     * Generates SQL queries for pre-database migration actions (e.g., CREATE DATABASE).
     * These queries must be executed with a connection to the server WITHOUT specifying a database.
     */
    static generatePreDatabaseMigrationQuery(provider: DatabaseProviderName, migration: Migration): string[];
    /**
     * Generates SQL queries for post-database migration actions (e.g., CREATE TABLE, CREATE INDEX).
     * These queries must be executed with a connection to the TARGET database.
     */
    static generatePostDatabaseMigrationQuery(provider: DatabaseProviderName, migration: Migration): string[];
    /**
     * Generates all SQL queries for the migration.
     * Note: If preDatabaseActions exist, you must reconnect to the created database before executing postDatabaseActions.
     */
    static generateMigrationQuery(provider: DatabaseProviderName, migration: Migration): string[];
    /** Introspects the live database; resolves to null when the database does not exist. */
    static generateDatabaseDefinition(providerConfig: DatabaseProviderConfig): Promise<DatabaseDefinition | null>;
    /**
     * Executes migrations against a database.
     * - Creates direct connections (no pooling)
     * - Wraps post-database queries in a transaction
     * - Handles pre-database actions (CREATE DATABASE) separately from post-database actions
     */
    static executeMigrations(providerConfig: DatabaseProviderConfig, migrations: Migration[]): Promise<void>;
    private static executePostgresMigrations;
    private static executeSqlServerMigrations;
}
/** Concrete Postgres column types used by the default logical-type mappings. */
export declare const PostgresColumnType: {
    readonly text: "TEXT";
    readonly bigInt: "BIGINT";
    readonly boolean: "BOOLEAN";
    readonly jsonb: "JSONB";
};
export type PostgresColumnType = typeof PostgresColumnType[keyof typeof PostgresColumnType];
/** Concrete SQL Server column types used by the default logical-type mappings. */
export declare const SqlServerColumnType: {
    readonly nvarchar: "NVARCHAR(MAX)";
    readonly bigInt: "BIGINT";
    readonly bit: "BIT";
    // NOTE(review): nvarchar and nvarcharMax are aliases for the same SQL type — confirm intentional.
    readonly nvarcharMax: "NVARCHAR(MAX)";
};
export type SqlServerColumnType = typeof SqlServerColumnType[keyof typeof SqlServerColumnType];
/**
 * Pluggable backend contract: maps logical column types and migration actions
 * to provider-specific SQL, and introspects a live database via `client`
 * (the provider's own connection type, hence the loose T).
 */
export interface DatabaseProvider<T = any> {
    name: DatabaseProviderName;
    /** Default SQL type and default-value expression per logical column type. */
    columnMappings: Record<ColumnType, {
        type: string;
        default: string;
    }>;
    /** One SQL generator per migration action kind, keyed by the action's discriminant. */
    actions: {
        [K in MigrationActions['type']]: (action: Extract<MigrationActions, {
            type: K;
        }>) => string;
    };
    /** Builds a DatabaseDefinition by introspecting the connected database. */
    generateDefinition: (client: T) => Promise<DatabaseDefinition>;
}
/** Dispatches type mapping, SQL generation, and introspection to the registered providers. */
export declare class DatabaseTranslator {
    static providers: Record<DatabaseProviderName, DatabaseProvider>;
    static mapColumnType(provider: DatabaseProviderName, column: ColumnDefinition | ColumnType): string;
    static getDefaultForColumnType(provider: DatabaseProviderName, column: ColumnDefinition | ColumnType): string;
    static getMigrationActionQuery(provider: DatabaseProviderName, action: MigrationActions): string;
    static generateDefinition(provider: DatabaseProviderName, client: any): Promise<DatabaseDefinition>;
}
|
|
@@ -0,0 +1,997 @@
|
|
|
1
|
+
import { $Hash } from '@vorplex/core';
|
|
2
|
+
import * as mssql from 'mssql';
|
|
3
|
+
import { Client } from 'pg';
|
|
4
|
+
export class $DatabaseMigrator {
|
|
5
|
+
static async generateMigration(from, to, options) {
|
|
6
|
+
const actions = [];
|
|
7
|
+
// Database doesn't exist - create everything
|
|
8
|
+
if (from === null) {
|
|
9
|
+
const preDatabaseActions = [
|
|
10
|
+
{
|
|
11
|
+
type: 'create-database',
|
|
12
|
+
name: to.name
|
|
13
|
+
}
|
|
14
|
+
];
|
|
15
|
+
const postDatabaseActions = [];
|
|
16
|
+
// Create all tables (indexes are created as part of table definition)
|
|
17
|
+
for (const toTable of Object.values(to.tables)) {
|
|
18
|
+
postDatabaseActions.push({
|
|
19
|
+
type: 'create-table',
|
|
20
|
+
definition: toTable
|
|
21
|
+
});
|
|
22
|
+
}
|
|
23
|
+
return {
|
|
24
|
+
hash: null,
|
|
25
|
+
preDatabaseActions,
|
|
26
|
+
postDatabaseActions
|
|
27
|
+
};
|
|
28
|
+
}
|
|
29
|
+
// Detect dropped tables
|
|
30
|
+
for (const [tableId, fromTable] of Object.entries(from.tables)) {
|
|
31
|
+
if (!to.tables[tableId]) {
|
|
32
|
+
actions.push({
|
|
33
|
+
type: 'drop-table',
|
|
34
|
+
name: fromTable.name,
|
|
35
|
+
policy: options?.strategy === 'archive'
|
|
36
|
+
? {
|
|
37
|
+
strategy: 'archive',
|
|
38
|
+
table: `${fromTable.name}_archive_${from.version.replace(/\./g, '_')}`
|
|
39
|
+
}
|
|
40
|
+
: undefined
|
|
41
|
+
});
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
// Process each table
|
|
45
|
+
for (const [tableId, toTable] of Object.entries(to.tables)) {
|
|
46
|
+
const fromTable = from.tables[tableId];
|
|
47
|
+
// New table - create it
|
|
48
|
+
if (!fromTable) {
|
|
49
|
+
actions.push({
|
|
50
|
+
type: 'create-table',
|
|
51
|
+
definition: toTable
|
|
52
|
+
});
|
|
53
|
+
continue;
|
|
54
|
+
}
|
|
55
|
+
// Table exists - check if we need to recreate or just rename
|
|
56
|
+
const needsRecreation = this.tableNeedsRecreation(fromTable, toTable, from.provider);
|
|
57
|
+
if (needsRecreation) {
|
|
58
|
+
// Recreate the entire table (handles all schema changes)
|
|
59
|
+
actions.push({
|
|
60
|
+
type: 'recreate-table',
|
|
61
|
+
oldTable: fromTable,
|
|
62
|
+
newTable: toTable,
|
|
63
|
+
policy: options?.strategy === 'archive'
|
|
64
|
+
? {
|
|
65
|
+
strategy: 'archive',
|
|
66
|
+
table: `${fromTable.name}_archive_${from.version.replace(/\./g, '_')}`
|
|
67
|
+
}
|
|
68
|
+
: undefined
|
|
69
|
+
});
|
|
70
|
+
}
|
|
71
|
+
else {
|
|
72
|
+
// Only metadata changes needed
|
|
73
|
+
// Handle table rename (metadata-only, safe)
|
|
74
|
+
if (fromTable.name !== toTable.name) {
|
|
75
|
+
actions.push({
|
|
76
|
+
type: 'rename-table',
|
|
77
|
+
from: fromTable.name,
|
|
78
|
+
to: toTable.name
|
|
79
|
+
});
|
|
80
|
+
}
|
|
81
|
+
// Handle column renames (metadata-only, safe)
|
|
82
|
+
for (const [columnId, toColumn] of Object.entries(toTable.columns)) {
|
|
83
|
+
const fromColumn = fromTable.columns[columnId];
|
|
84
|
+
if (fromColumn && fromColumn.name !== toColumn.name) {
|
|
85
|
+
actions.push({
|
|
86
|
+
type: 'rename-column',
|
|
87
|
+
table: toTable.name,
|
|
88
|
+
from: fromColumn.name,
|
|
89
|
+
to: toColumn.name
|
|
90
|
+
});
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
// Handle new nullable columns (safe, metadata-only)
|
|
94
|
+
for (const [columnId, toColumn] of Object.entries(toTable.columns)) {
|
|
95
|
+
if (!fromTable.columns[columnId] && (toColumn.nullable ?? true)) {
|
|
96
|
+
actions.push({
|
|
97
|
+
type: 'add-column',
|
|
98
|
+
table: toTable.name,
|
|
99
|
+
definition: toColumn
|
|
100
|
+
});
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
// Handle dropped columns (safe)
|
|
104
|
+
for (const [columnId, fromColumn] of Object.entries(fromTable.columns)) {
|
|
105
|
+
if (!toTable.columns[columnId]) {
|
|
106
|
+
actions.push({
|
|
107
|
+
type: 'drop-column',
|
|
108
|
+
table: toTable.name,
|
|
109
|
+
name: fromColumn.name
|
|
110
|
+
});
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
return {
|
|
116
|
+
hash: await $Hash.generateSha256Base64(from),
|
|
117
|
+
preDatabaseActions: [],
|
|
118
|
+
postDatabaseActions: actions
|
|
119
|
+
};
|
|
120
|
+
}
|
|
121
|
+
/**
|
|
122
|
+
* Determines if a table needs to be recreated based on schema changes.
|
|
123
|
+
* Returns true for any changes beyond simple metadata operations.
|
|
124
|
+
*
|
|
125
|
+
* Note: This compares tables from the SAME provider (e.g., comparing current postgres DB to desired postgres schema).
|
|
126
|
+
* Overrides are compared only for the provider being used, not all providers.
|
|
127
|
+
*/
|
|
128
|
+
static tableNeedsRecreation(fromTable, toTable, provider) {
|
|
129
|
+
// Check for column type changes
|
|
130
|
+
for (const [columnId, fromColumn] of Object.entries(fromTable.columns)) {
|
|
131
|
+
const toColumn = toTable.columns[columnId];
|
|
132
|
+
if (toColumn) {
|
|
133
|
+
// Column type changed
|
|
134
|
+
if (fromColumn.type !== toColumn.type)
|
|
135
|
+
return true;
|
|
136
|
+
// Column overrides changed (only compare the current provider's overrides)
|
|
137
|
+
// When comparing, we only care about the override for the provider we're actually using
|
|
138
|
+
const fromOverride = fromColumn.overrides?.[provider];
|
|
139
|
+
const toOverride = toColumn.overrides?.[provider];
|
|
140
|
+
if (JSON.stringify(fromOverride ?? null) !== JSON.stringify(toOverride ?? null))
|
|
141
|
+
return true;
|
|
142
|
+
// Nullability changed
|
|
143
|
+
if ((fromColumn.nullable ?? true) !== (toColumn.nullable ?? true))
|
|
144
|
+
return true;
|
|
145
|
+
}
|
|
146
|
+
}
|
|
147
|
+
// Check for new non-nullable columns (requires default value handling)
|
|
148
|
+
for (const [columnId, toColumn] of Object.entries(toTable.columns)) {
|
|
149
|
+
if (!fromTable.columns[columnId] && !(toColumn.nullable ?? true)) {
|
|
150
|
+
return true;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
// Check for constraint changes (semantic comparison, ignoring database-generated names)
|
|
154
|
+
if (!this.constraintsMatch(fromTable.constraints ?? {}, toTable.constraints ?? {})) {
|
|
155
|
+
return true;
|
|
156
|
+
}
|
|
157
|
+
// Check for index changes (semantic comparison, ignoring database-generated names)
|
|
158
|
+
if (!this.indexesMatch(fromTable.indexes ?? {}, toTable.indexes ?? {})) {
|
|
159
|
+
return true;
|
|
160
|
+
}
|
|
161
|
+
return false;
|
|
162
|
+
}
|
|
163
|
+
/**
|
|
164
|
+
* Compares constraints semantically, ignoring database-generated names.
|
|
165
|
+
* Constraints are considered equal if they have the same semantic keys and types.
|
|
166
|
+
*/
|
|
167
|
+
static constraintsMatch(fromConstraints, toConstraints) {
|
|
168
|
+
const fromKeys = Object.keys(fromConstraints).sort();
|
|
169
|
+
const toKeys = Object.keys(toConstraints).sort();
|
|
170
|
+
// Different number of constraints
|
|
171
|
+
if (fromKeys.length !== toKeys.length)
|
|
172
|
+
return false;
|
|
173
|
+
// Different semantic keys
|
|
174
|
+
if (JSON.stringify(fromKeys) !== JSON.stringify(toKeys))
|
|
175
|
+
return false;
|
|
176
|
+
// Check each constraint's type and columns (ignore name field)
|
|
177
|
+
for (const key of fromKeys) {
|
|
178
|
+
const fromConstraint = fromConstraints[key];
|
|
179
|
+
const toConstraint = toConstraints[key];
|
|
180
|
+
if (fromConstraint.type !== toConstraint.type)
|
|
181
|
+
return false;
|
|
182
|
+
if (fromConstraint.type === 'primary-key' && toConstraint.type === 'primary-key') {
|
|
183
|
+
if (fromConstraint.column !== toConstraint.column)
|
|
184
|
+
return false;
|
|
185
|
+
}
|
|
186
|
+
else if (fromConstraint.type === 'composite-primary-key' && toConstraint.type === 'composite-primary-key') {
|
|
187
|
+
if (JSON.stringify(fromConstraint.columns) !== JSON.stringify(toConstraint.columns))
|
|
188
|
+
return false;
|
|
189
|
+
}
|
|
190
|
+
else if (fromConstraint.type === 'unique' && toConstraint.type === 'unique') {
|
|
191
|
+
if (fromConstraint.column !== toConstraint.column)
|
|
192
|
+
return false;
|
|
193
|
+
}
|
|
194
|
+
}
|
|
195
|
+
return true;
|
|
196
|
+
}
|
|
197
|
+
/**
|
|
198
|
+
* Compares indexes semantically, ignoring database-generated names.
|
|
199
|
+
* Indexes are considered equal if they have the same semantic keys, columns, and uniqueness.
|
|
200
|
+
*/
|
|
201
|
+
static indexesMatch(fromIndexes, toIndexes) {
|
|
202
|
+
const fromKeys = Object.keys(fromIndexes).sort();
|
|
203
|
+
const toKeys = Object.keys(toIndexes).sort();
|
|
204
|
+
// Different number of indexes
|
|
205
|
+
if (fromKeys.length !== toKeys.length)
|
|
206
|
+
return false;
|
|
207
|
+
// Different semantic keys
|
|
208
|
+
if (JSON.stringify(fromKeys) !== JSON.stringify(toKeys))
|
|
209
|
+
return false;
|
|
210
|
+
// Check each index's columns and uniqueness (ignore name field)
|
|
211
|
+
for (const key of fromKeys) {
|
|
212
|
+
const fromIndex = fromIndexes[key];
|
|
213
|
+
const toIndex = toIndexes[key];
|
|
214
|
+
if (JSON.stringify(fromIndex.columns) !== JSON.stringify(toIndex.columns))
|
|
215
|
+
return false;
|
|
216
|
+
if ((fromIndex.unique ?? false) !== (toIndex.unique ?? false))
|
|
217
|
+
return false;
|
|
218
|
+
}
|
|
219
|
+
return true;
|
|
220
|
+
}
|
|
221
|
+
/**
|
|
222
|
+
* Generates SQL queries for pre-database migration actions (e.g., CREATE DATABASE).
|
|
223
|
+
* These queries must be executed with a connection to the server WITHOUT specifying a database.
|
|
224
|
+
*/
|
|
225
|
+
static generatePreDatabaseMigrationQuery(provider, migration) {
|
|
226
|
+
return migration.preDatabaseActions.map(action => DatabaseTranslator.getMigrationActionQuery(provider, action));
|
|
227
|
+
}
|
|
228
|
+
/**
|
|
229
|
+
* Generates SQL queries for post-database migration actions (e.g., CREATE TABLE, CREATE INDEX).
|
|
230
|
+
* These queries must be executed with a connection to the TARGET database.
|
|
231
|
+
*/
|
|
232
|
+
static generatePostDatabaseMigrationQuery(provider, migration) {
|
|
233
|
+
return migration.postDatabaseActions.map(action => DatabaseTranslator.getMigrationActionQuery(provider, action));
|
|
234
|
+
}
|
|
235
|
+
/**
|
|
236
|
+
* Generates all SQL queries for the migration.
|
|
237
|
+
* Note: If preDatabaseActions exist, you must reconnect to the created database before executing postDatabaseActions.
|
|
238
|
+
*/
|
|
239
|
+
static generateMigrationQuery(provider, migration) {
|
|
240
|
+
return [
|
|
241
|
+
...migration.preDatabaseActions.map(action => DatabaseTranslator.getMigrationActionQuery(provider, action)),
|
|
242
|
+
...migration.postDatabaseActions.map(action => DatabaseTranslator.getMigrationActionQuery(provider, action))
|
|
243
|
+
];
|
|
244
|
+
}
|
|
245
|
+
static async generateDatabaseDefinition(providerConfig) {
|
|
246
|
+
if (!providerConfig.database) {
|
|
247
|
+
throw new Error('Database name is required to generate database definition');
|
|
248
|
+
}
|
|
249
|
+
if (providerConfig.provider === 'postgres') {
|
|
250
|
+
const client = new Client({
|
|
251
|
+
host: providerConfig.host,
|
|
252
|
+
port: providerConfig.port || 5432,
|
|
253
|
+
user: providerConfig.user,
|
|
254
|
+
password: providerConfig.password,
|
|
255
|
+
database: 'postgres' // Connect to default postgres database first
|
|
256
|
+
});
|
|
257
|
+
try {
|
|
258
|
+
await client.connect();
|
|
259
|
+
// Check if the target database exists
|
|
260
|
+
const result = await client.query('SELECT 1 FROM pg_database WHERE datname = $1', [providerConfig.database]);
|
|
261
|
+
if (result.rows.length === 0) {
|
|
262
|
+
return null;
|
|
263
|
+
}
|
|
264
|
+
await client.end();
|
|
265
|
+
// Reconnect to the target database
|
|
266
|
+
const targetClient = new Client({
|
|
267
|
+
host: providerConfig.host,
|
|
268
|
+
port: providerConfig.port || 5432,
|
|
269
|
+
user: providerConfig.user,
|
|
270
|
+
password: providerConfig.password,
|
|
271
|
+
database: providerConfig.database
|
|
272
|
+
});
|
|
273
|
+
try {
|
|
274
|
+
await targetClient.connect();
|
|
275
|
+
return await DatabaseTranslator.generateDefinition('postgres', targetClient);
|
|
276
|
+
}
|
|
277
|
+
finally {
|
|
278
|
+
await targetClient.end();
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
catch (error) {
|
|
282
|
+
await client.end();
|
|
283
|
+
throw error;
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
else if (providerConfig.provider === 'sql') {
|
|
287
|
+
const config = {
|
|
288
|
+
server: providerConfig.host,
|
|
289
|
+
port: providerConfig.port || 1433,
|
|
290
|
+
user: providerConfig.user,
|
|
291
|
+
password: providerConfig.password,
|
|
292
|
+
options: {
|
|
293
|
+
encrypt: true,
|
|
294
|
+
trustServerCertificate: true
|
|
295
|
+
}
|
|
296
|
+
};
|
|
297
|
+
const connection = new mssql.ConnectionPool(config);
|
|
298
|
+
try {
|
|
299
|
+
await connection.connect();
|
|
300
|
+
// Check if the target database exists
|
|
301
|
+
const result = await connection.request().query(`SELECT 1 FROM sys.databases WHERE name = '${providerConfig.database}'`);
|
|
302
|
+
if (result.recordset.length === 0) {
|
|
303
|
+
return null;
|
|
304
|
+
}
|
|
305
|
+
await connection.close();
|
|
306
|
+
// Reconnect to the target database
|
|
307
|
+
const targetConfig = {
|
|
308
|
+
server: providerConfig.host,
|
|
309
|
+
port: providerConfig.port || 1433,
|
|
310
|
+
user: providerConfig.user,
|
|
311
|
+
password: providerConfig.password,
|
|
312
|
+
database: providerConfig.database,
|
|
313
|
+
options: {
|
|
314
|
+
encrypt: true,
|
|
315
|
+
trustServerCertificate: true
|
|
316
|
+
}
|
|
317
|
+
};
|
|
318
|
+
const targetConnection = new mssql.ConnectionPool(targetConfig);
|
|
319
|
+
try {
|
|
320
|
+
await targetConnection.connect();
|
|
321
|
+
return await DatabaseTranslator.generateDefinition('sql', targetConnection);
|
|
322
|
+
}
|
|
323
|
+
finally {
|
|
324
|
+
await targetConnection.close();
|
|
325
|
+
}
|
|
326
|
+
}
|
|
327
|
+
catch (error) {
|
|
328
|
+
await connection.close();
|
|
329
|
+
throw error;
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
else {
|
|
333
|
+
throw new Error(`Unsupported provider: ${providerConfig.provider}`);
|
|
334
|
+
}
|
|
335
|
+
}
|
|
336
|
+
/**
|
|
337
|
+
* Executes migrations against a database.
|
|
338
|
+
* - Creates direct connections (no pooling)
|
|
339
|
+
* - Wraps post-database queries in a transaction
|
|
340
|
+
* - Handles pre-database actions (CREATE DATABASE) separately from post-database actions
|
|
341
|
+
*/
|
|
342
|
+
static async executeMigrations(providerConfig, migrations) {
|
|
343
|
+
if (providerConfig.provider === 'postgres') {
|
|
344
|
+
await this.executePostgresMigrations(providerConfig, migrations);
|
|
345
|
+
}
|
|
346
|
+
else if (providerConfig.provider === 'sql') {
|
|
347
|
+
await this.executeSqlServerMigrations(providerConfig, migrations);
|
|
348
|
+
}
|
|
349
|
+
else {
|
|
350
|
+
throw new Error(`Unsupported provider: ${providerConfig.provider}`);
|
|
351
|
+
}
|
|
352
|
+
}
|
|
353
|
+
static async executePostgresMigrations(providerConfig, migrations) {
|
|
354
|
+
for (const migration of migrations) {
|
|
355
|
+
// Execute pre-database actions (e.g., CREATE DATABASE) without specifying a database
|
|
356
|
+
if (migration.preDatabaseActions.length > 0) {
|
|
357
|
+
const client = new Client({
|
|
358
|
+
host: providerConfig.host,
|
|
359
|
+
port: providerConfig.port || 5432,
|
|
360
|
+
user: providerConfig.user,
|
|
361
|
+
password: providerConfig.password,
|
|
362
|
+
database: 'postgres' // Connect to default postgres database
|
|
363
|
+
});
|
|
364
|
+
try {
|
|
365
|
+
await client.connect();
|
|
366
|
+
for (const action of migration.preDatabaseActions) {
|
|
367
|
+
const query = DatabaseTranslator.getMigrationActionQuery('postgres', action);
|
|
368
|
+
await client.query(query);
|
|
369
|
+
}
|
|
370
|
+
}
|
|
371
|
+
finally {
|
|
372
|
+
await client.end();
|
|
373
|
+
}
|
|
374
|
+
}
|
|
375
|
+
// Execute post-database actions (e.g., CREATE TABLE, CREATE INDEX) in a transaction
|
|
376
|
+
if (migration.postDatabaseActions.length > 0) {
|
|
377
|
+
const targetDatabase = providerConfig.database;
|
|
378
|
+
if (!targetDatabase) {
|
|
379
|
+
throw new Error('Database name is required for post-database migration actions');
|
|
380
|
+
}
|
|
381
|
+
const client = new Client({
|
|
382
|
+
host: providerConfig.host,
|
|
383
|
+
port: providerConfig.port || 5432,
|
|
384
|
+
user: providerConfig.user,
|
|
385
|
+
password: providerConfig.password,
|
|
386
|
+
database: targetDatabase
|
|
387
|
+
});
|
|
388
|
+
try {
|
|
389
|
+
await client.connect();
|
|
390
|
+
await client.query('BEGIN');
|
|
391
|
+
for (const action of migration.postDatabaseActions) {
|
|
392
|
+
const query = DatabaseTranslator.getMigrationActionQuery('postgres', action);
|
|
393
|
+
await client.query(query);
|
|
394
|
+
}
|
|
395
|
+
await client.query('COMMIT');
|
|
396
|
+
}
|
|
397
|
+
catch (error) {
|
|
398
|
+
await client.query('ROLLBACK');
|
|
399
|
+
throw error;
|
|
400
|
+
}
|
|
401
|
+
finally {
|
|
402
|
+
await client.end();
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
}
|
|
406
|
+
}
|
|
407
|
+
static async executeSqlServerMigrations(providerConfig, migrations) {
|
|
408
|
+
for (const migration of migrations) {
|
|
409
|
+
// Execute pre-database actions (e.g., CREATE DATABASE) without specifying a database
|
|
410
|
+
if (migration.preDatabaseActions.length > 0) {
|
|
411
|
+
const config = {
|
|
412
|
+
server: providerConfig.host,
|
|
413
|
+
port: providerConfig.port || 1433,
|
|
414
|
+
user: providerConfig.user,
|
|
415
|
+
password: providerConfig.password,
|
|
416
|
+
options: {
|
|
417
|
+
encrypt: true,
|
|
418
|
+
trustServerCertificate: true
|
|
419
|
+
}
|
|
420
|
+
};
|
|
421
|
+
const connection = new mssql.ConnectionPool(config);
|
|
422
|
+
try {
|
|
423
|
+
await connection.connect();
|
|
424
|
+
for (const action of migration.preDatabaseActions) {
|
|
425
|
+
const query = DatabaseTranslator.getMigrationActionQuery('sql', action);
|
|
426
|
+
await connection.request().query(query);
|
|
427
|
+
}
|
|
428
|
+
}
|
|
429
|
+
finally {
|
|
430
|
+
await connection.close();
|
|
431
|
+
}
|
|
432
|
+
}
|
|
433
|
+
// Execute post-database actions (e.g., CREATE TABLE, CREATE INDEX) in a transaction
|
|
434
|
+
if (migration.postDatabaseActions.length > 0) {
|
|
435
|
+
const targetDatabase = providerConfig.database;
|
|
436
|
+
if (!targetDatabase) {
|
|
437
|
+
throw new Error('Database name is required for post-database migration actions');
|
|
438
|
+
}
|
|
439
|
+
const config = {
|
|
440
|
+
server: providerConfig.host,
|
|
441
|
+
port: providerConfig.port || 1433,
|
|
442
|
+
user: providerConfig.user,
|
|
443
|
+
password: providerConfig.password,
|
|
444
|
+
database: targetDatabase,
|
|
445
|
+
options: {
|
|
446
|
+
encrypt: true,
|
|
447
|
+
trustServerCertificate: true
|
|
448
|
+
}
|
|
449
|
+
};
|
|
450
|
+
const connection = new mssql.ConnectionPool(config);
|
|
451
|
+
try {
|
|
452
|
+
await connection.connect();
|
|
453
|
+
const transaction = new mssql.Transaction(connection);
|
|
454
|
+
await transaction.begin();
|
|
455
|
+
try {
|
|
456
|
+
for (const action of migration.postDatabaseActions) {
|
|
457
|
+
const query = DatabaseTranslator.getMigrationActionQuery('sql', action);
|
|
458
|
+
await transaction.request().query(query);
|
|
459
|
+
}
|
|
460
|
+
await transaction.commit();
|
|
461
|
+
}
|
|
462
|
+
catch (error) {
|
|
463
|
+
// SQL Server automatically aborts transactions on certain errors
|
|
464
|
+
// Attempt rollback but ignore errors if transaction was already aborted
|
|
465
|
+
try {
|
|
466
|
+
await transaction.rollback();
|
|
467
|
+
}
|
|
468
|
+
catch (rollbackError) {
|
|
469
|
+
// Transaction was already aborted, ignore rollback error
|
|
470
|
+
}
|
|
471
|
+
throw error;
|
|
472
|
+
}
|
|
473
|
+
}
|
|
474
|
+
finally {
|
|
475
|
+
await connection.close();
|
|
476
|
+
}
|
|
477
|
+
}
|
|
478
|
+
}
|
|
479
|
+
}
|
|
480
|
+
}
|
|
481
|
+
// Concrete Postgres SQL types backing the logical column types
// ('string' -> TEXT, 'number' -> BIGINT, 'boolean' -> BOOLEAN, 'json' -> JSONB).
export const PostgresColumnType = {
    text: 'TEXT',
    bigInt: 'BIGINT',
    boolean: 'BOOLEAN',
    jsonb: 'JSONB'
};
|
|
487
|
+
// Concrete SQL Server types backing the logical column types.
// NOTE(review): nvarchar and nvarcharMax are aliases for the same SQL type —
// presumably kept for API symmetry; confirm intentional.
export const SqlServerColumnType = {
    nvarchar: 'NVARCHAR(MAX)',
    bigInt: 'BIGINT',
    bit: 'BIT',
    nvarcharMax: 'NVARCHAR(MAX)'
};
|
|
493
|
+
/**
 * Translates the provider-neutral database definition model into concrete SQL
 * for each supported provider ('postgres' and 'sql' / SQL Server), and
 * reverse-engineers a DatabaseDefinition from a live connection.
 *
 * Public surface (unchanged): static `providers`, `mapColumnType`,
 * `getDefaultForColumnType`, `getMigrationActionQuery`, `generateDefinition`.
 * The `*SQL` helpers are new, additive, and safe for external use.
 */
export class DatabaseTranslator {
    /** Quote an identifier with the provider's delimiter style ([x] vs "x"). */
    static quoteIdentifier(provider, name) {
        return provider === 'sql' ? `[${name}]` : `"${name}"`;
    }
    /** Render one column clause: quoted name, mapped type, nullability. */
    static columnSQL(provider, column) {
        return `${this.quoteIdentifier(provider, column.name)} ${this.mapColumnType(provider, column)}${column.nullable ? '' : ' NOT NULL'}`;
    }
    /** Render a table-level constraint clause; unknown types render as ''. */
    static constraintSQL(provider, c) {
        switch (c.type) {
            case 'primary-key':
                return `PRIMARY KEY (${this.quoteIdentifier(provider, c.column)})`;
            case 'composite-primary-key':
                return `PRIMARY KEY (${c.columns.map(col => this.quoteIdentifier(provider, col)).join(', ')})`;
            case 'unique':
                return `UNIQUE (${this.quoteIdentifier(provider, c.column)})`;
            default:
                return '';
        }
    }
    /** Render CREATE TABLE for `table`, created under the name `name`. */
    static createTableSQL(provider, table, name) {
        const columns = Object.values(table.columns).map(col => this.columnSQL(provider, col));
        const constraints = Object
            .values(table.constraints ?? {})
            .map(c => this.constraintSQL(provider, c))
            .filter(Boolean);
        return `CREATE TABLE ${this.quoteIdentifier(provider, name)} (${[...columns, ...constraints].join(', ')});`;
    }
    /** Render a CREATE [UNIQUE] INDEX statement on `tableName`. */
    static createIndexSQL(provider, tableName, idx) {
        return `CREATE ${idx.unique ? 'UNIQUE ' : ''}INDEX ${this.quoteIdentifier(provider, idx.name)} ON ${this.quoteIdentifier(provider, tableName)} (${idx.columns.map(c => this.quoteIdentifier(provider, c)).join(', ')});`;
    }
    /**
     * Shared 'recreate-table' pipeline: create replacement table, bulk-copy the
     * columns common to both schemas, build indexes, then swap the tables.
     * `rename(from, to)` supplies the provider-specific rename statement.
     */
    static recreateTableSQL(provider, action, rename) {
        const oldTable = action.oldTable;
        const newTable = action.newTable;
        const tempTableName = `${newTable.name}_new`;
        const archiveTableName = action.policy?.table;
        const queries = [];
        // 1. Create new table with target schema (indexes deferred for performance).
        queries.push(this.createTableSQL(provider, newTable, tempTableName));
        // 2. Copy data with bulk INSERT…SELECT, restricted to columns present in
        //    both the old and new schemas (keys are column ids in the definition).
        const commonColumns = Object.keys(newTable.columns).filter(colId => oldTable.columns[colId]);
        if (commonColumns.length > 0) {
            const columnNames = commonColumns
                .map(colId => this.quoteIdentifier(provider, newTable.columns[colId].name))
                .join(', ');
            queries.push(`INSERT INTO ${this.quoteIdentifier(provider, tempTableName)} (${columnNames}) SELECT ${columnNames} FROM ${this.quoteIdentifier(provider, oldTable.name)};`);
        }
        // 3. Create indexes only after the copy, so the bulk insert is not slowed
        //    by incremental index maintenance.
        for (const idx of Object.values(newTable.indexes ?? {})) {
            queries.push(this.createIndexSQL(provider, tempTableName, idx));
        }
        // 4. Archive (rename) or drop the original table per the drop policy.
        if (archiveTableName) {
            queries.push(rename(oldTable.name, archiveTableName));
        }
        else {
            queries.push(`DROP TABLE ${this.quoteIdentifier(provider, oldTable.name)};`);
        }
        // 5. Move the replacement into place under the original name.
        queries.push(rename(tempTableName, newTable.name));
        return queries.join('\n');
    }
    static providers = {
        postgres: {
            name: 'postgres',
            columnMappings: {
                string: {
                    type: PostgresColumnType.text,
                    default: `''`
                },
                number: {
                    type: PostgresColumnType.bigInt,
                    default: `0`
                },
                boolean: {
                    type: PostgresColumnType.boolean,
                    default: `FALSE`
                },
                json: {
                    type: PostgresColumnType.jsonb,
                    // FIX: Postgres cast syntax is '::', not ':' — `'null':jsonb`
                    // was invalid SQL and broke any DEFAULT emitted for json columns.
                    default: `'null'::jsonb`
                }
            },
            /**
             * Introspect a live Postgres database (via a `pg` client) into a
             * DatabaseDefinition. Only the `public` schema is considered.
             */
            generateDefinition: async (client) => {
                // 1️⃣ Fetch tables
                const tablesRes = await client.query(`
        SELECT table_name
        FROM information_schema.tables
        WHERE table_schema = 'public' AND table_type='BASE TABLE';
      `);
                const tables = {};
                for (const row of tablesRes.rows) {
                    const tableName = row.table_name;
                    // 2️⃣ Fetch columns — filtered to 'public' so a same-named table
                    // in another schema cannot contaminate the result.
                    const columnsRes = await client.query(`
        SELECT column_name, is_nullable, data_type
        FROM information_schema.columns
        WHERE table_schema = 'public' AND table_name = $1
        ORDER BY ordinal_position;
      `, [tableName]);
                    const columns = {};
                    for (const col of columnsRes.rows) {
                        let type;
                        let override;
                        let sqlOverride;
                        switch (col.data_type) {
                            case 'bigint':
                            case 'integer':
                            case 'numeric':
                            case 'smallint':
                                type = 'number';
                                // Preserve the exact numeric type for round-tripping.
                                override = { type: col.data_type, default: '0' };
                                break;
                            case 'boolean':
                                type = 'boolean';
                                break;
                            case 'uuid':
                                type = 'string';
                                override = { type: 'UUID', default: 'uuidv7()' };
                                sqlOverride = { type: 'UNIQUEIDENTIFIER', default: 'NEWSEQUENTIALID()' };
                                break;
                            default:
                                type = 'string';
                        }
                        columns[col.column_name] = {
                            name: col.column_name,
                            type,
                            nullable: col.is_nullable === 'YES',
                            ...(override ? { overrides: { postgres: override, sql: sqlOverride } } : {})
                        };
                    }
                    // 3️⃣ Fetch constraints (PK / UNIQUE only)
                    const constraintsRes = await client.query(`
        SELECT
          tc.constraint_name,
          tc.constraint_type,
          kcu.column_name
        FROM
          information_schema.table_constraints AS tc
          LEFT JOIN information_schema.key_column_usage AS kcu
            ON tc.constraint_name = kcu.constraint_name
        WHERE tc.table_schema = 'public' AND tc.table_name = $1 AND tc.constraint_type IN ('PRIMARY KEY', 'UNIQUE');
      `, [tableName]);
                    const constraints = {};
                    for (const c of constraintsRes.rows) {
                        if (c.constraint_type === 'PRIMARY KEY') {
                            constraints[`pk:${c.column_name}`] = { name: c.constraint_name, type: 'primary-key', column: c.column_name };
                        }
                        else if (c.constraint_type === 'UNIQUE') {
                            constraints[`unique:${c.column_name}`] = { name: c.constraint_name, type: 'unique', column: c.column_name };
                            // SQL Server caps index key size, so a UNIQUE string column
                            // cannot be NVARCHAR(MAX): pin an indexable width for the
                            // sql provider unless an explicit override already exists.
                            if (columns[c.column_name].type === 'string' && !columns[c.column_name].overrides?.sql)
                                columns[c.column_name].overrides = Object.assign({}, columns[c.column_name].overrides, { sql: { type: 'NVARCHAR(450)', default: `''` } });
                        }
                    }
                    // 4️⃣ Fetch indexes
                    const indexesRes = await client.query(`
        SELECT
          indexname,
          indexdef
        FROM
          pg_indexes
        WHERE schemaname = 'public' AND tablename = $1;
      `, [tableName]);
                    // Exclude indexes that merely back the constraints collected above.
                    const constraintNames = Object.values(constraints).map(c => c.name);
                    const indexes = {};
                    for (const idx of indexesRes.rows) {
                        if (constraintNames.includes(idx.indexname)) {
                            continue;
                        }
                        // Simple parsing, assumes standard format:
                        // CREATE [UNIQUE] INDEX name ON table (col1, col2)
                        const match = idx.indexdef.match(/\((.+)\)/);
                        if (match) {
                            const cols = match[1].split(',').map(s => s.trim().replace(/"/g, ''));
                            const isUnique = idx.indexdef.startsWith('CREATE UNIQUE');
                            const indexDef = {
                                name: idx.indexname,
                                columns: cols,
                                unique: isUnique,
                            };
                            // FIX: sort a copy — `cols.sort()` mutated indexDef.columns
                            // in place, silently reordering the index's column order.
                            const key = `${isUnique ? 'unique' : 'index'}:${[...cols].sort().join(',')}`;
                            indexes[key] = indexDef;
                        }
                    }
                    tables[tableName] = {
                        name: tableName,
                        columns,
                        constraints: Object.keys(constraints).length ? constraints : undefined,
                        indexes: Object.keys(indexes).length ? indexes : undefined,
                    };
                }
                const res = await client.query('SELECT current_database()');
                return {
                    name: res.rows[0].current_database,
                    provider: 'postgres',
                    version: '1.0.0',
                    tables,
                };
            },
            actions: {
                'create-database': (action) => {
                    return `CREATE DATABASE "${action.name}";`;
                },
                'create-table': (action) => {
                    const table = action.definition;
                    const createTableSQL = DatabaseTranslator.createTableSQL('postgres', table, table.name);
                    // Indexes are created after the table itself (deferred for performance).
                    const createIndexSQL = Object.values(table.indexes ?? {}).map(idx => DatabaseTranslator.createIndexSQL('postgres', table.name, idx));
                    return [createTableSQL, ...createIndexSQL].join('\n');
                },
                'add-column': (action) => {
                    return `ALTER TABLE "${action.table}" ADD COLUMN ${DatabaseTranslator.columnSQL('postgres', action.definition)};`;
                },
                'rename-table': (action) => {
                    return `ALTER TABLE "${action.from}" RENAME TO "${action.to}";`;
                },
                'rename-column': (action) => {
                    return `ALTER TABLE "${action.table}" RENAME COLUMN "${action.from}" TO "${action.to}";`;
                },
                'drop-table': (action) => {
                    // Archive policy renames instead of destroying data.
                    if (action.policy?.strategy === 'archive' && action.policy.table) {
                        return `ALTER TABLE "${action.name}" RENAME TO "${action.policy.table}";`;
                    }
                    else {
                        return `DROP TABLE "${action.name}";`;
                    }
                },
                'drop-column': (action) => {
                    return `ALTER TABLE "${action.table}" DROP COLUMN "${action.name}";`;
                },
                'recreate-table': (action) => {
                    return DatabaseTranslator.recreateTableSQL('postgres', action, (from, to) => `ALTER TABLE "${from}" RENAME TO "${to}";`);
                }
            }
        },
        sql: {
            name: 'sql',
            columnMappings: {
                string: {
                    type: SqlServerColumnType.nvarchar,
                    default: `''`
                },
                number: {
                    type: SqlServerColumnType.bigInt,
                    default: `0`
                },
                boolean: {
                    type: SqlServerColumnType.bit,
                    default: `0`
                },
                json: {
                    type: SqlServerColumnType.nvarcharMax,
                    default: `'null'`
                }
            },
            /**
             * Introspect a live SQL Server database (via an `mssql` connection
             * pool) into a DatabaseDefinition.
             */
            generateDefinition: async (connectionPool) => {
                // 1️⃣ Fetch tables
                const tablesRes = await connectionPool.request().query(`
        SELECT TABLE_NAME
        FROM INFORMATION_SCHEMA.TABLES
        WHERE TABLE_TYPE = 'BASE TABLE' AND TABLE_CATALOG = DB_NAME();
      `);
                const tables = {};
                for (const row of tablesRes.recordset) {
                    const tableName = row.TABLE_NAME;
                    // 2️⃣ Fetch columns — FIX: table name is now bound as a query
                    // parameter instead of string-interpolated into the SQL.
                    const columnsRes = await connectionPool.request()
                        .input('tableName', tableName)
                        .query(`
        SELECT COLUMN_NAME, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH
        FROM INFORMATION_SCHEMA.COLUMNS
        WHERE TABLE_NAME = @tableName
        ORDER BY ORDINAL_POSITION;
      `);
                    const columns = {};
                    for (const col of columnsRes.recordset) {
                        let type;
                        let override;
                        let pgOverride;
                        switch (col.DATA_TYPE) {
                            case 'bigint':
                            case 'int':
                            case 'smallint':
                            case 'tinyint':
                            case 'numeric':
                            case 'decimal':
                            case 'float':
                            case 'real':
                                type = 'number';
                                break;
                            case 'bit':
                                type = 'boolean';
                                break;
                            case 'uniqueidentifier':
                                type = 'string';
                                override = { type: 'UNIQUEIDENTIFIER', default: 'NEWSEQUENTIALID()' };
                                pgOverride = { type: 'UUID', default: 'uuidv7()' };
                                break;
                            default:
                                type = 'string';
                                // CHARACTER_MAXIMUM_LENGTH of -1 denotes (MAX).
                                override = { type: `${col.DATA_TYPE}(${col.CHARACTER_MAXIMUM_LENGTH === -1 ? 'MAX' : col.CHARACTER_MAXIMUM_LENGTH})`, default: `''` };
                        }
                        columns[col.COLUMN_NAME] = {
                            name: col.COLUMN_NAME,
                            type,
                            nullable: col.IS_NULLABLE === 'YES',
                            ...(override ? { overrides: { postgres: pgOverride, sql: override } } : {})
                        };
                    }
                    // 3️⃣ Fetch constraints (PK / UNIQUE only)
                    const constraintsRes = await connectionPool.request()
                        .input('tableName', tableName)
                        .query(`
        SELECT
          tc.CONSTRAINT_NAME,
          tc.CONSTRAINT_TYPE,
          kcu.COLUMN_NAME
        FROM
          INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc
          LEFT JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu
            ON tc.CONSTRAINT_NAME = kcu.CONSTRAINT_NAME
        WHERE tc.TABLE_NAME = @tableName AND tc.CONSTRAINT_TYPE IN ('PRIMARY KEY', 'UNIQUE');
      `);
                    const constraints = {};
                    for (const c of constraintsRes.recordset) {
                        if (c.CONSTRAINT_TYPE === 'PRIMARY KEY') {
                            constraints[`pk:${c.COLUMN_NAME}`] = { name: c.CONSTRAINT_NAME, type: 'primary-key', column: c.COLUMN_NAME };
                        }
                        else if (c.CONSTRAINT_TYPE === 'UNIQUE') {
                            constraints[`unique:${c.COLUMN_NAME}`] = { name: c.CONSTRAINT_NAME, type: 'unique', column: c.COLUMN_NAME };
                        }
                    }
                    // 4️⃣ Fetch indexes (exclude primary key and unique constraint
                    // indexes). FIX: WITHIN GROUP pins the aggregated column order to
                    // key_ordinal — plain STRING_AGG gives no ordering guarantee.
                    const indexesRes = await connectionPool.request()
                        .input('tableName', tableName)
                        .query(`
        SELECT
          i.name as index_name,
          i.is_unique,
          STRING_AGG(c.name, ',') WITHIN GROUP (ORDER BY ic.key_ordinal) as columns
        FROM
          sys.indexes i
          INNER JOIN sys.index_columns ic ON i.object_id = ic.object_id AND i.index_id = ic.index_id
          INNER JOIN sys.columns c ON ic.object_id = c.object_id AND ic.column_id = c.column_id
        WHERE
          i.object_id = OBJECT_ID(@tableName)
          AND i.is_primary_key = 0
          AND i.is_unique_constraint = 0
        GROUP BY i.name, i.is_unique;
      `);
                    const indexes = {};
                    for (const idx of indexesRes.recordset) {
                        const cols = idx.columns.split(',').map((s) => s.trim());
                        const indexDef = {
                            name: idx.index_name,
                            columns: cols,
                            unique: idx.is_unique
                        };
                        // FIX: sort a copy — `cols.sort()` mutated indexDef.columns
                        // in place, silently reordering the index's column order.
                        const key = `${idx.is_unique ? 'unique' : 'index'}:${[...cols].sort().join(',')}`;
                        indexes[key] = indexDef;
                    }
                    tables[tableName] = {
                        name: tableName,
                        columns,
                        constraints: Object.keys(constraints).length ? constraints : undefined,
                        indexes: Object.keys(indexes).length ? indexes : undefined,
                    };
                }
                const dbNameRes = await connectionPool.request().query('SELECT DB_NAME() as db_name');
                return {
                    name: dbNameRes.recordset[0].db_name,
                    provider: 'sql',
                    version: '1.0.0',
                    tables,
                };
            },
            actions: {
                'create-database': (action) => {
                    return `CREATE DATABASE [${action.name}];`;
                },
                'create-table': (action) => {
                    const table = action.definition;
                    const createTableSQL = DatabaseTranslator.createTableSQL('sql', table, table.name);
                    // Indexes are created after the table itself (deferred for performance).
                    const createIndexSQL = Object.values(table.indexes ?? {}).map(idx => DatabaseTranslator.createIndexSQL('sql', table.name, idx));
                    return [createTableSQL, ...createIndexSQL].join('\n');
                },
                'add-column': (action) => {
                    return `ALTER TABLE [${action.table}] ADD ${DatabaseTranslator.columnSQL('sql', action.definition)};`;
                },
                'rename-table': (action) => {
                    return `EXEC sp_rename '${action.from}', '${action.to}';`;
                },
                'rename-column': (action) => {
                    return `EXEC sp_rename '${action.table}.${action.from}', '${action.to}', 'COLUMN';`;
                },
                'drop-table': (action) => {
                    // Archive policy renames instead of destroying data.
                    if (action.policy?.strategy === 'archive' && action.policy.table) {
                        return `EXEC sp_rename '${action.name}', '${action.policy.table}';`;
                    }
                    else {
                        return `DROP TABLE [${action.name}];`;
                    }
                },
                'drop-column': (action) => {
                    return `ALTER TABLE [${action.table}] DROP COLUMN [${action.name}];`;
                },
                'recreate-table': (action) => {
                    return DatabaseTranslator.recreateTableSQL('sql', action, (from, to) => `EXEC sp_rename '${from}', '${to}';`);
                }
            }
        }
    };
    /**
     * Resolve the concrete SQL type for a column (or a bare ColumnType name).
     * Provider-specific overrides on the column win over the generic mapping.
     */
    static mapColumnType(provider, column) {
        if (typeof column === 'string')
            return this.providers[provider].columnMappings[column].type;
        if (column.overrides?.[provider])
            return column.overrides[provider].type;
        return this.providers[provider].columnMappings[column.type].type;
    }
    /**
     * Resolve the SQL default value literal for a column (or a bare ColumnType
     * name), honoring provider-specific overrides.
     */
    static getDefaultForColumnType(provider, column) {
        if (typeof column === 'string')
            return this.providers[provider].columnMappings[column].default;
        if (column.overrides?.[provider])
            return column.overrides[provider].default;
        return this.providers[provider].columnMappings[column.type].default;
    }
    /** Dispatch a migration action to the provider's SQL generator. */
    static getMigrationActionQuery(provider, action) {
        const handler = this.providers[provider].actions[action.type];
        return handler(action);
    }
    /** Introspect a live connection into a DatabaseDefinition. */
    static generateDefinition(provider, client) {
        return this.providers[provider].generateDefinition(client);
    }
}
|
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@vorplex/database",
|
|
3
|
-
"version": "0.0.
|
|
3
|
+
"version": "0.0.9",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"sideEffects": false,
|
|
6
6
|
"files": [
|
|
@@ -20,7 +20,7 @@
|
|
|
20
20
|
"mssql": "^11.0.1",
|
|
21
21
|
"pg": "^8.16.3",
|
|
22
22
|
"tslib": "^2.8.1",
|
|
23
|
-
"@vorplex/core": "0.0.
|
|
23
|
+
"@vorplex/core": "0.0.9"
|
|
24
24
|
},
|
|
25
25
|
"devDependencies": {
|
|
26
26
|
"@types/jest": "^29.5.2",
|