rake-db 2.4.12 → 2.4.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +1 -0
- package/dist/index.js +70 -27
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +70 -27
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.d.ts
CHANGED
@@ -40,6 +40,7 @@ declare type TextColumnCreator = () => TextColumn;
 declare type MigrationColumnTypes = Omit<ColumnTypes, 'text' | 'string' | 'enum'> & {
     text: TextColumnCreator;
     string: TextColumnCreator;
+    citext: TextColumnCreator;
     enum: (name: string) => EnumColumn;
 };
 declare type ColumnsShapeCallback = (t: MigrationColumnTypes & {
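The only change to the typings is the new `citext` creator on `MigrationColumnTypes`, which makes the Postgres case-insensitive text type available inside a migration's column callback. A minimal sketch of how it could be used (assumptions: `change` is imported from 'rake-db' or from your own db script depending on setup, the `emails` table and its columns are made up, and the `citext` extension is already installed on the database):

import { change } from 'rake-db'; // adjust the import to wherever your rake-db setup lives

change(async (db) => {
  await db.createTable('emails', (t) => ({
    id: t.serial().primaryKey(),
    // case-insensitive text column, available since this release
    address: t.citext(),
  }));
});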
package/dist/index.js
CHANGED
@@ -1715,9 +1715,9 @@ WHERE ${filterSchema("n.nspname")}`
        table_schema "schemaName",
        table_name "tableName",
        column_name "name",
-       …
-       …
-       data_type "…
+       typname "type",
+       n.nspname "typeSchema",
+       data_type = 'ARRAY' "isArray",
        character_maximum_length AS "maxChars",
        numeric_precision AS "numericPrecision",
        numeric_scale AS "numericScale",
@@ -1728,15 +1728,23 @@ WHERE ${filterSchema("n.nspname")}`
|
|
|
1728
1728
|
NULLIF(a.attcompression, '') AS compression,
|
|
1729
1729
|
pgd.description AS "comment"
|
|
1730
1730
|
FROM information_schema.columns c
|
|
1731
|
-
|
|
1731
|
+
JOIN pg_catalog.pg_statio_all_tables st
|
|
1732
1732
|
ON c.table_schema = st.schemaname
|
|
1733
1733
|
AND c.table_name = st.relname
|
|
1734
1734
|
LEFT JOIN pg_catalog.pg_description pgd
|
|
1735
1735
|
ON pgd.objoid = st.relid
|
|
1736
1736
|
AND pgd.objsubid = c.ordinal_position
|
|
1737
|
-
|
|
1737
|
+
JOIN pg_catalog.pg_attribute a
|
|
1738
1738
|
ON a.attrelid = st.relid
|
|
1739
1739
|
AND a.attnum = c.ordinal_position
|
|
1740
|
+
JOIN pg_catalog.pg_type t
|
|
1741
|
+
ON (
|
|
1742
|
+
CASE WHEN data_type = 'ARRAY'
|
|
1743
|
+
THEN typarray
|
|
1744
|
+
ELSE oid
|
|
1745
|
+
END
|
|
1746
|
+
) = atttypid
|
|
1747
|
+
JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
|
|
1740
1748
|
WHERE ${filterSchema("table_schema")}
|
|
1741
1749
|
ORDER BY c.ordinal_position`
|
|
1742
1750
|
);
|
|
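The rewritten query above resolves each column to its actual type name and schema. For an array column, `information_schema.columns` only reports `data_type = 'ARRAY'`, while `pg_attribute.atttypid` holds the OID of the array type; the element type's `pg_type` row carries that same OID in `typarray`, which is what the `CASE WHEN data_type = 'ARRAY' THEN typarray ELSE oid END` join relies on, with `pg_namespace` supplying the type's schema. A standalone sketch of the same lookup (assumptions: node-postgres, connection settings taken from the standard `PG*` environment variables, and a made-up table passed by name):

import { Client } from 'pg';

// Resolve each column of a table to its type name, treating ARRAY columns the
// same way the query above does: join pg_type through typarray for arrays.
async function describeColumns(table: string) {
  const client = new Client();
  await client.connect();
  const { rows } = await client.query(
    `SELECT c.column_name         AS "name",
            t.typname             AS "type",
            n.nspname             AS "typeSchema",
            c.data_type = 'ARRAY' AS "isArray"
     FROM information_schema.columns c
     JOIN pg_catalog.pg_class cl ON cl.relname = c.table_name
     JOIN pg_catalog.pg_namespace cn
       ON cn.oid = cl.relnamespace AND cn.nspname = c.table_schema
     JOIN pg_catalog.pg_attribute a
       ON a.attrelid = cl.oid AND a.attnum = c.ordinal_position
     JOIN pg_catalog.pg_type t
       ON (CASE WHEN c.data_type = 'ARRAY' THEN t.typarray ELSE t.oid END) = a.atttypid
     JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
     WHERE c.table_name = $1
     ORDER BY c.ordinal_position`,
    [table],
  );
  await client.end();
  return rows; // a `tags text[]` column comes back as { type: 'text', isArray: true }
}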
@@ -2043,7 +2051,7 @@ const fkeyActionMap = {
|
|
|
2043
2051
|
n: "SET NULL",
|
|
2044
2052
|
d: "SET DEFAULT"
|
|
2045
2053
|
};
|
|
2046
|
-
const structureToAst = async (
|
|
2054
|
+
const structureToAst = async (unsupportedTypes, db) => {
|
|
2047
2055
|
const ast = [];
|
|
2048
2056
|
const data = await getData(db);
|
|
2049
2057
|
for (const name of data.schemas) {
|
|
@@ -2071,19 +2079,24 @@ const structureToAst = async (config, db) => {
|
|
|
2071
2079
|
}
|
|
2072
2080
|
const domains = {};
|
|
2073
2081
|
for (const it of data.domains) {
|
|
2074
|
-
domains[`${it.schemaName}.${it.name}`] = getColumn(
|
|
2075
|
-
|
|
2076
|
-
|
|
2077
|
-
|
|
2078
|
-
|
|
2079
|
-
|
|
2080
|
-
|
|
2081
|
-
|
|
2082
|
+
domains[`${it.schemaName}.${it.name}`] = getColumn(
|
|
2083
|
+
unsupportedTypes,
|
|
2084
|
+
data,
|
|
2085
|
+
domains,
|
|
2086
|
+
{
|
|
2087
|
+
schemaName: it.schemaName,
|
|
2088
|
+
name: it.name,
|
|
2089
|
+
type: it.type,
|
|
2090
|
+
typeSchema: it.typeSchema,
|
|
2091
|
+
isArray: it.isArray,
|
|
2092
|
+
isSerial: false
|
|
2093
|
+
}
|
|
2094
|
+
);
|
|
2082
2095
|
}
|
|
2083
2096
|
for (const key in pendingTables) {
|
|
2084
2097
|
const { table, dependsOn } = pendingTables[key];
|
|
2085
2098
|
if (!dependsOn.size) {
|
|
2086
|
-
pushTableAst(
|
|
2099
|
+
pushTableAst(unsupportedTypes, ast, data, domains, table, pendingTables);
|
|
2087
2100
|
}
|
|
2088
2101
|
}
|
|
2089
2102
|
const outerFKeys = [];
|
|
@@ -2131,7 +2144,15 @@ const structureToAst = async (config, db) => {
|
|
|
2131
2144
|
outerFKeys.push([fkey, table]);
|
|
2132
2145
|
}
|
|
2133
2146
|
}
|
|
2134
|
-
pushTableAst(
|
|
2147
|
+
pushTableAst(
|
|
2148
|
+
unsupportedTypes,
|
|
2149
|
+
ast,
|
|
2150
|
+
data,
|
|
2151
|
+
domains,
|
|
2152
|
+
table,
|
|
2153
|
+
pendingTables,
|
|
2154
|
+
innerFKeys
|
|
2155
|
+
);
|
|
2135
2156
|
}
|
|
2136
2157
|
for (const [fkey, table] of outerFKeys) {
|
|
2137
2158
|
ast.push(__spreadProps(__spreadValues({}, foreignKeyToAst(fkey)), {
|
|
@@ -2191,7 +2212,7 @@ const getIsSerial = (item) => {
|
|
|
2191
2212
|
}
|
|
2192
2213
|
return false;
|
|
2193
2214
|
};
|
|
2194
|
-
const getColumn = (
|
|
2215
|
+
const getColumn = (unsupportedTypes, data, domains, _a) => {
|
|
2195
2216
|
var _b = _a, {
|
|
2196
2217
|
schemaName,
|
|
2197
2218
|
tableName,
|
|
@@ -2226,8 +2247,8 @@ const getColumn = (config, data, domains, _a) => {
|
|
|
2226
2247
|
column = new RakeDbEnumColumn({}, type, enumType.values);
|
|
2227
2248
|
} else {
|
|
2228
2249
|
column = new pqb.CustomTypeColumn({}, type);
|
|
2229
|
-
(_a2 =
|
|
2230
|
-
`${
|
|
2250
|
+
((_a2 = unsupportedTypes[type]) != null ? _a2 : unsupportedTypes[type] = []).push(
|
|
2251
|
+
`${schemaName}${tableName ? `.${tableName}` : ""}.${name}`
|
|
2231
2252
|
);
|
|
2232
2253
|
}
|
|
2233
2254
|
}
|
|
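The `((_a2 = unsupportedTypes[type]) != null ? _a2 : unsupportedTypes[type] = []).push(...)` expression in the compiled output above matches what bundlers emit for a nullish-assignment push: column paths are grouped under the type name that could not be mapped. A small sketch of the same idiom in plain TypeScript (it mirrors the build output rather than the library's source; the type and column path are made up):

// Accumulate "schema.table.column" paths per unrecognized type name.
const unsupportedTypes: Record<string, string[]> = {};

function recordUnsupported(
  type: string,
  schemaName: string,
  tableName: string | undefined,
  name: string,
) {
  // `??=` creates the array on first use; bundlers lower it to the
  // `(_a = obj[key]) != null ? _a : obj[key] = []` form seen in the diff.
  (unsupportedTypes[type] ??= []).push(
    `${schemaName}${tableName ? `.${tableName}` : ''}.${name}`,
  );
}

recordUnsupported('ltree', 'public', 'categories', 'path');
// unsupportedTypes is now { ltree: ['public.categories.path'] }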
@@ -2239,7 +2260,7 @@ const getColumnType = (type, isSerial) => {
|
|
|
2239
2260
|
return type;
|
|
2240
2261
|
return type === "int2" ? "smallserial" : type === "int4" ? "serial" : "bigserial";
|
|
2241
2262
|
};
|
|
2242
|
-
const pushTableAst = (
|
|
2263
|
+
const pushTableAst = (unsupportedTypes, ast, data, domains, table, pendingTables, innerFKeys = data.foreignKeys) => {
|
|
2243
2264
|
const { schemaName, name } = table;
|
|
2244
2265
|
const key = `${schemaName}.${table.name}`;
|
|
2245
2266
|
delete pendingTables[key];
|
|
@@ -2262,10 +2283,9 @@ const pushTableAst = (config, ast, data, domains, table, pendingTables, innerFKe
|
|
|
2262
2283
|
if (isSerial) {
|
|
2263
2284
|
item = __spreadProps(__spreadValues({}, item), { default: void 0 });
|
|
2264
2285
|
}
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
|
|
2268
|
-
isArray,
|
|
2286
|
+
let column = getColumn(unsupportedTypes, data, domains, __spreadProps(__spreadValues({}, item), {
|
|
2287
|
+
type: item.type,
|
|
2288
|
+
isArray: item.isArray,
|
|
2269
2289
|
isSerial
|
|
2270
2290
|
}));
|
|
2271
2291
|
if ((primaryKey == null ? void 0 : primaryKey.columnNames.length) === 1 && (primaryKey == null ? void 0 : primaryKey.columnNames[0]) === item.name) {
|
|
@@ -2346,7 +2366,14 @@ const pushTableAst = (config, ast, data, domains, table, pendingTables, innerFKe
|
|
|
2346
2366
|
for (const otherKey in pendingTables) {
|
|
2347
2367
|
const item = pendingTables[otherKey];
|
|
2348
2368
|
if (item.dependsOn.delete(key) && item.dependsOn.size === 0) {
|
|
2349
|
-
pushTableAst(
|
|
2369
|
+
pushTableAst(
|
|
2370
|
+
unsupportedTypes,
|
|
2371
|
+
ast,
|
|
2372
|
+
data,
|
|
2373
|
+
domains,
|
|
2374
|
+
item.table,
|
|
2375
|
+
pendingTables
|
|
2376
|
+
);
|
|
2350
2377
|
}
|
|
2351
2378
|
}
|
|
2352
2379
|
};
|
|
@@ -2518,10 +2545,11 @@ const createForeignKey = (item) => {
|
|
|
2518
2545
|
};
|
|
2519
2546
|
|
|
2520
2547
|
const pullDbStructure = async (options, config) => {
|
|
2521
|
-
var _a;
|
|
2548
|
+
var _a, _b, _c;
|
|
2522
2549
|
const adapter = new pqb.Adapter(options);
|
|
2523
2550
|
const db = new DbStructure(adapter);
|
|
2524
|
-
const
|
|
2551
|
+
const unsupportedTypes = {};
|
|
2552
|
+
const ast = await structureToAst(unsupportedTypes, db);
|
|
2525
2553
|
await adapter.close();
|
|
2526
2554
|
const result = astToMigration(config, ast);
|
|
2527
2555
|
if (!result)
|
|
@@ -2539,6 +2567,21 @@ const pullDbStructure = async (options, config) => {
|
|
|
2539
2567
|
logger: config.logger
|
|
2540
2568
|
}));
|
|
2541
2569
|
}
|
|
2570
|
+
const unsupportedEntries = Object.entries(unsupportedTypes);
|
|
2571
|
+
const len = unsupportedEntries.length;
|
|
2572
|
+
if (len) {
|
|
2573
|
+
let count = 0;
|
|
2574
|
+
(_b = config.logger) == null ? void 0 : _b.warn(
|
|
2575
|
+
`Found unsupported types:
|
|
2576
|
+
${unsupportedEntries.map(([type, columns]) => {
|
|
2577
|
+
count += columns.length;
|
|
2578
|
+
return `${type} is used for column${columns.length > 1 ? "s" : ""} ${columns.join(", ")}`;
|
|
2579
|
+
}).join("\n")}
|
|
2580
|
+
|
|
2581
|
+
Append \`as\` method manually to ${count > 1 ? "these" : "this"} column${count > 1 ? "s" : ""} to treat ${count > 1 ? "them" : "it"} as other column type`
|
|
2582
|
+
);
|
|
2583
|
+
}
|
|
2584
|
+
(_c = config.logger) == null ? void 0 : _c.log("Database pulled successfully");
|
|
2542
2585
|
};
|
|
2543
2586
|
|
|
2544
2587
|
const rakeDb = async (options, partialConfig = {}, args = process.argv.slice(2)) => {
|
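With the pieces above in place, `db pull` collects every column whose type it cannot map and warns about them once the pull finishes. A sketch of the message produced for a single unsupported `ltree` column (the type and column path are hypothetical; the wording follows the template added above), plus a note on what to do with it:

// Reproduce the warning the pull step now logs for one collected entry.
const unsupportedTypes: Record<string, string[]> = {
  ltree: ['public.categories.path'],
};

let count = 0;
const lines = Object.entries(unsupportedTypes).map(([type, columns]) => {
  count += columns.length;
  return `${type} is used for column${columns.length > 1 ? 's' : ''} ${columns.join(', ')}`;
});

console.warn(
  `Found unsupported types:\n${lines.join('\n')}\n\n` +
    `Append \`as\` method manually to ${count > 1 ? 'these' : 'this'} column${
      count > 1 ? 's' : ''
    } to treat ${count > 1 ? 'them' : 'it'} as other column type`,
);

// Acting on the warning means editing the generated migration: chain `.as(...)`
// onto the pulled column to have it treated as a supported type (for example,
// treating an ltree column as text). Check the exact column creator rake-db
// emitted before editing; the creator name varies between versions.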