@roostorg/db-migrator 1.0.6

package/README.md ADDED
@@ -0,0 +1,52 @@
+ ## Concepts/Terminology
+
+ The CLI classifies scripts as "seeds" or "migrations". The
+ fundamental difference is that a migration runs across every environment --
+ including production -- whereas a seed only runs in one environment. Therefore,
+ schema changes should always be migrations. "Reference data" that is the same
+ in every environment -- essentially, application constants that just happen to
+ be stored in the database -- is also appropriate to add as a migration. But
+ fixture data for tests or other specific use cases is a seed.
+
+ Critically, seed files are timestamped just like migrations, and they're run
+ interspersed with the migrations. This approach is unconventional -- the normal
+ approach is to have seed scripts run after all migrations have been applied --
+ but it makes sense _given that we're writing the seeds as SQL or JS scripts that
+ don't have access to our Sequelize models' db mappings_.
+
+ Under the conventional approach, the seed scripts must be written against the
+ latest schema (since they run after all migrations). This isn't a problem when
+ the seed scripts are written using entity classes that the main application also
+ uses to talk to the database, because the ORM mapping for those classes will
+ also have been updated when the migration is deployed. I.e., if we wrote our seed
+ scripts in TypeScript using our Sequelize model classes, then updating the
+ model definitions to keep our classes working for our app would also keep our
+ seed scripts working, and we might even get type errors (from TypeScript) if,
+ e.g., we removed a field from the model class that a seed script depended on.
+
+ However, when seed scripts don't go through the Sequelize model classes, and
+ instead include direct references to SQL column names etc. (as ours do), then
+ using the conventional approach of running seeds at the end means that making a
+ schema change in a new migration can break the existing seed scripts. This is
+ bad. It creates extra work that must be done after each new migration to keep
+ the seed scripts working with the latest schema. More importantly, though, we
+ might not catch for quite a while that a migration has broken one of our seed
+ scripts, since we'd get no compile errors and since, even on deploy, only the
+ seed scripts for the environment being deployed get run. E.g., it'd be a pain to
+ find out three weeks after writing a migration, when we go to re-deploy demo,
+ that we'd actually broken its seed script; then we'd have to go back, remember
+ what we changed, and fix it.
+
+ For now, we don't want to introduce the complexity of exporting/depending on our
+ Sequelize model classes in our seed scripts, so it makes sense to ditch the
+ conventional approach of running seeds all at the end.
+
+ By instead interspersing the running of the seed scripts with the migrations'
+ schema changes, each script knows exactly what the schema will look like at the
+ time it runs, and can't be broken by future schema changes.
+
+ That said, long term, it would be nice to write the seed scripts using Sequelize
+ models and run them at the end, because that would allow us to have fewer,
+ more-intentionally-divided seed scripts, which makes it a bit easier to see
+ exactly what seed data we're setting up (without actually checking the db). It
+ would also be more performant.
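
To make the interleaving concrete: a scripts directory might look like the following (hypothetical names; the timestamp prefix is added by umzug's `TIMESTAMP` prefix mode and the `.seed.<env>` suffix by the `add` command, per `script-generator.js` below):

```
2023.09.01T12.00.00.create-users-table.js      # migration: runs in every env
2023.09.02T09.30.00.default-roles.js           # migration: reference data
2023.09.03T16.45.00.fake-users.seed.demo.sql   # seed: runs only in demo
2023.09.10T10.00.00.add-users-last-login.js    # migration written after the seed
```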
@@ -0,0 +1,2 @@
+ #!/usr/bin/env node
+ export {};
@@ -0,0 +1,8 @@
+ #!/usr/bin/env node
+ import path from 'path';
+ import { makeCli } from './index.js';
+ // For now, discover the configs by reading the filesystem, at a path that's
+ // overridable by an env var. This is simpler than taking a CLI option that
+ // we'd have to set with every command invocation.
+ const configPath = process.env.MIGRATOR_CONFIG_PATH ?? './configs/index.js';
+ makeCli(await import(path.resolve(process.cwd(), configPath)));
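
Since `makeCli` is handed the imported module namespace, each named export of the config module becomes one database the CLI can target with `--db`. A minimal sketch of such a module, with no-op lifecycle methods just to show the shape (the path `configs/index.ts` and db name `main` are hypothetical; see `typescript-types.js` below for the full contract):

```ts
// Hypothetical configs/index.ts, compiled to configs/index.js.
import { memoryStorage } from 'umzug';
import type { DatabaseConfig } from '@roostorg/db-migrator';

export const main: DatabaseConfig<'js', null> = {
  defaultScriptFormat: 'js',
  supportedScriptFormats: ['js'],
  scriptsDirectory: './scripts/main',
  supportedEnvironments: ['local', 'staging', 'prod'],
  async prepareDbAndDisconnect() {}, // no-op stub; a real config creates the db
  async dropDbAndDisconnect() {},    // no-op stub; a real config drops the db
  createStorage: () => memoryStorage(), // real configs persist applied-script names
  resolveScript: ({ name, path: filepath }) => ({
    name,
    up: async ({ context }) => (await import(filepath)).up({ context }),
  }),
  createContext: () => null,
  async destroyContext() {},
};
```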
@@ -0,0 +1,5 @@
+ import '@total-typescript/ts-reset/array-includes';
+ import type { DatabaseConfig } from './typescript-types.js';
+ export declare function makeCli(dbs: {
+     [k: string]: DatabaseConfig<string, any>;
+ }): void;
@@ -0,0 +1,320 @@
+ import path from 'path';
+ import { promisify } from 'util';
+ import glob from 'glob';
+ import '@total-typescript/ts-reset/array-includes';
+ import { Umzug } from 'umzug';
+ import yargs from 'yargs';
+ import { nameScript, scriptTypes, shouldRun } from './script-generator.js';
+ const globAsync = promisify(glob);
+ export function makeCli(dbs) {
+     const dbNames = Object.keys(dbs);
+     const dbOpt = {
+         alias: 'database',
+         describe: 'The database(s) to run migrations against',
+         type: 'array',
+         choices: dbNames,
+         default: process.env.MIGRATOR_DB_NAME?.split(',').map((it) => it.trim()) ??
+             dbNames,
+     };
+     const envOpt = {
+         alias: 'environment',
+         describe: "Environment you're creating a seed for or running the migrations/seeds " +
+             'in (affects which/whether seeds are run and how generated files are named).',
+         type: 'string',
+         choices: [
+             ...new Set(dbNames.flatMap((it) => dbs[it].supportedEnvironments)),
+         ],
+     };
+     const formatOpt = {
+         alias: 'format',
+         describe: 'Whether this script (seed or migration) will be in SQL or JS.',
+         type: 'string',
+         choices: Object.values(dbs).flatMap((it) => it.supportedScriptFormats),
+         demandOption: false,
+         default: undefined,
+     };
+     yargs(process.argv.slice(2))
+         .command({
+             command: 'add <db> <type> <name>',
+             describe: 'Creates a blank migration or seed file, properly named.',
+             builder(yargs) {
+                 return yargs
+                     .positional('db', {
+                         alias: 'database',
+                         describe: 'The name of the database for which to create the script.',
+                         choices: dbNames,
+                         demandOption: true,
+                     })
+                     .positional('type', {
+                         choices: scriptTypes,
+                         demandOption: true,
+                     })
+                     .positional('name', {
+                         describe: 'Name of the script to create.',
+                         type: 'string',
+                         demandOption: true,
+                     })
+                     .option('env', envOpt)
+                     .option('format', formatOpt)
+                     .check((opts) => {
+                         if (opts.type === 'seed' && !opts.env) {
+                             throw new Error('Environment is required when adding a seed file, to indicate ' +
+                                 'in which environment the seed should be applied.');
+                         }
+                         if (opts.type === 'migration' && opts.env) {
+                             throw new Error('You cannot provide an environment when creating a migration; ' +
+                                 'every migration is run in every environment for schema consistency.');
+                         }
+                         const dbConfig = dbs[opts.db];
+                         if (opts.env &&
+                             !dbConfig.supportedEnvironments.includes(opts.env)) {
+                             throw new Error(`The db "${opts.db}" doesn't support the "${opts.env}" environment.`);
+                         }
+                         if (opts.format !== undefined &&
+                             !dbConfig.supportedScriptFormats.includes(opts.format)) {
+                             throw new Error(`The db "${opts.db}" doesn't support .${opts.format} files as scripts.`);
+                         }
+                         return true;
+                     }, false);
+             },
+             async handler({ db, name, type, env, format: formatOptValue }) {
+                 const { defaultScriptFormat, getTemplate, scriptsDirectory } = dbs[db];
+                 // Umzug couples together script creation and running into one class,
+                 // presumably to support the `verify` behavior mentioned below, so we have
+                 // to instantiate it w/ dummy values for `migrations` and `context` here.
+                 const migrator = new Umzug({
+                     async migrations() {
+                         return [];
+                     },
+                     create: {
+                         template: (filePath) => [[filePath, getTemplate?.(filePath) ?? '']],
+                         folder: scriptsDirectory,
+                     },
+                     context: {},
+                     logger: console,
+                 });
+                 const format = formatOptValue ?? defaultScriptFormat;
+                 await migrator.create({
+                     name: `${nameScript(type, env, name)}.${format}`,
+                     allowExtension: `.${format}`,
+                     prefix: 'TIMESTAMP',
+                     // skipVerify lets us run this command without an active db connection,
+                     // at least for pg, which is a bit safer. It will prevent umzug from
+                     // checking that we haven't already run a migration with the same name,
+                     // but that check isn't super useful (it only checks whatever db this
+                     // script happens to be connected to when the migration is created) and
+                     // this error should be prevented by the filesystem not allowing
+                     // duplicate names anyway.
+                     skipVerify: true,
+                 });
+             },
+         })
+         .command({
+             command: ['apply-scripts [target] [name]', 'apply'],
+             describe: 'Runs one or more migration/seed scripts. By default, applies ' +
+                 "all that haven't been applied to the db yet.",
+             builder: (yargs) => {
+                 return yargs
+                     .option('env', envOpt)
+                     .option('db', dbOpt)
+                     .demandOption('env')
+                     .positional('target', {
+                         choices: ['remaining', 'next', 'only', 'until'],
+                         default: 'remaining',
+                     })
+                     .check(({ target, name, db, env }) => {
+                         const needsSpecificScript = target === 'only' || target === 'until';
+                         if (!needsSpecificScript && name) {
+                             throw new Error("Can't provide a general script/set of scripts to run (with " +
+                                 '"next" or "remaining") and then also provide the name of a ' +
+                                 'specific script.');
+                         }
+                         if (needsSpecificScript && !name) {
+                             throw new Error('Must provide a script name when you use "only"/"until" to ' +
+                                 'apply (only or up to) a specific script.');
+                         }
+                         if (env &&
+                             !db.every((it) => dbs[it].supportedEnvironments.includes(env))) {
+                             throw new Error(`Not every db in "${db}" supports the "${env}" environment.`);
+                         }
+                         return true;
+                     });
+             },
+             handler: async function ({ target, name, env, db: optDbs }) {
+                 const migrationTasks = Object.entries(dbs)
+                     .filter(([dbName, _]) => optDbs.includes(dbName))
+                     .map(([_, db]) => async () => {
+                         const { scriptsDirectory, supportedScriptFormats } = db;
+                         const [context, storage] = await Promise.all([
+                             db.createContext(),
+                             db.createStorage(),
+                         ]);
+                         const migrator = new Umzug({
+                             migrations: async (context) => {
+                                 const supportedExtensions = supportedScriptFormats.length > 1
+                                     ? `{${supportedScriptFormats.join(',')}}`
+                                     : `${supportedScriptFormats[0]}`;
+                                 const matchingFilePaths = await globAsync(`${scriptsDirectory}/*.${supportedExtensions}`, { absolute: true });
+                                 return matchingFilePaths
+                                     .filter(shouldRun.bind(null, env, supportedScriptFormats))
+                                     .map((unresolvedPath) => {
+                                         const filepath = path.resolve(unresolvedPath);
+                                         const name = path.basename(filepath);
+                                         return {
+                                             path: filepath,
+                                             ...db.resolveScript({
+                                                 name,
+                                                 path: filepath,
+                                                 context,
+                                             }),
+                                         };
+                                     });
+                             },
+                             context,
+                             storage,
+                             logger: console,
+                         });
+                         try {
+                             switch (target) {
+                                 case 'remaining':
+                                     await migrator.up();
+                                     break;
+                                 case 'next':
+                                     await migrator.up({ step: 1 });
+                                     break;
+                                 case 'only':
+                                     await migrator.up({ migrations: [name] });
+                                     break;
+                                 case 'until':
+                                     await migrator.up({ to: name });
+                             }
+                         }
+                         finally {
+                             // Await, don't return, so that any errors from the try aren't swallowed.
+                             await db.destroyContext(context);
+                             if ('destroyStorage' in db) {
+                                 await db.destroyStorage?.(storage);
+                             }
+                         }
+                     });
+                 // Run in sequence so that the logs don't get interleaved (easier to follow).
+                 return migrationTasks.reduce((res, task) => res.then(task), Promise.resolve());
+             },
+         })
+         .command({
+             command: 'clean',
+             describe: 'Deletes all the data in the databases specified in the ENV vars.',
+             builder: (yargs) => {
+                 return yargs
+                     .option('db', dbOpt)
+                     .option('env', {
+                         ...envOpt,
+                         demand: 'Must provide an environment (even though it has no effect; the ' +
+                             'connection-related env vars determine which db(s) are cleaned) ' +
+                             'to help prevent accidentally deleting prod!',
+                     })
+                     .check((opts) => {
+                         return opts.env !== 'prod';
+                     });
+             },
+             handler: async ({ db: optDbs }) => {
+                 await Promise.all(Object.entries(dbs)
+                     .filter(([dbName, _]) => optDbs.includes(dbName))
+                     .map(async ([_, db]) => {
+                         const { dropDbAndDisconnect, prepareDbAndDisconnect } = db;
+                         // If drop fails, assume the db didn't exist, for convenience,
+                         // and just move on to attempting the create. If the error was
+                         // something different, then the create will fail (because we don't
+                         // do CREATE IF NOT EXISTS, which isn't even supported by postgres)
+                         // so this should be fine.
+                         await dropDbAndDisconnect().catch(() => { });
+                         await prepareDbAndDisconnect();
+                     }));
+             },
+         })
+         .command({
+             command: 'drop',
+             describe: 'Drops the databases specified in the ENV vars.',
+             builder: (yargs) => {
+                 return yargs
+                     .option('db', dbOpt)
+                     .option('env', {
+                         ...envOpt,
+                         demand: 'Must provide an environment (even though it has no effect; the ' +
+                             'connection-related env vars determine which db(s) are dropped) ' +
+                             'to help prevent accidentally deleting prod!',
+                     })
+                     .check((opts) => {
+                         return opts.env !== 'prod';
+                     });
+             },
+             handler: async ({ db: optDbs }) => {
+                 await Promise.all(Object.entries(dbs)
+                     .filter(([dbName, _]) => optDbs.includes(dbName))
+                     .map(([_, db]) => db.dropDbAndDisconnect()));
+             },
+         })
+         .command({
+             command: 'create',
+             describe: 'Creates the databases specified in the ENV vars.',
+             builder: (yargs) => {
+                 return yargs
+                     .option('db', dbOpt)
+                     .option('env', {
+                         ...envOpt,
+                         demand: 'Must provide an environment (even though it has no effect; the ' +
+                             'connection-related env vars determine which db(s) are created) ' +
+                             'to help prevent accidentally deleting prod!',
+                     })
+                     .check((opts) => {
+                         return opts.env !== 'prod';
+                     });
+             },
+             handler: async ({ db: optDbs }) => {
+                 await Promise.all(Object.entries(dbs)
+                     .filter(([dbName, _]) => optDbs.includes(dbName))
+                     .map(([_, db]) => db.prepareDbAndDisconnect()));
+             },
+         })
+         .command({
+             command: 'restore <target>',
+             describe: "Restores an environment's database to the last backup of prod.",
+             builder: (yargs) => {
+                 return yargs
+                     .positional('target', {
+                         alias: 'to',
+                         describe: 'The environment whose data will be replaced with the backup of prod.',
+                         type: 'string',
+                         choices: [...envOpt.choices, 'local'],
+                         demand: 'Must provide a target environment to restore to.',
+                     })
+                     .demandOption('target')
+                     .option('db', dbOpt)
+                     .option('force', {
+                         alias: 'f',
+                         default: false,
+                         type: 'boolean',
+                     })
+                     .check(({ target, force, db: dbNames }) => {
+                         if (target === 'prod') {
+                             throw new Error('Cannot restore prod to itself');
+                         }
+                         if (!dbNames.every((it) => dbs[it].supportedEnvironments.includes(target))) {
+                             throw new Error(`Some db(s) in "${dbNames}" don't support the "${target}" environment.`);
+                         }
+                         if (!['staging', 'local'].includes(target) && !force) {
+                             throw new Error('We currently only imagine applying this command to `staging` or ' +
+                                 `\`local\`. If you really want to reset ${target}'s data to ` +
+                                 'the latest data from prod, run the command again with --force.');
+                         }
+                         return true;
+                     });
+             },
+             handler: async (_) => {
+                 // TODO: need to implement for AWS.
+                 throw new Error('not implemented');
+             },
+         })
+         .demandCommand(1, 'Must invoke a command (e.g., "clean" or "apply-scripts")')
+         .parse();
+ }
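
Putting the commands together: since package.json exposes the entry point at the `./cli` export but declares no `bin` field, invocations go through node directly. Hypothetical examples, assuming a shell alias `migrator='node node_modules/@roostorg/db-migrator/build/cli/cli.js'` and invented db/script names:

```
migrator add main migration create-users-table
migrator add main seed fake-users --env demo --format sql
migrator apply-scripts remaining --env staging --db main
migrator apply-scripts until 2023.09.01T12.00.00.create-users-table.js --env local
migrator clean --env local
```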
@@ -0,0 +1,20 @@
+ export declare const scriptTypes: readonly ["seed", "migration"];
+ type ScriptType = (typeof scriptTypes)[number];
+ /**
+  * Generates the name of a script file, excluding the file extension and the
+  * ordering-related prefixes added by umzug. This function enforces a convention
+  * that allows us to identify whether a script is a seed and, if so, which
+  * environment it should run in.
+  *
+  * @param type - Whether this is a migration or seed
+  * @param env - If a seed, which environment the seed applies to.
+  * @param userNamePortion - The portion of the name provided by the user to
+  *   actually describe what the script does.
+  */
+ export declare function nameScript(type: ScriptType, env: string | undefined, userNamePortion: string): string;
+ /**
+  * Returns whether the given script, based on its name as generated by
+  * {@see nameScript}, should run in the given environment.
+  */
+ export declare function shouldRun(env: string, legalScriptFormats: readonly string[], scriptName: string): boolean;
+ export {};
@@ -0,0 +1,25 @@
+ export const scriptTypes = ['seed', 'migration'];
+ /**
+  * Generates the name of a script file, excluding the file extension and the
+  * ordering-related prefixes added by umzug. This function enforces a convention
+  * that allows us to identify whether a script is a seed and, if so, which
+  * environment it should run in.
+  *
+  * @param type - Whether this is a migration or seed
+  * @param env - If a seed, which environment the seed applies to.
+  * @param userNamePortion - The portion of the name provided by the user to
+  *   actually describe what the script does.
+  */
+ export function nameScript(type, env, userNamePortion) {
+     return `${userNamePortion}${type === 'seed' ? `.seed.${env}` : ''}`;
+ }
+ /**
+  * Returns whether the given script, based on its name as generated by
+  * {@see nameScript}, should run in the given environment.
+  */
+ export function shouldRun(env, legalScriptFormats, scriptName) {
+     const anyEnvSeedFileRegex = new RegExp(`\\.seed\\.[^\\.]+\\.(${legalScriptFormats.join('|')})$`);
+     const thisEnvSeedFileRegex = new RegExp(`\\.seed\\.${env}\\.(${legalScriptFormats.join('|')})$`);
+     const isSeed = anyEnvSeedFileRegex.test(scriptName);
+     return !isSeed || thisEnvSeedFileRegex.test(scriptName);
+ }
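
To make the naming convention concrete, here is the expected behavior of these two functions on some invented inputs (derived directly from the implementations above):

```ts
import { nameScript, shouldRun } from './script-generator.js';

// Migrations get no suffix; seeds are tagged with their environment.
nameScript('migration', undefined, 'add-users-table'); // 'add-users-table'
nameScript('seed', 'demo', 'fake-users');              // 'fake-users.seed.demo'

// Migrations run everywhere; seeds run only in their own environment.
shouldRun('demo', ['js'], '2023.09.01T00.00.00.add-users-table.js');      // true
shouldRun('demo', ['js'], '2023.09.02T00.00.00.fake-users.seed.demo.js'); // true
shouldRun('prod', ['js'], '2023.09.02T00.00.00.fake-users.seed.demo.js'); // false
```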
@@ -0,0 +1,86 @@
+ import { type MigrationParams, type RunnableMigration, type UmzugStorage } from 'umzug';
+ export type Bind1<F extends (arg0: A0, ...args: never[]) => unknown, A0> = F extends (arg0: A0, ...args: infer Args) => infer R ? (...args: Args) => R : never;
+ /**
+  * Every database for which we want to support migrations must provide a config
+  * object for itself that satisfies this type.
+  *
+  * NB: file extensions in the options below should be given with no leading dot.
+  *
+  * NB: "scripts" refers collectively to migrations or seed files.
+  */
+ export type DatabaseConfig<SupportedScriptFormat extends string = string, ContextType = unknown, StorageType extends UmzugStorage = UmzugStorage> = {
+     /**
+      * The file type (i.e., extension) to use for a new script when a file type
+      * isn't specified explicitly.
+      */
+     readonly defaultScriptFormat: SupportedScriptFormat;
+     /**
+      * A list of supported file extensions for this db's scripts (no leading dot).
+      */
+     readonly supportedScriptFormats: readonly SupportedScriptFormat[];
+     /**
+      * The directory in which the migrator will look for this db's scripts and
+      * into which it'll create new scripts.
+      */
+     readonly scriptsDirectory: string;
+     /**
+      * Which environments does this db support?
+      */
+     readonly supportedEnvironments: readonly string[];
+     /**
+      * A reference to the database client/connection that is passed to the Umzug
+      * storage object. Used in the case of a custom UmzugStorage implementation
+      * to ensure we close all connections to the database.
+      */
+     storageDbClient?: unknown;
+     /**
+      * Creates this db with an initial state and then closes any open
+      * connections/resources.
+      */
+     prepareDbAndDisconnect(): Promise<void>;
+     /**
+      * Deletes this db and then closes any open connections/resources.
+      */
+     dropDbAndDisconnect(): Promise<void>;
+     /**
+      * Returns an object capable of recording that a script has been run, listing
+      * the scripts that have run, and removing the record of a script (if it's
+      * rolled back).
+      */
+     createStorage(): UmzugStorage<ContextType>;
+     /**
+      * Takes the name and path of a script and turns it into a runnable object
+      * that has an `up` and (optionally) `down` method. `up` and `down` will be
+      * called with the context object (see below) and should actually update the
+      * database.
+      */
+     resolveScript(params: MigrationParams<ContextType> & {
+         path: string;
+     }): RunnableMigration<ContextType>;
+     /**
+      * Returns a "context" object, which is simply an object that'll be passed to
+      * all scripts. Often this context object is an instance of the db driver
+      * connected to the database.
+      */
+     createContext(): ContextType | Promise<ContextType>;
+     /**
+      * A function that destroys the context object and cleans up associated
+      * resources. This is called after all the migrations have been run with the
+      * context. If the context has an open db connection, that connection should
+      * be closed so the process can exit.
+      */
+     destroyContext(context: ContextType): Promise<void>;
+     /**
+      * A function that destroys the storage object and cleans up associated
+      * resources. This is called after all the migrations have been run with the
+      * storage. If the storage has an open db connection, that connection should
+      * be closed so the process can exit.
+      */
+     destroyStorage?(storage: StorageType): Promise<void>;
+     /**
+      * Given the path of the new script file that is being created, returns a
+      * string that will be that file's initial contents. This template can include
+      * helper/boilerplate code, like common imports.
+      */
+     getTemplate?(filePath: string): string;
+ };
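
A rough sketch of a config satisfying this type for a Postgres database managed through Sequelize. This is not the package's own usage: the connection string, env var `MAIN_DB_URL`, db name, and scripts directory are all hypothetical, and a real config would also handle `.sql` scripts and errors:

```ts
import { Sequelize } from 'sequelize';
import type { DatabaseConfig } from '@roostorg/db-migrator';
import { makeSequelizeUmzugStorage } from '@roostorg/db-migrator';

// The trailing path segment of the URL is the db name; swap it for the
// `postgres` maintenance db when creating/dropping the target db.
const url = process.env.MAIN_DB_URL ?? 'postgres://localhost:5432/main';
const adminUrl = url.replace(/[^/]+$/, 'postgres');

let storageDb: Sequelize | undefined;

export const main: DatabaseConfig<'js', Sequelize> = {
  defaultScriptFormat: 'js',
  supportedScriptFormats: ['js'],
  scriptsDirectory: './scripts/main',
  supportedEnvironments: ['local', 'staging', 'prod'],
  async prepareDbAndDisconnect() {
    const admin = new Sequelize(adminUrl, { logging: false });
    await admin.query('CREATE DATABASE main'); // db name hard-coded for the sketch
    await admin.close();
  },
  async dropDbAndDisconnect() {
    const admin = new Sequelize(adminUrl, { logging: false });
    await admin.query('DROP DATABASE main');
    await admin.close();
  },
  createStorage() {
    // The storage gets its own connection, closed in destroyStorage.
    storageDb = new Sequelize(url, { logging: false });
    return makeSequelizeUmzugStorage(storageDb, {});
  },
  async destroyStorage() {
    await storageDb?.close();
  },
  resolveScript({ name, path: filepath }) {
    // Scripts are JS modules shaped like the templates below: they export
    // up/down functions taking `{ context: QueryInterface }`.
    return {
      name,
      async up({ context }) {
        const mod = await import(filepath);
        await mod.up({ context: context.getQueryInterface() });
      },
      async down({ context }) {
        const mod = await import(filepath);
        await mod.down?.({ context: context.getQueryInterface() });
      },
    };
  },
  createContext: () => new Sequelize(url, { logging: false }),
  destroyContext: (sequelize) => sequelize.close(),
};
```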
@@ -0,0 +1 @@
+ export {};
@@ -0,0 +1,12 @@
+ import { type ModelOptions, type Sequelize } from 'sequelize';
+ import { RunnableMigration, SequelizeStorage } from 'umzug';
+ export declare function makeSequelizeUmzugStorage(sequelize: Sequelize, opts: ModelOptions): SequelizeStorage;
+ export declare function wrapMigration<T>(hooks: {
+     runBefore?: () => void | Promise<void>;
+     runAfter?: () => void | Promise<void>;
+ }, migration: RunnableMigration<T>): {
+     down?: import("umzug").MigrationFn<T>;
+     up(params: import("umzug").MigrationParams<T>): Promise<void>;
+     name: string;
+     path?: string;
+ };
@@ -0,0 +1,35 @@
+ import { DataTypes } from 'sequelize';
+ import { SequelizeStorage } from 'umzug';
+ export function makeSequelizeUmzugStorage(sequelize, opts) {
+     return new SequelizeStorage({
+         sequelize,
+         model: sequelize.define('SequelizeMeta', {
+             name: {
+                 type: DataTypes.STRING,
+                 allowNull: false,
+                 unique: true,
+                 primaryKey: true,
+                 autoIncrement: false,
+             },
+         }, { timestamps: true, ...opts }),
+     });
+ }
+ export function wrapMigration(hooks, migration) {
+     return {
+         ...migration,
+         async up(params) {
+             await hooks.runBefore?.();
+             await migration.up(params);
+             await hooks.runAfter?.();
+         },
+         ...(migration.down
+             ? {
+                 async down(params) {
+                     await hooks.runBefore?.();
+                     await migration.down(params);
+                     await hooks.runAfter?.();
+                 },
+             }
+             : {}),
+     };
+ }
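
For instance, a config's `resolveScript` could pass each resolved script through `wrapMigration` to run setup/teardown around every `up`/`down`. A small sketch (the logging hooks are hypothetical stand-ins for, say, acquiring and releasing a lock):

```ts
import { wrapMigration } from '@roostorg/db-migrator';
import type { RunnableMigration } from 'umzug';

// Wrap a migration so every up()/down() is bracketed by the same hooks.
function withHooks<T>(migration: RunnableMigration<T>) {
  return wrapMigration(
    {
      runBefore: async () => console.log(`starting ${migration.name}`),
      runAfter: async () => console.log(`finished ${migration.name}`),
    },
    migration,
  );
}
```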
@@ -0,0 +1,5 @@
+ export { default as ScyllaStorage } from './storage/scyllaStorage.js';
+ export { type DatabaseConfig } from './cli/typescript-types.js';
+ export { makeCli } from './cli/index.js';
+ export { makeSequelizeUmzugStorage, wrapMigration } from './cli/utils.js';
+ export declare const SCRIPT_TEMPLATES_DIR_ABSOLUTE_PATH: string;
package/build/index.js ADDED
@@ -0,0 +1,5 @@
+ import { fileURLToPath } from 'url';
+ export { default as ScyllaStorage } from './storage/scyllaStorage.js';
+ export { makeCli } from './cli/index.js';
+ export { makeSequelizeUmzugStorage, wrapMigration } from './cli/utils.js';
+ export const SCRIPT_TEMPLATES_DIR_ABSOLUTE_PATH = fileURLToPath(new URL('./script-templates', import.meta.url));
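
One plausible use of `SCRIPT_TEMPLATES_DIR_ABSOLUTE_PATH` is in a config's `getTemplate`, seeding new scripts with one of the templates shipped under `build/script-templates` (a sketch; `sequelizeMigration.js` is a hypothetical filename, since the template file names aren't visible in this diff):

```ts
import fs from 'fs';
import path from 'path';
import { SCRIPT_TEMPLATES_DIR_ABSOLUTE_PATH } from '@roostorg/db-migrator';

// A DatabaseConfig.getTemplate implementation: new .js scripts start from the
// shipped template; .sql scripts start empty.
export function getTemplate(filePath: string): string {
  return filePath.endsWith('.js')
    ? fs.readFileSync(
        path.join(SCRIPT_TEMPLATES_DIR_ABSOLUTE_PATH, 'sequelizeMigration.js'),
        'utf8',
      )
    : '';
}
```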
@@ -0,0 +1,18 @@
+ 'use strict';
+
+ // NB: The Datastax Cassandra/Scylla driver only supports one statement
+ // per API call. So Scylla migrations should use sequential, raw `query` calls.
+
+ /**
+  * @param {{ context: import("cassandra-driver").Client }} context
+  */
+ exports.up = async function ({ context }) {
+     const query = context.execute.bind(context);
+ };
+
+ /**
+  * @param {{ context: import("cassandra-driver").Client }} context
+  */
+ exports.down = async function ({ context }) {
+     const query = context.execute.bind(context);
+ };
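
A hypothetical migration filled in from this template, showing the one-statement-per-call pattern (table, index, and column names are invented):

```js
'use strict';

/** @param {{ context: import("cassandra-driver").Client }} context */
exports.up = async function ({ context }) {
  const query = context.execute.bind(context);
  // One statement per call: create the table, then index it.
  await query(`CREATE TABLE users (id uuid PRIMARY KEY, email text)`);
  await query(`CREATE INDEX users_by_email ON users (email)`);
};

/** @param {{ context: import("cassandra-driver").Client }} context */
exports.down = async function ({ context }) {
  const query = context.execute.bind(context);
  await query(`DROP INDEX users_by_email`);
  await query(`DROP TABLE users`);
};
```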
@@ -0,0 +1,19 @@
+ 'use strict';
+
+ // NB: Snowflake migrations should use raw queries, in case Sequelize doesn't
+ // translate the query correctly. Also, Snowflake only supports 1 SQL statement
+ // per API call. So Snowflake migrations should use sequential, raw `query` calls.
+
+ /**
+  * @param {{ context: import("sequelize").QueryInterface }} context
+  */
+ exports.up = async function ({ context }) {
+     const query = context.sequelize.query.bind(context.sequelize);
+ };
+
+ /**
+  * @param {{ context: import("sequelize").QueryInterface }} context
+  */
+ exports.down = async function ({ context }) {
+     const query = context.sequelize.query.bind(context.sequelize);
+ };
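
And the equivalent filled-in Snowflake example (hypothetical table and columns), again issuing statements one at a time because Snowflake rejects multi-statement strings:

```js
'use strict';

/** @param {{ context: import("sequelize").QueryInterface }} context */
exports.up = async function ({ context }) {
  const query = context.sequelize.query.bind(context.sequelize);
  await query(`CREATE TABLE events (id INTEGER, payload VARIANT)`);
  await query(`ALTER TABLE events ADD COLUMN created_at TIMESTAMP_NTZ`);
};

/** @param {{ context: import("sequelize").QueryInterface }} context */
exports.down = async function ({ context }) {
  const query = context.sequelize.query.bind(context.sequelize);
  await query(`DROP TABLE events`);
};
```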
@@ -0,0 +1,12 @@
+ 'use strict';
+ const { DataTypes } = require('sequelize');
+
+ /**
+  * @param {{ context: import("sequelize").QueryInterface }} context
+  */
+ exports.up = async function ({ context: queryInterface }) {};
+
+ /**
+  * @param {{ context: import("sequelize").QueryInterface }} context
+  */
+ exports.down = async function ({ context: queryInterface }) {};
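
This template pre-imports `DataTypes` for use with the `QueryInterface` methods. A filled-in example (hypothetical table and columns):

```js
'use strict';
const { DataTypes } = require('sequelize');

/** @param {{ context: import("sequelize").QueryInterface }} context */
exports.up = async function ({ context: queryInterface }) {
  await queryInterface.createTable('users', {
    id: { type: DataTypes.INTEGER, primaryKey: true, autoIncrement: true },
    email: { type: DataTypes.STRING, allowNull: false, unique: true },
  });
};

/** @param {{ context: import("sequelize").QueryInterface }} context */
exports.down = async function ({ context: queryInterface }) {
  await queryInterface.dropTable('users');
};
```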
@@ -0,0 +1,26 @@
1
+ import { type DseClientOptions } from 'cassandra-driver';
2
+ /**
3
+ * Returns an object that can store migration state in Scylla, in a table called
4
+ * `migrations_metadata`. If the table does not exist it will be created
5
+ * automatically upon the logging of the first migration.
6
+ */
7
+ export default class ScyllaStorage {
8
+ private scyllaClient;
9
+ private columnName;
10
+ private tableName;
11
+ private metadataExists;
12
+ constructor(options: {
13
+ driverOptions: DseClientOptions;
14
+ columnName?: string;
15
+ tableName?: string;
16
+ });
17
+ private createTable;
18
+ logMigration({ name: migrationName }: {
19
+ name: string;
20
+ }): Promise<void>;
21
+ unlogMigration({ name: migrationName }: {
22
+ name: string;
23
+ }): Promise<void>;
24
+ executed(): Promise<string[]>;
25
+ shutdown(): Promise<void>;
26
+ }
@@ -0,0 +1,62 @@
+ import { Client as ScyllaClient, } from 'cassandra-driver';
+ /**
+  * Stores migration state in Scylla, in a table called `migrations_metadata`
+  * (by default). If the table does not exist, it will be created automatically
+  * upon the logging of the first migration.
+  */
+ export default class ScyllaStorage {
+     constructor(options) {
+         this.scyllaClient = new ScyllaClient(options.driverOptions);
+         this.columnName = options.columnName ?? 'name';
+         this.tableName = options.tableName ?? 'migrations_metadata';
+         this.metadataExists = false;
+     }
+     async createTable() {
+         await this.scyllaClient.execute(`CREATE TABLE IF NOT EXISTS "${this.tableName}"(
+             migration_static_key int,
+             ${this.columnName} text,
+             createdAt timestamp,
+             PRIMARY KEY (migration_static_key, createdAt))`);
+         this.metadataExists = true;
+     }
+     async logMigration({ name: migrationName }) {
+         if (!this.metadataExists) {
+             await this.createTable();
+         }
+         await this.scyllaClient.execute(`INSERT INTO "${this.tableName}"(
+             migration_static_key,
+             ${this.columnName},
+             createdAt
+         ) VALUES ( ?, ?, ? )`, [1, migrationName, Date.now()], { prepare: true });
+     }
+     async unlogMigration({ name: migrationName }) {
+         // NB: the table name must be interpolated; CQL doesn't allow a bound
+         // parameter (`?`) in the FROM position.
+         await this.scyllaClient.execute(`DELETE FROM "${this.tableName}"
+             WHERE migration_static_key = 1
+             AND "${this.columnName}" = ?`, [migrationName], { prepare: true });
+     }
+     async executed() {
+         try {
+             const migrations = await this.scyllaClient.execute(`SELECT * FROM ${this.tableName} ALLOW FILTERING`);
+             const x = migrations.rows.map((migration) => {
+                 const name = migration[this.columnName];
+                 if (typeof name !== 'string') {
+                     throw new TypeError(`Unexpected migration name type: expected string, got ${typeof name}`);
+                 }
+                 return name;
+             });
+             return x;
+         }
+         catch (e) {
+             // 8704 is the CQL error code for 'unconfigured table' and will be returned in the
+             // Error object when the metadata table has not been set up, e.g. on the first
+             // migration run.
+             if (e.code === 8704) {
+                 return [];
+             }
+             throw e;
+         }
+     }
+     async shutdown() {
+         return this.scyllaClient.shutdown();
+     }
+ }
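
A sketch of plugging `ScyllaStorage` into a `DatabaseConfig` (the contact point, data center, and keyspace are hypothetical):

```ts
import { ScyllaStorage } from '@roostorg/db-migrator';

// Create the storage once per apply run. ScyllaStorage duck-types umzug's
// UmzugStorage (logMigration/unlogMigration/executed), and shutdown() closes
// the driver's connections so the process can exit.
const storage = new ScyllaStorage({
  driverOptions: {
    contactPoints: ['127.0.0.1'],    // hypothetical
    localDataCenter: 'datacenter1',  // hypothetical
    keyspace: 'my_keyspace',         // hypothetical
  },
});
// Return `storage` from DatabaseConfig.createStorage(), and call
// storage.shutdown() from destroyStorage().
```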
package/package.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "name": "@roostorg/db-migrator",
+   "version": "1.0.6",
+   "description": "Migrating (and eventually seeding) our dbs on deploy",
+   "type": "module",
+   "scripts": {
+     "build": "tsc",
+     "test": "echo \"Error: no test specified\" && exit 1",
+     "prepublishOnly": "rm -rf ./build && npm run build && cp -r script-templates build/script-templates"
+   },
+   "exports": {
+     ".": "./build/index.js",
+     "./cli": "./build/cli/cli.js"
+   },
+   "files": [
+     "build"
+   ],
+   "author": "",
+   "license": "ISC",
+   "dependencies": {
+     "@types/yargs": "^17.0.24",
+     "@types/glob": "^7.2.0",
+     "@types/umzug": "^2.3.3",
+     "aws-sdk": "^2.1691.0",
+     "cassandra-driver": "^4.6.4",
+     "glob": "^7.2.0",
+     "sequelize": "^6.32.1",
+     "umzug": "^3.0.0",
+     "yargs": "^16.2.0",
+     "@total-typescript/ts-reset": "^0.5.1"
+   },
+   "devDependencies": {
+     "typescript": "^5.2.2"
+   },
+   "publishConfig": {
+     "access": "public"
+   }
+ }