@stamhoofd/backend-backup 2.50.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,640 @@
+ import { DeleteObjectCommand, HeadObjectCommand, ListObjectsCommand, PutObjectCommand, S3Client } from '@aws-sdk/client-s3'; // ES Modules import
+ import { Database } from '@simonbackx/simple-database';
+ import { AutoEncoder, field, StringDecoder } from '@simonbackx/simple-encoding';
+ import { Formatter } from '@stamhoofd/utility';
+ import chalk from 'chalk';
+ import { exec } from 'child_process';
+ import fs from 'fs';
+ import { DateTime } from 'luxon';
+ import path from 'path';
+ import util from 'util';
+ const execPromise = util.promisify(exec);
+ import { createHash } from 'node:crypto';
+ import { createReadStream } from 'node:fs';
+ import { QueueHandler } from '@stamhoofd/queues';
+
+ // Normally we'll have ±24 binary logs per day (if the max size is set to 50MB).
+ // Since we'll create a backup every day, keeping 1000 binary logs gives
+ // a full history of roughly 40 days - and leaves us enough margin in case
+ // more logs are created in a day.
+ const MAX_BINARY_LOGS = 1000;
+ const MAX_BACKUPS = 30; // in days
+ const BACKUP_PREFIX = 'backup-';
+ const BINARY_LOG_PREFIX = 'binlog.';
+
+ /**
+  * For the health endpoint
+  */
+
+ let LAST_BINARY_BACKUP: { date: Date } | null = null;
+
+ let LAST_BACKUP: { date: Date; size: number } | null = null;
+
+ export class BackupDateSize extends AutoEncoder {
+     @field({ decoder: StringDecoder })
+     date: string;
+
+     @field({ decoder: StringDecoder, optional: true })
+     size?: string;
+ }
+
+ async function hashFile(path: string, algo = 'md5') {
+     const hashFunc = createHash(algo); // also supports sha256, sha512, etc.
+
+     const contentStream = createReadStream(path);
+     const updateDone = new Promise((resolve, reject) => {
+         contentStream.on('data', data => hashFunc.update(data));
+         contentStream.on('close', resolve);
+         contentStream.on('error', reject);
+     });
+
+     await updateDone;
+     return hashFunc.digest('base64'); // returns the hash encoded as base64, not hex
+ }
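The base64 digest matters here: the S3 `ContentMD5` parameter used by the uploads further down expects the base64 encoding of the raw 16-byte MD5 digest, not the usual hex string. A minimal sketch of the difference (the file path is hypothetical):

```ts
import { createHash } from 'node:crypto';
import { readFileSync } from 'node:fs';

const buf = readFileSync('/tmp/example.bin'); // hypothetical file
console.log(createHash('md5').update(buf).digest('hex'));    // 32 hex chars, handy for logs
console.log(createHash('md5').update(buf).digest('base64')); // 24 chars, what S3 ContentMD5 expects
```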
+
+ export class BackupHealth extends AutoEncoder {
+     @field({ decoder: BackupDateSize, nullable: true })
+     lastBackup: BackupDateSize | null;
+
+     @field({ decoder: BackupDateSize, nullable: true })
+     lastBinaryBackup: BackupDateSize | null;
+
+     @field({ decoder: StringDecoder })
+     status: 'ok' | 'error';
+ }
+
+ export function getHealth(): BackupHealth {
+     const now = new Date();
+
+     let status: 'ok' | 'error' = 'ok';
+
+     if (!LAST_BINARY_BACKUP || !LAST_BACKUP) {
+         status = 'error';
+     }
+     else {
+         // Flag stale backups: binlogs should arrive within 10 minutes, full backups within 25 hours
+         if (now.getTime() - LAST_BINARY_BACKUP.date.getTime() > 60 * 10 * 1000) {
+             status = 'error';
+         }
+         if (now.getTime() - LAST_BACKUP.date.getTime() > 60 * 60 * 1000 * 25) {
+             status = 'error';
+         }
+         // A full backup under 1MB almost certainly means the dump failed
+         if (LAST_BACKUP.size < 1 * 1000 * 1000) {
+             status = 'error';
+         }
+     }
+
+     return BackupHealth.create({
+         lastBinaryBackup: LAST_BINARY_BACKUP
+             ? BackupDateSize.create({
+                 date: Formatter.dateTimeIso(LAST_BINARY_BACKUP.date),
+             })
+             : null,
+         lastBackup: LAST_BACKUP
+             ? BackupDateSize.create({
+                 date: Formatter.dateTimeIso(LAST_BACKUP.date),
+                 size: Formatter.fileSize(LAST_BACKUP.size),
+             })
+             : null,
+         status,
+     });
+ }
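A sketch of how `getHealth()` might be exposed over HTTP; the Express app and the route path here are assumptions for illustration, not part of this package:

```ts
import express from 'express'; // assumed dependency
import { getHealth } from './backup'; // assumed module path

const app = express();
app.get('/health/backups', (_req, res) => {
    const health = getHealth();
    // A 503 lets load balancers and uptime monitors notice a stale backup chain
    res.status(health.status === 'ok' ? 200 : 503).json({ status: health.status });
});
app.listen(3000);
```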
+
+ export async function cleanBackups() {
+     console.log('Cleaning backups...');
+
+     const client = getS3Client();
+
+     // List all backup files on the server
+     const allBackups = await listAllFiles(STAMHOOFD.objectStoragePath);
+
+     // Delete backups older than MAX_BACKUPS days (not using count because we want to keep the last 30 days)
+     const now = new Date();
+     const boundaryDateTime = new Date(now.getTime() - 1000 * 60 * 60 * 24 * MAX_BACKUPS);
+     const boundaryFileName = getBackupBaseFileName(boundaryDateTime) + '.gz.enc';
+     let lastBackup: ObjectStorageFile | null = null;
+
+     for (const file of allBackups) {
+         const filename = path.basename(file.key);
+         if (filename.startsWith(BACKUP_PREFIX) && filename.endsWith('.gz.enc')) {
+             if (filename < boundaryFileName) {
+                 console.log('Deleting old backup ' + filename);
+
+                 await client.send(new DeleteObjectCommand({
+                     Bucket: STAMHOOFD.SPACES_BUCKET,
+                     Key: file.key,
+                 }));
+             }
+             else {
+                 if (!lastBackup || lastBackup.key < file.key) {
+                     lastBackup = file;
+                 }
+             }
+         }
+     }
+
+     if (!LAST_BACKUP && lastBackup) {
+         // Read timestamp from last backup file
+         console.log('Setting last backup timestamp from last backup ' + lastBackup.key);
+         LAST_BACKUP = {
+             date: getBackupFileDate(path.basename(lastBackup.key)),
+             size: lastBackup.size,
+         };
+     }
+ }
+
+ export async function cleanBinaryLogBackups() {
+     console.log('Cleaning binary log backups...');
+
+     const client = getS3Client();
+
+     // List all backup files on the server
+     const allBackups = (await listAllFiles(STAMHOOFD.objectStoragePath + '/binlogs')).filter(f => f.key.endsWith('.enc') && path.basename(f.key).startsWith(BINARY_LOG_PREFIX));
+     const numberToDelete = allBackups.length - MAX_BINARY_LOGS;
+
+     if (numberToDelete <= 0) {
+         console.log('No binary logs to delete');
+         return;
+     }
+
+     console.log('Found ' + allBackups.length + ' binary logs, deleting ' + numberToDelete);
+
+     for (let i = 0; i < numberToDelete; i++) {
+         const file = allBackups[i];
+         console.log('Deleting old binary log ' + file.key);
+
+         await client.send(new DeleteObjectCommand({
+             Bucket: STAMHOOFD.SPACES_BUCKET,
+             Key: file.key,
+         }));
+     }
+ }
+
+ type ObjectStorageFile = { key: string; lastModified: Date; size: number };
+
+ export async function listAllFiles(prefix: string): Promise<ObjectStorageFile[]> {
+     const client = getS3Client();
+     const files: ObjectStorageFile[] = [];
+     let marker: string | undefined;
+
+     // List all backup files on the server
+     while (true) {
+         const response = await client.send(new ListObjectsCommand({
+             Bucket: STAMHOOFD.SPACES_BUCKET,
+             Prefix: prefix + '/', // / suffix is required to make Delimiter work
+             MaxKeys: 1000,
+             Marker: marker,
+             Delimiter: '/', // this makes sure we don't go into folders
+         }));
+
+         if (response.Contents === undefined) {
+             break;
+         }
+
+         for (const object of response.Contents) {
+             if (files.find(f => f.key === object.Key)) {
+                 throw new Error('Duplicate key found: ' + object.Key);
+             }
+
+             files.push({
+                 key: object.Key!,
+                 lastModified: object.LastModified!,
+                 size: object.Size!,
+             });
+         }
+
+         if (!response.NextMarker) {
+             break;
+         }
+
+         marker = response.NextMarker;
+     }
+
+     // Sort files by name
+     files.sort((a, b) => {
+         return a.key.localeCompare(b.key);
+     });
+
+     return files;
+ }
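Usage is straightforward; a small sketch (inside an async context) that prints every stored backup, assuming STAMHOOFD is configured as in the rest of this file:

```ts
const files = await listAllFiles(STAMHOOFD.objectStoragePath);
for (const file of files) {
    console.log(file.key, file.size, file.lastModified.toISOString());
}
```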
+
+ export function escapeShellArg(arg: string) {
+     // POSIX single-quote escaping: close the quote, add an escaped quote, reopen
+     return `'${arg.replace(/'/g, `'\\''`)}'`;
+ }
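This is the standard POSIX trick: wrap the argument in single quotes and replace each embedded single quote with `'\''`. For example:

```ts
console.log(escapeShellArg(`it's a "test"`));
// → 'it'\''s a "test"'   (safe to splice into the shell commands below)
```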
+
+ export async function diskSpace(): Promise<number> {
+     if (process.env.NODE_ENV === 'development') {
+         // df --output is not available on macOS
+         return 1000000000000;
+     }
+
+     const result = await execPromise('df --output=avail --block-size=1 /');
+     const parts = result.stdout.trim().split('\n');
+     if (parts.length !== 2 || parts[0].trim() !== 'Avail') {
+         throw new Error('Unexpected result for df cmd: ' + result.stdout);
+     }
+     const size = parseInt(parts[1].trim());
+     return size;
+ }
+
+ export function getS3Client() {
+     return new S3Client({
+         endpoint: 'https://' + STAMHOOFD.SPACES_ENDPOINT,
+         region: STAMHOOFD.AWS_REGION,
+         credentials: {
+             accessKeyId: STAMHOOFD.SPACES_KEY,
+             secretAccessKey: STAMHOOFD.SPACES_SECRET,
+         },
+     });
+ }
+
+ export function getBackupBaseFileName(date: Date) {
+     const timestamp = Formatter.dateIso(date) + '-' + Formatter.timeIso(date).replace(':', 'h');
+     const tmpFile = `${BACKUP_PREFIX}${timestamp}.sql`;
+     return tmpFile;
+ }
+
+ export function getBackupFileDate(filename: string) {
+     if (!filename.startsWith(BACKUP_PREFIX) || !filename.endsWith('.sql.gz.enc')) {
+         throw new Error('Invalid backup filename: ' + filename);
+     }
+
+     const dateString = filename.substring(BACKUP_PREFIX.length, filename.length - '.sql.gz.enc'.length);
+
+     const year = parseInt(dateString.substring(0, 4));
+     const month = parseInt(dateString.substring(5, 7));
+     const day = parseInt(dateString.substring(8, 10));
+
+     const hour = parseInt(dateString.substring(11, 13));
+     const minute = parseInt(dateString.substring(14, 16));
+
+     console.log('Found date ', {
+         year,
+         month,
+         day,
+         hour,
+         minute,
+     }, 'from', dateString);
+
+     // Convert the Brussels-timezone string to a Date object
+     const date = DateTime.fromObject({
+         year,
+         month,
+         day,
+         hour,
+         minute,
+     }, { zone: 'Europe/Brussels' }).toJSDate();
+
+     // Check match
+     const expected = getBackupBaseFileName(date) + '.gz.enc';
+     if (expected !== filename) {
+         throw new Error('Invalid backup filename: ' + filename + ' - expected ' + expected);
+     }
+
+     return date;
+ }
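The two helpers round-trip: the base name encodes the creation time as `YYYY-MM-DD-HHhMM`, and the parser rebuilds the filename to validate it. A sketch with an arbitrary example date, assuming Formatter renders times in the same Europe/Brussels zone the parser uses:

```ts
const date = new Date('2024-01-31T13:30:00Z'); // 14:30 in Brussels (UTC+1)
const base = getBackupBaseFileName(date);      // 'backup-2024-01-31-14h30.sql'
const parsed = getBackupFileDate(base + '.gz.enc');
console.log(parsed.getTime() === date.getTime()); // true
```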
+
+ export async function backup() {
+     if (QueueHandler.isRunning('backup')) {
+         console.log('Backup already running');
+         return;
+     }
+     await QueueHandler.schedule('backup', async () => {
+         // Creates a backup of the local server and stores it in a .sql.gz file
+         const localBackupFolder = STAMHOOFD.localBackupFolder;
+
+         if (!localBackupFolder.endsWith('/')) {
+             throw new Error('Backup folder should end with a /');
+         }
+
+         if (!STAMHOOFD.DB_DATABASE) {
+             throw new Error('DB_DATABASE not set');
+         }
+
+         if (!STAMHOOFD.DB_USER) {
+             throw new Error('DB_USER not set');
+         }
+
+         if (!STAMHOOFD.DB_PASS) {
+             throw new Error('DB_PASS not set');
+         }
+
+         if (!STAMHOOFD.keyFingerprint) {
+             throw new Error('keyFingerprint not set');
+         }
+
+         const objectStoragePath = STAMHOOFD.objectStoragePath;
+
+         if (!objectStoragePath) {
+             throw new Error('No object storage path defined');
+         }
+
+         if (objectStoragePath.endsWith('/')) {
+             throw new Error('Object storage path should not end with a /');
+         }
+
+         if (objectStoragePath.startsWith('/')) {
+             throw new Error('Object storage path should not start with a /');
+         }
+
+         if (!STAMHOOFD.SPACES_ENDPOINT) {
+             throw new Error('No SPACES_ENDPOINT defined');
+         }
+
+         if (!STAMHOOFD.SPACES_BUCKET) {
+             throw new Error('No SPACES_BUCKET defined');
+         }
+
+         if (!STAMHOOFD.SPACES_KEY) {
+             throw new Error('No SPACES_KEY defined');
+         }
+
+         if (!STAMHOOFD.SPACES_SECRET) {
+             throw new Error('No SPACES_SECRET defined');
+         }
+
+         // Delete the local backups folder
+         console.log('Deleting old backups...');
+         await execPromise('rm -rf ' + escapeShellArg(localBackupFolder));
+         console.log('Deleted old backups');
+
+         // Assert disk space
+         const availableDiskSpace = (await diskSpace()) / 1000 / 1000 / 1000;
+         const required = Math.max(10, LAST_BACKUP ? Math.ceil(LAST_BACKUP.size * 15 / 1000 / 1000 / 1000) : 0); // Require at least 15 times the size of the last backup (the uncompressed dump is a lot larger than the compressed one), with a 10GB minimum
+         if (availableDiskSpace < required) {
+             throw new Error(`Less than ${required.toFixed(2)}GB disk space available. Avoid creating backups now until this has been resolved.`);
+         }
+
+         // Recreate the folder
+         await execPromise('mkdir -p ' + escapeShellArg(localBackupFolder));
+
+         const tmpFile = `${localBackupFolder}${getBackupBaseFileName(new Date())}`;
+         const compressedFile = tmpFile + '.gz';
+
+         const cmd = 'mysqldump -u ' + escapeShellArg(STAMHOOFD.DB_USER) + ' -p' + escapeShellArg(STAMHOOFD.DB_PASS) + ' --flush-logs --single-transaction --triggers --routines --events --lock-tables=false ' + escapeShellArg(STAMHOOFD.DB_DATABASE) + ' > ' + escapeShellArg(tmpFile);
+
+         console.log('Creating MySQL dump...');
+         await execPromise(cmd);
+
+         console.log('MySQL dump created at ' + tmpFile);
+
+         // gzip the dump
+         const cmd2 = `gzip -c ${escapeShellArg(tmpFile)} > ${escapeShellArg(compressedFile)}`;
+         console.log('Compressing dump...');
+
+         await execPromise(cmd2);
+         console.log('MySQL dump compressed at ' + compressedFile);
+
+         // Delete the uncompressed file
+         await execPromise('rm ' + escapeShellArg(tmpFile));
+
+         // Encrypt the compressed file with GPG for the key identified by STAMHOOFD.keyFingerprint
+         const encryptedFile = compressedFile + '.enc';
+         const cmd3 = `gpg --recipient ${escapeShellArg(STAMHOOFD.keyFingerprint)} --encrypt --output ${escapeShellArg(encryptedFile)} ${escapeShellArg(compressedFile)}`;
+
+         console.log('Encrypting dump...');
+         await execPromise(cmd3);
+         console.log('MySQL dump encrypted at ' + encryptedFile);
+
+         // Delete the compressed file
+         await execPromise('rm ' + escapeShellArg(compressedFile));
+
+         // Upload
+         const client = getS3Client();
+
+         // Create a read stream
+         const stream = fs.createReadStream(encryptedFile);
+         const key = objectStoragePath + '/' + encryptedFile.split('/').pop();
+         const fileSize = fs.statSync(encryptedFile).size;
+
+         console.log('Calculating MD5...');
+         const md5 = await hashFile(encryptedFile);
+
+         console.log('Uploading backup to object storage at ' + key + '...');
+
+         const command = new PutObjectCommand({
+             Bucket: STAMHOOFD.SPACES_BUCKET,
+             Key: key,
+             Body: stream,
+             ContentType: 'application/octet-stream',
+             ContentLength: fileSize,
+             ACL: 'private' as const,
+             CacheControl: 'no-cache',
+             ContentMD5: md5,
+         });
+
+         const response = await client.send(command);
+
+         if (response.$metadata.httpStatusCode !== 200) {
+             throw new Error('Failed to upload backup');
+         }
+
+         console.log(chalk.green('✓ Backup uploaded to object storage at ' + key));
+
+         LAST_BACKUP = {
+             date: new Date(),
+             size: fileSize,
+         };
+     });
+ }
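How `backup()` and `backupBinlogs()` get triggered is outside this file; a hypothetical scheduler could look like this (node-cron is an assumed dependency, not part of this package):

```ts
import cron from 'node-cron'; // assumed dependency

// Nightly full dump; QueueHandler already guards against overlapping runs
cron.schedule('0 3 * * *', () => {
    backup().catch(e => console.error('Backup failed', e));
});

// Ship binary logs every 5 minutes, well within the 10-minute health threshold
cron.schedule('*/5 * * * *', () => {
    backupBinlogs().catch(e => console.error('Binlog backup failed', e));
});
```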
+
+ export async function backupBinlogs() {
+     // Get all current binary logs
+     // SHOW BINARY LOGS;
+
+     const [firstRows] = await Database.select(`show variables like 'log_bin_basename'`);
+     const logBinBaseName = firstRows[0]['session_variables']['Value'];
+
+     if (!logBinBaseName || typeof logBinBaseName !== 'string') {
+         throw new Error('log_bin_basename not found');
+     }
+
+     const binLogPath = path.dirname(logBinBaseName);
+
+     const [rows] = await Database.select('SHOW BINARY LOGS');
+
+     if (rows.length > MAX_BINARY_LOGS) {
+         console.error(chalk.bold(chalk.red('Warning: more binary logs are stored on the system than MAX_BINARY_LOGS. Please check the MySQL configuration.')));
+
+         // Only copy the last MAX_BINARY_LOGS rows
+         rows.splice(0, rows.length - MAX_BINARY_LOGS);
+     }
+
+     const lastRow = rows.pop();
+
+     const allBinaryLogs = await listAllFiles(STAMHOOFD.objectStoragePath + '/binlogs');
+
+     for (const row of rows) {
+         const data = row[''];
+         const logName = data['Log_name'];
+
+         const fullPath = binLogPath + '/' + logName;
+         await uploadBinaryLog(fullPath, false, true, allBinaryLogs);
+     }
+
+     // Upload the (still partial) last binary log
+     if (lastRow) {
+         const data = lastRow[''];
+         const logName = data['Log_name'];
+
+         const fullPath = binLogPath + '/' + logName;
+         await uploadBinaryLog(fullPath, true, true, allBinaryLogs);
+     }
+
+     LAST_BINARY_BACKUP = {
+         date: new Date(),
+     };
+ }
+
+ export async function uploadBinaryLog(binaryLogPath: string, partial: boolean, gzip = true, allBinaryLogs: ObjectStorageFile[]) {
+     const client = getS3Client();
+
+     const number = parseInt(path.basename(binaryLogPath).split('.')[1]);
+     if (isNaN(number) || !isFinite(number)) {
+         throw new Error('Invalid binary log name: ' + binaryLogPath);
+     }
+
+     // Increase the padding to 10 digits so we never need to worry about number sorting issues in the future
+     const uploadedName = BINARY_LOG_PREFIX + number.toString().padStart(10, '0');
+
+     if (!partial) {
+         // Check if the file exists in S3
+         const key = STAMHOOFD.objectStoragePath + '/binlogs/' + uploadedName + (gzip ? '.gz' : '') + '.enc';
+
+         if (allBinaryLogs.find(f => f.key === key)) {
+             // Delete the partial upload if one is found
+             await deletePartial(client, uploadedName, binaryLogPath, gzip, allBinaryLogs);
+             return;
+         }
+
+         // Double check using HEAD
+         try {
+             await client.send(new HeadObjectCommand({
+                 Bucket: STAMHOOFD.SPACES_BUCKET,
+                 Key: key,
+                 ResponseCacheControl: 'no-cache',
+             }));
+             console.log('Binary log already exists: ' + uploadedName);
+             return;
+         }
+         catch (e) {
+             if (e.name !== 'NotFound') {
+                 throw e;
+             }
+             console.log('Binary log does not exist: ' + uploadedName);
+         }
+     }
+
+     // Copy the file to the local backup folder and change ownership
+     const localBackupFolder = STAMHOOFD.localBackupFolder;
+
+     if (!localBackupFolder.endsWith('/')) {
+         throw new Error('Backup folder should end with a /');
+     }
+
+     let binaryLogPathMoved = localBackupFolder + 'binlogs/' + uploadedName + (partial ? '.partial' : '');
+
+     // Mkdir
+     await execPromise('mkdir -p ' + escapeShellArg(path.dirname(binaryLogPathMoved)));
+
+     await execPromise(`${STAMHOOFD.environment === 'development' ? '' : 'sudo '}cp ${escapeShellArg(binaryLogPath)} ${escapeShellArg(binaryLogPathMoved)}`);
+
+     if (STAMHOOFD.environment !== 'development') {
+         await execPromise(`sudo chown stamhoofd:stamhoofd ${escapeShellArg(binaryLogPathMoved)}`);
+     }
+
+     if (gzip) {
+         // Compress the binary log
+         const compressedFile = binaryLogPathMoved + '.gz';
+         const cmd = `gzip -c ${escapeShellArg(binaryLogPathMoved)} > ${escapeShellArg(compressedFile)}`;
+         console.log('Compressing ' + uploadedName + '...');
+         await execPromise(cmd);
+         console.log('Compressed at ' + compressedFile);
+
+         // Delete the uncompressed file
+         await execPromise('rm ' + escapeShellArg(binaryLogPathMoved));
+         binaryLogPathMoved = compressedFile;
+     }
+
+     // Encrypt the compressed file with GPG for the key identified by STAMHOOFD.keyFingerprint
+     const encryptedFile = binaryLogPathMoved + '.enc';
+
+     // Delete the encrypted file if it already exists
+     try {
+         await execPromise('rm ' + escapeShellArg(encryptedFile));
+     }
+     catch (e) {
+         if (e.code !== 1) {
+             throw e;
+         }
+     }
+
+     const cmd3 = `gpg --recipient ${escapeShellArg(STAMHOOFD.keyFingerprint)} --encrypt --output ${escapeShellArg(encryptedFile)} ${escapeShellArg(binaryLogPathMoved)}`;
+
+     console.log('Encrypting binlog...');
+     await execPromise(cmd3);
+     console.log('MySQL binlog encrypted at ' + encryptedFile);
+
+     // Delete the compressed file
+     await execPromise('rm ' + escapeShellArg(binaryLogPathMoved));
+
+     // Calculate MD5
+     console.log('Calculating MD5...');
+     const md5 = await hashFile(encryptedFile);
+
+     // Create a read stream
+     const stream = fs.createReadStream(encryptedFile);
+
+     const key = STAMHOOFD.objectStoragePath + '/binlogs/' + path.basename(encryptedFile);
+
+     const params = {
+         Bucket: STAMHOOFD.SPACES_BUCKET,
+         Key: key,
+         Body: stream,
+         ContentType: 'application/octet-stream',
+         ContentLength: fs.statSync(encryptedFile).size,
+         ACL: 'private' as const,
+         CacheControl: 'no-cache',
+         ContentMD5: md5,
+     };
+
+     console.log('Uploading binlog to ' + key + '...');
+     const command = new PutObjectCommand(params);
+     const response = await client.send(command);
+
+     if (response.$metadata.httpStatusCode !== 200) {
+         throw new Error('Failed to upload binlog');
+     }
+
+     console.log(chalk.green('✓ Binlog uploaded to ' + key));
+
+     // Remove the encrypted file
+     await execPromise('rm ' + escapeShellArg(encryptedFile));
+
+     if (!partial) {
+         await deletePartial(client, uploadedName, binaryLogPath, gzip, allBinaryLogs);
+     }
+ }
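The 10-digit padding exists because object-storage listings sort keys lexicographically, and the cleanup logic above relies on that order. A quick demonstration:

```ts
const unpadded = [1, 2, 10].map(n => 'binlog.' + n);
console.log([...unpadded].sort());
// → ['binlog.1', 'binlog.10', 'binlog.2']   — wrong order

const padded = [1, 2, 10].map(n => 'binlog.' + n.toString().padStart(10, '0'));
console.log([...padded].sort());
// → ['binlog.0000000001', 'binlog.0000000002', 'binlog.0000000010']
```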
+
+ async function deletePartial(client: S3Client, uploadedName: string, binaryLogPath: string, gzip: boolean, allBinaryLogs: ObjectStorageFile[]) {
+     // Check if a partial exists on the server and delete it to keep it clean
+     const key = STAMHOOFD.objectStoragePath + '/binlogs/' + uploadedName + '.partial' + (gzip ? '.gz' : '') + '.enc';
+     try {
+         if (allBinaryLogs.find(f => f.key === key)) {
+             console.log('Partial binary log exists at ' + key + ', which is no longer needed. Deleting...');
+
+             await client.send(new DeleteObjectCommand({
+                 Bucket: STAMHOOFD.SPACES_BUCKET,
+                 Key: key,
+             }));
+
+             console.log('Partial binary log deleted');
+         }
+     }
+     catch (e) {
+         if (e.name !== 'NotFound') {
+             throw e;
+         }
+     }
+ }
@@ -0,0 +1,42 @@
+ import { Database } from '@simonbackx/simple-database';
+
+ export async function checkReplicaStatus() {
+     const [rows] = await Database.select(`SHOW REPLICA STATUS`);
+
+     if (rows.length === 0) {
+         throw new Error('No replica status found');
+     }
+
+     console.log(rows);
+
+     const row = rows[0][''];
+
+     // Check Replica_SQL_Running = Yes
+     if (row['Replica_SQL_Running'] !== 'Yes') {
+         throw new Error('Replica_SQL_Running is not Yes');
+     }
+
+     // Check Replica_IO_Running = Yes
+     if (row['Replica_IO_Running'] !== 'Yes') {
+         throw new Error('Replica_IO_Running is not Yes');
+     }
+
+     // Check Last_IO_Error is empty
+     if (row['Last_IO_Error'] !== '') {
+         throw new Error('Last_IO_Error is not empty: ' + row['Last_IO_Error']);
+     }
+
+     // Check Last_SQL_Error is empty
+     if (row['Last_SQL_Error'] !== '') {
+         throw new Error('Last_SQL_Error is not empty: ' + row['Last_SQL_Error']);
+     }
+
+     if (typeof row['Seconds_Behind_Source'] !== 'number') {
+         throw new Error('Seconds_Behind_Source is not a number');
+     }
+
+     // Seconds_Behind_Source is not super accurate, so we allow a large margin
+     if (row['Seconds_Behind_Source'] > 60 * 5) {
+         throw new Error('Seconds_Behind_Source is greater than 5 minutes');
+     }
+ }
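A sketch of how this check might be run periodically on the replica host; the interval and the logging here are assumptions, not part of the package:

```ts
setInterval(() => {
    checkReplicaStatus()
        .then(() => console.log('Replica healthy'))
        .catch((e) => {
            // A broken replica silently invalidates future backups, so treat this as urgent
            console.error('Replica check failed:', e);
        });
}, 60 * 1000);
```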
package/stamhoofd.d.ts ADDED
@@ -0,0 +1,14 @@
+ export {};
+
+ /**
+  * Stamhoofd uses a global variable to store some configuration. We don't use process.env because it can only store
+  * strings, and we need objects for our localized domains (different domains for each locale).
+  * Having to encode and decode those values would be inefficient.
+  *
+  * So we use our own global configuration variable: STAMHOOFD. It is available everywhere and contains
+  * different information depending on the environment (frontend/backend/shared). TypeScript will
+  * always suggest the possible keys.
+  */
+ declare global {
+     const STAMHOOFD: BackupEnvironment;
+ }
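`BackupEnvironment` itself is declared elsewhere in the repo; from the STAMHOOFD keys this package reads, its shape plausibly includes at least the following (a reconstruction for reference, not the actual declaration):

```ts
interface BackupEnvironment {
    environment: string;       // 'development' disables sudo in this package
    DB_DATABASE: string;
    DB_USER: string;
    DB_PASS: string;
    keyFingerprint: string;    // GPG key the dumps are encrypted for
    localBackupFolder: string; // must end with '/'
    objectStoragePath: string; // no leading or trailing '/'
    SPACES_ENDPOINT: string;
    SPACES_BUCKET: string;
    SPACES_KEY: string;
    SPACES_SECRET: string;
    AWS_REGION: string;
}
```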