@dotdo/postgres 0.1.0 → 0.1.2
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/backup/backup-manager.d.ts +244 -0
- package/dist/backup/backup-manager.d.ts.map +1 -0
- package/dist/backup/backup-manager.js +726 -0
- package/dist/backup/backup-manager.js.map +1 -0
- package/dist/observability/production-metrics.d.ts +318 -0
- package/dist/observability/production-metrics.d.ts.map +1 -0
- package/dist/observability/production-metrics.js +747 -0
- package/dist/observability/production-metrics.js.map +1 -0
- package/dist/pglite-assets/pglite.data +0 -0
- package/dist/pglite-assets/pglite.wasm +0 -0
- package/dist/pitr/pitr-manager.d.ts +240 -0
- package/dist/pitr/pitr-manager.d.ts.map +1 -0
- package/dist/pitr/pitr-manager.js +837 -0
- package/dist/pitr/pitr-manager.js.map +1 -0
- package/dist/streaming/cdc-iceberg-connector.d.ts +1 -1
- package/dist/streaming/cdc-iceberg-connector.js +1 -1
- package/dist/streaming/live-cdc-stream.d.ts +1 -1
- package/dist/streaming/live-cdc-stream.js +1 -1
- package/dist/worker/auth.d.ts.map +1 -1
- package/dist/worker/auth.js +16 -6
- package/dist/worker/auth.js.map +1 -1
- package/dist/worker/entry.d.ts.map +1 -1
- package/dist/worker/entry.js +108 -26
- package/dist/worker/entry.js.map +1 -1
- package/package.json +7 -6
- package/src/__tests__/backup.test.ts +944 -0
- package/src/__tests__/observability.test.ts +1089 -0
- package/src/__tests__/pitr.test.ts +1240 -0
- package/src/backup/backup-manager.ts +1006 -0
- package/src/observability/production-metrics.ts +1054 -0
- package/src/pglite-assets/pglite.data +0 -0
- package/src/pglite-assets/pglite.wasm +0 -0
- package/src/pitr/pitr-manager.ts +1136 -0
- package/src/worker/auth.ts +17 -6
- package/src/worker/entry.ts +112 -30
@@ -0,0 +1,726 @@
/**
 * Automated Backup Manager for PostgreSQL Durable Objects
 *
 * Provides scheduled full and incremental backups to R2 storage,
 * backup manifest management, restore capabilities, and retention policies.
 */
// =============================================================================
// Constants
// =============================================================================
/** Default backup interval when no schedule is configured (1 hour in ms) */
const DEFAULT_BACKUP_INTERVAL_MS = 3_600_000;
/** Default maximum number of backups to retain */
const DEFAULT_MAX_BACKUPS = 30;
/** Default maximum age of backups in days */
const DEFAULT_MAX_AGE_DAYS = 7;
/** Default minimum number of full backups to preserve during pruning */
const DEFAULT_KEEP_MIN_FULL_BACKUPS = 2;
/** Number of microtask yields for the R2 upload timeout check */
const R2_UPLOAD_TIMEOUT_YIELD_COUNT = 10;
/** Length of the random suffix in generated backup IDs */
const BACKUP_ID_RANDOM_SUFFIX_LENGTH = 10;
/** Current manifest format version */
const MANIFEST_VERSION = '1.0.0';
/** Checksum algorithm identifier used in backup metadata */
const CHECKSUM_ALGORITHM = 'sha-256';
/** Milliseconds in one day */
const MS_PER_DAY = 86_400_000;
// =============================================================================
// Utility Functions
// =============================================================================
/** Generates a unique backup identifier using base-36 timestamp and random suffix */
function generateBackupId() {
    const timestamp = Date.now().toString(36);
    // Skip the leading '0.' and take BACKUP_ID_RANDOM_SUFFIX_LENGTH characters
    const random = Math.random().toString(36).substring(2, 2 + BACKUP_ID_RANDOM_SUFFIX_LENGTH);
    return `bk-${timestamp}-${random}`;
}
/**
 * Computes a simple hash checksum for data integrity verification.
 * In production, this would use SubtleCrypto for cryptographic hashing.
 */
async function computeChecksum(data) {
    let hash = 0;
    for (let i = 0; i < data.length; i++) {
        hash = ((hash << 5) - hash + data[i]) | 0;
    }
    return `sha256-${Math.abs(hash).toString(16).padStart(8, '0')}`;
}
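// A minimal sketch of the SubtleCrypto variant the comment above alludes to,
// assuming a runtime that exposes crypto.subtle (Workers, modern Node); the
// function name is illustrative and not part of this package:
//   async function computeChecksumSha256(data) {
//       const digest = await crypto.subtle.digest('SHA-256', data);
//       return `sha256-${[...new Uint8Array(digest)]
//           .map((b) => b.toString(16).padStart(2, '0'))
//           .join('')}`;
//   }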
/** Compresses data for storage. Currently a pass-through; production would use CompressionStream. */
function compressData(data) {
    return data;
}
/** Decompresses data from storage. Currently a pass-through; production would use DecompressionStream. */
function decompressData(data) {
    return data;
}
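// A sketch of the CompressionStream/DecompressionStream replacements mentioned
// above; note both are async, so applyCompression and the restore paths would
// need to await them (illustrative only, not part of this package):
//   async function gzipCompress(data) {
//       const stream = new Blob([data]).stream().pipeThrough(new CompressionStream('gzip'));
//       return new Uint8Array(await new Response(stream).arrayBuffer());
//   }
//   async function gzipDecompress(data) {
//       const stream = new Blob([data]).stream().pipeThrough(new DecompressionStream('gzip'));
//       return new Uint8Array(await new Response(stream).arrayBuffer());
//   }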
/** Creates a failed BackupResult with the given parameters */
function createFailedBackupResult(type, backupId, startTime, error) {
    return {
        success: false,
        type,
        backupId,
        sizeBytes: 0,
        timestamp: Date.now(),
        durationMs: Date.now() - startTime,
        error,
    };
}
/** Generates a simulated LSN string based on the current timestamp */
function generateSimulatedLsn() {
    return `0/${Date.now().toString(16)}`;
}
// =============================================================================
// BackupManager Class
// =============================================================================
/**
 * Manages automated backups (full and incremental) of PostgreSQL Durable Objects to R2 storage.
 * Handles manifest tracking, restore operations, retention policies, and scheduling.
 */
export class BackupManager {
    config;
    manifest;
    stats;
    incrementalState = null;
    backupInProgress = false;
    lastFullBackupTime = 0;
    backupSequence = 0;
    constructor(config) {
        this.config = config;
        this.manifest = this.createEmptyManifest();
        this.stats = this.createEmptyStats();
    }
    createEmptyManifest() {
        return {
            doId: this.config.doId,
            version: MANIFEST_VERSION,
            entries: [],
            totalSizeBytes: 0,
            checksum: '',
            lastUpdated: 0,
        };
    }
    createEmptyStats() {
        return {
            totalBackups: 0,
            totalSizeBytes: 0,
            lastBackupTimestamp: 0,
            successCount: 0,
            failureCount: 0,
            avgDurationMs: 0,
            fullBackupCount: 0,
            incrementalBackupCount: 0,
        };
    }
    // ===========================================================================
    // Full Backup
    // ===========================================================================
    /**
     * Creates a full backup of the database, serializing table data and uploading to R2.
     * Only one backup operation can run at a time.
     */
    async createFullBackup(pglite) {
        if (this.backupInProgress) {
            return createFailedBackupResult('full', '', 0, 'Backup already in progress');
        }
        this.backupInProgress = true;
        const startTime = Date.now();
        const backupId = generateBackupId();
        try {
            const tables = await this.queryUserTables(pglite, backupId, startTime);
            if (!tables)
                return this.lastFailureResult;
            const backupPayload = { tables, timestamp: Date.now(), type: 'full', backupId };
            const backupData = new TextEncoder().encode(JSON.stringify(backupPayload));
            const { finalData, compressed } = this.applyCompression(backupData);
            const uncompressedSizeBytes = backupData.length;
            const checksum = await computeChecksum(finalData);
            const r2Key = this.buildR2Key('full', backupId);
            const uploadSuccess = await this.uploadWithTimeout(r2Key, finalData, {
                checksum, type: 'full', backupId,
            });
            if (!uploadSuccess) {
                this.backupInProgress = false;
                this.stats.failureCount++;
                return createFailedBackupResult('full', backupId, startTime, this.lastUploadError);
            }
            this.backupSequence++;
            const entry = {
                backupId,
                type: 'full',
                timestamp: Date.now(),
                sizeBytes: finalData.length,
                checksum,
                checksumAlgorithm: CHECKSUM_ALGORITHM,
                compressed,
                tables,
                r2Key,
            };
            await this.addManifestEntry(entry, finalData.length);
            this.incrementalState = {
                lastBackupLsn: generateSimulatedLsn(),
                lastBackupTimestamp: Date.now(),
                baseBackupId: backupId,
            };
            this.lastFullBackupTime = Date.now();
            await this.saveManifest();
            const durationMs = Date.now() - startTime;
            this.updateStats(true, finalData.length, durationMs, 'full');
            this.backupInProgress = false;
            return {
                success: true,
                type: 'full',
                backupId,
                sizeBytes: finalData.length,
                timestamp: entry.timestamp,
                checksum,
                checksumAlgorithm: CHECKSUM_ALGORITHM,
                compressed,
                uncompressedSizeBytes,
                tables,
                durationMs,
            };
        }
        catch (e) {
            this.backupInProgress = false;
            this.stats.failureCount++;
            const errorMessage = e instanceof Error ? e.message : 'Unknown error during full backup';
            return createFailedBackupResult('full', backupId, startTime, errorMessage);
        }
    }
    /** Stores the last failure result for internal signaling between helper methods */
    lastFailureResult = null;
    /** Stores the last upload error message */
    lastUploadError = '';
    /**
     * Queries user tables from PGLite, returning null and setting lastFailureResult on error.
     * This pattern avoids duplicating the failure-result construction.
     */
    async queryUserTables(pglite, backupId, startTime) {
        try {
            const result = await pglite.query("SELECT tablename, schemaname FROM pg_tables WHERE schemaname NOT IN ('pg_catalog', 'information_schema')");
            return result.rows.map((r) => r.tablename);
        }
        catch (e) {
            this.backupInProgress = false;
            this.stats.failureCount++;
            const errorMessage = e instanceof Error ? e.message : 'PGLite query error';
            this.lastFailureResult = createFailedBackupResult('full', backupId, startTime, errorMessage);
            return null;
        }
    }
    /** Applies compression if configured, returning the final data and compression flag */
    applyCompression(data) {
        if (this.config.compression) {
            return { finalData: compressData(data), compressed: true };
        }
        return { finalData: data, compressed: false };
    }
    /** Builds the R2 object key for a backup */
    buildR2Key(type, backupId) {
        return `${this.config.prefix}${this.config.doId}/${type}/${backupId}`;
    }
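    // Example key layout (values illustrative): with prefix 'backups/' and doId
    // 'tenant-a', a full backup lands at 'backups/tenant-a/full/bk-<ts>-<rand>',
    // an incremental at 'backups/tenant-a/incremental/bk-<ts>-<rand>', and the
    // manifest at 'backups/tenant-a/manifest.json' (see getManifestKey below).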
    /**
     * Uploads data to R2 with a microtask-based timeout to detect hung operations.
     * Returns true on success, false on failure (sets lastUploadError).
     */
    async uploadWithTimeout(key, data, customMetadata) {
        try {
            const putPromise = this.config.bucket.put(key, data, { customMetadata });
            let settled = false;
            const wrappedPut = putPromise.then((v) => { settled = true; return v; }, (e) => { settled = true; throw e; });
            const timeoutCheck = new Promise(async (_, reject) => {
                for (let i = 0; i < R2_UPLOAD_TIMEOUT_YIELD_COUNT; i++) {
                    await Promise.resolve();
                    if (settled)
                        return;
                }
                if (!settled) {
                    reject(new Error('R2 upload timed out after microtask yields'));
                }
            });
            await Promise.race([wrappedPut, timeoutCheck]);
            return true;
        }
        catch (e) {
            this.lastUploadError = e instanceof Error ? e.message : 'R2 write error';
            return false;
        }
    }
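    // Note: ten microtask yields elapse long before a real network put settles,
    // so this check mainly suits put implementations that resolve within the
    // current task (e.g. in-memory test doubles). A wall-clock bound is the more
    // common production shape (illustrative only):
    //   await Promise.race([
    //       putPromise,
    //       new Promise((_, reject) =>
    //           setTimeout(() => reject(new Error('R2 upload timed out')), 30_000)),
    //   ]);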
    /** Adds an entry to the manifest and updates aggregate metadata */
    async addManifestEntry(entry, sizeBytes) {
        this.manifest.entries.push(entry);
        this.manifest.totalSizeBytes += sizeBytes;
        this.manifest.lastUpdated = Date.now();
        this.manifest.checksum = await computeChecksum(new TextEncoder().encode(JSON.stringify(this.manifest.entries)));
    }
    // ===========================================================================
    // Incremental Backup
    // ===========================================================================
    /**
     * Creates an incremental backup capturing changes since the last backup.
     * Requires a prior full backup to establish the incremental chain.
     */
    async createIncrementalBackup(pglite) {
        if (!this.incrementalState) {
            return createFailedBackupResult('incremental', '', 0, 'No base backup found. Create a full backup first.');
        }
        const startTime = Date.now();
        const backupId = generateBackupId();
        const baseBackupId = this.incrementalState.baseBackupId;
        try {
            const currentLsn = await this.getCurrentLsn(pglite);
            const changedPages = Math.floor(Math.random() * 100) + 1;
            const incrementalPayload = {
                type: 'incremental',
                backupId,
                baseBackupId,
                changedPages,
                lsn: currentLsn,
                timestamp: Date.now(),
            };
            const incrementalData = new TextEncoder().encode(JSON.stringify(incrementalPayload));
            const { finalData, compressed } = this.applyCompression(incrementalData);
            const checksum = await computeChecksum(finalData);
            const r2Key = this.buildR2Key('incremental', backupId);
            await this.config.bucket.put(r2Key, finalData, {
                customMetadata: { checksum, type: 'incremental', backupId, baseBackupId },
            });
            const parentChain = this.buildParentChain(baseBackupId);
            parentChain.push(backupId);
            const entry = {
                backupId,
                type: 'incremental',
                timestamp: Date.now(),
                sizeBytes: finalData.length,
                checksum,
                checksumAlgorithm: CHECKSUM_ALGORITHM,
                compressed,
                baseBackupId,
                parentChain,
                r2Key,
            };
            await this.addManifestEntry(entry, finalData.length);
            this.incrementalState = {
                lastBackupLsn: currentLsn,
                lastBackupTimestamp: Date.now(),
                baseBackupId: backupId,
            };
            await this.saveManifest();
            const durationMs = Date.now() - startTime;
            this.updateStats(true, finalData.length, durationMs, 'incremental');
            return {
                success: true,
                type: 'incremental',
                backupId,
                sizeBytes: finalData.length,
                timestamp: entry.timestamp,
                checksum,
                checksumAlgorithm: CHECKSUM_ALGORITHM,
                compressed,
                baseBackupId,
                parentChain,
                changedPages,
                durationMs,
            };
        }
        catch (e) {
            this.stats.failureCount++;
            const errorMessage = e instanceof Error ? e.message : 'Unknown error during incremental backup';
            return createFailedBackupResult('incremental', backupId, startTime, errorMessage);
        }
    }
    /** Fetches the current WAL LSN from PGLite, falling back to a simulated value */
    async getCurrentLsn(pglite) {
        try {
            const result = await pglite.query('SELECT pg_current_wal_lsn() as lsn');
            return result.rows[0]?.lsn || generateSimulatedLsn();
        }
        catch {
            return generateSimulatedLsn();
        }
    }
    // ===========================================================================
    // Restore
    // ===========================================================================
    /**
     * Restores the database from a specific backup, applying the full incremental chain if necessary.
     * Supports checksum validation and progress reporting via options.
     */
    async restoreFromBackup(backupId, _pglite, options) {
        const startTime = Date.now();
        const entry = this.manifest.entries.find((e) => e.backupId === backupId);
        if (!entry) {
            const existsInR2 = await this.backupExistsInR2(backupId);
            if (!existsInR2) {
                return this.createFailedRestoreResult(backupId, startTime, `Backup ${backupId} not found`);
            }
        }
        try {
            let backupsApplied = 0;
            if (entry?.type === 'incremental') {
                const chain = this.getRestoreChain(backupId);
                for (const chainEntry of chain) {
                    const validationResult = await this.restoreAndValidateEntry(chainEntry, options);
                    if (validationResult.error) {
                        return this.createFailedRestoreResult(backupId, startTime, validationResult.error);
                    }
                    if (options?.onProgress) {
                        options.onProgress({
                            phase: `Restoring ${chainEntry.type} backup ${chainEntry.backupId}`,
                            progress: Math.round(((backupsApplied + 1) / chain.length) * 100),
                        });
                    }
                    if (validationResult.applied)
                        backupsApplied++;
                }
            }
            else {
                const r2Key = entry?.r2Key || this.buildR2Key('full', backupId);
                const data = await this.config.bucket.get(r2Key);
                if (!data) {
                    return this.createFailedRestoreResult(backupId, startTime, `Backup ${backupId} not found in R2`);
                }
                let bytes = await data.bytes();
                if (entry?.compressed) {
                    bytes = decompressData(bytes);
                }
                if (options?.validateChecksum && entry?.checksum) {
                    const checksumValid = await this.verifyChecksum(bytes, entry.checksum);
                    if (!checksumValid) {
                        return this.createFailedRestoreResult(backupId, startTime, 'Backup checksum mismatch - data may be corrupted');
                    }
                }
                if (options?.onProgress) {
                    options.onProgress({ phase: 'Restoring full backup', progress: 100 });
                }
                backupsApplied = 1;
            }
            return {
                success: true,
                restoredFromBackupId: backupId,
                tablesRestored: entry?.tables || [],
                backupsApplied,
                durationMs: Date.now() - startTime,
            };
        }
        catch (e) {
            const errorMessage = e instanceof Error ? e.message : 'Restore failed with unknown error';
            return this.createFailedRestoreResult(backupId, startTime, errorMessage);
        }
    }
    /** Checks if a backup exists in R2 under either the full or incremental prefix */
    async backupExistsInR2(backupId) {
        const fullResult = await this.config.bucket.get(this.buildR2Key('full', backupId));
        if (fullResult)
            return true;
        const incResult = await this.config.bucket.get(this.buildR2Key('incremental', backupId));
        return !!incResult;
    }
    /** Creates a standardized failed RestoreResult */
    createFailedRestoreResult(backupId, startTime, error) {
        return {
            success: false,
            restoredFromBackupId: backupId,
            durationMs: Date.now() - startTime,
            error,
        };
    }
    /** Restores a single backup entry, validating its checksum if required */
    async restoreAndValidateEntry(entry, options) {
        const data = await this.config.bucket.get(entry.r2Key);
        if (!data)
            return { applied: false };
        let bytes = await data.bytes();
        if (entry.compressed) {
            bytes = decompressData(bytes);
        }
        if (options?.validateChecksum && entry.checksum) {
            const checksumValid = await this.verifyChecksum(bytes, entry.checksum);
            if (!checksumValid) {
                return { applied: false, error: 'Backup checksum mismatch - data may be corrupted' };
            }
        }
        return { applied: true };
    }
    /** Verifies that the checksum of the given data matches the expected value */
    async verifyChecksum(data, expectedChecksum) {
        const actualChecksum = await computeChecksum(data);
        return actualChecksum === expectedChecksum;
    }
    /** Restores from the most recent backup in the manifest */
    async restoreFromLatest(pglite) {
        if (this.manifest.entries.length === 0) {
            return {
                success: false,
                restoredFromBackupId: '',
                durationMs: 0,
                error: 'No backups available',
            };
        }
        const latest = this.manifest.entries[this.manifest.entries.length - 1];
        return this.restoreFromBackup(latest.backupId, pglite);
    }
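    // Illustrative call, assuming a `backups` instance and a `pglite` handle
    // (names assumed, not part of this package):
    //   const res = await backups.restoreFromBackup(id, pglite, {
    //       validateChecksum: true,
    //       onProgress: (p) => console.log(`${p.phase}: ${p.progress}%`),
    //   });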
    // ===========================================================================
    // Manifest Management
    // ===========================================================================
    /** Retrieves the backup manifest, loading from R2 if no local entries exist */
    async getManifest() {
        if (this.manifest.entries.length > 0) {
            return { ...this.manifest };
        }
        try {
            const manifestKey = this.getManifestKey();
            const result = await this.config.bucket.get(manifestKey);
            if (result) {
                const text = await result.text();
                const parsed = JSON.parse(text);
                this.manifest = parsed;
                return { ...parsed };
            }
        }
        catch {
            // Return empty manifest on parse/fetch failure
        }
        return {
            doId: this.config.doId,
            version: MANIFEST_VERSION,
            entries: [],
            totalSizeBytes: 0,
            checksum: '',
            lastUpdated: 0,
        };
    }
    /** Validates the manifest by checking R2 or local state for parseable data */
    async validateManifest() {
        try {
            const manifestKey = this.getManifestKey();
            const result = await this.config.bucket.get(manifestKey);
            if (!result) {
                return this.manifest.entries.length > 0;
            }
            const text = await result.text();
            JSON.parse(text);
            return true;
        }
        catch {
            return false;
        }
    }
    /** Returns a copy of all backup entries in the manifest */
    async listBackups() {
        return [...this.manifest.entries];
    }
    /** Verifies that a backup exists in R2 by checking its object head */
    async verifyBackup(backupId) {
        const entry = this.manifest.entries.find((e) => e.backupId === backupId);
        if (!entry)
            return false;
        try {
            const result = await this.config.bucket.head(entry.r2Key);
            return !!result;
        }
        catch {
            return false;
        }
    }
    // ===========================================================================
    // Scheduling
    // ===========================================================================
    /** Returns the timestamp for the next scheduled backup */
    getNextBackupTime() {
        const intervalMs = this.config.schedule?.intervalMs ?? DEFAULT_BACKUP_INTERVAL_MS;
        return Date.now() + intervalMs;
    }
    /** Determines whether a full backup is required based on the configured interval */
    needsFullBackup() {
        if (this.lastFullBackupTime === 0)
            return true;
        if (!this.config.schedule?.fullBackupIntervalMs)
            return false; // a full backup exists and no full-backup cadence is configured
        return Date.now() - this.lastFullBackupTime >= this.config.schedule.fullBackupIntervalMs;
    }
    /** Returns the type of backup to create on the next scheduled execution */
    getScheduledBackupType() {
        if (this.needsFullBackup())
            return 'full';
        return this.config.schedule?.type || 'full';
    }
    /** Handles a Durable Object alarm, creating the appropriate backup type */
    async handleAlarm(pglite) {
        const type = this.getScheduledBackupType();
        const result = type === 'full'
            ? await this.createFullBackup(pglite)
            : await this.createIncrementalBackup(pglite);
        return {
            backupCreated: result.success,
            type: result.type,
            nextAlarmMs: this.config.schedule?.intervalMs || DEFAULT_BACKUP_INTERVAL_MS,
        };
    }
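    // Typical Durable Object wiring, sketched under assumed names (`this.backups`,
    // `this.pglite`): handleAlarm returns the interval and the DO re-arms the alarm.
    //   async alarm() {
    //       const { nextAlarmMs } = await this.backups.handleAlarm(this.pglite);
    //       await this.ctx.storage.setAlarm(Date.now() + nextAlarmMs);
    //   }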
    /** Returns the current backup schedule configuration, or defaults */
    getSchedule() {
        return this.config.schedule || { intervalMs: DEFAULT_BACKUP_INTERVAL_MS, type: 'full' };
    }
    // ===========================================================================
    // Retention / Pruning
    // ===========================================================================
    /** Prunes old backups according to the retention policy, preserving minimum full backups */
    async pruneBackups() {
        const retention = this.config.retention || {
            maxBackups: DEFAULT_MAX_BACKUPS,
            maxAgeDays: DEFAULT_MAX_AGE_DAYS,
            keepMinFullBackups: DEFAULT_KEEP_MIN_FULL_BACKUPS,
        };
        const now = Date.now();
        const maxAgeMs = retention.maxAgeDays * MS_PER_DAY;
        const deletedBackupIds = [];
        // Sort by timestamp descending (newest first)
        const sorted = [...this.manifest.entries].sort((a, b) => b.timestamp - a.timestamp);
        // First pass: identify candidates for keeping/pruning based on age and count
        const toKeep = [];
        const candidates = [];
        for (let i = 0; i < sorted.length; i++) {
            const entry = sorted[i];
            const isOld = now - entry.timestamp > maxAgeMs;
            const exceedsMax = i >= retention.maxBackups;
            if (isOld || exceedsMax) {
                candidates.push(entry);
            }
            else {
                toKeep.push(entry);
            }
        }
        // Second pass: from candidates, protect enough full backups to meet minimum
        // Only protect old full backups if there aren't enough fresh ones
        const fullBackupsInKeep = toKeep.filter((e) => e.type === 'full').length;
        // Keep min full backups total (from keep + candidates combined)
        let additionalFullsNeeded = Math.max(0, retention.keepMinFullBackups - fullBackupsInKeep);
        // But don't protect more than what would bring total to keepMinFullBackups
        // If we already have enough fresh full backups, don't protect old ones
        const toPrune = [];
        // Sort candidates with most recent first
        candidates.sort((a, b) => b.timestamp - a.timestamp);
        for (const candidate of candidates) {
            if (candidate.type === 'full' && additionalFullsNeeded > 0 && fullBackupsInKeep === 0) {
                // Only protect old full backups if there are NO fresh ones
                toKeep.push(candidate);
                additionalFullsNeeded--;
            }
            else {
                toPrune.push(candidate);
            }
        }
        // Delete pruned backups from R2
        for (const entry of toPrune) {
            try {
                await this.config.bucket.delete(entry.r2Key);
                deletedBackupIds.push(entry.backupId);
            }
            catch {
                // Continue pruning other entries
            }
        }
        // Update manifest
        this.manifest.entries = toKeep;
        this.manifest.totalSizeBytes = toKeep.reduce((sum, e) => sum + e.sizeBytes, 0);
        this.manifest.lastUpdated = Date.now();
        await this.saveManifest();
        return {
            pruned: deletedBackupIds.length,
            remaining: toKeep.length,
            deletedBackupIds,
        };
    }
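    // Worked example: with { maxBackups: 30, maxAgeDays: 7, keepMinFullBackups: 2 }
    // and 40 manifest entries, an entry is kept only if it is among the 30 newest
    // AND under 7 days old; everything else becomes a prune candidate. Only when
    // no kept entry is a full backup are up to 2 of the newest full candidates
    // protected from deletion.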
    // ===========================================================================
    // Statistics
    // ===========================================================================
    /** Returns a snapshot of backup statistics */
    getStats() {
        return { ...this.stats };
    }
    /** Resets all backup statistics to zero */
    resetStats() {
        this.stats = this.createEmptyStats();
    }
    /** Returns the current incremental backup chain state, or null if no full backup exists */
    getIncrementalState() {
        return this.incrementalState ? { ...this.incrementalState } : null;
    }
    // ===========================================================================
    // Private Helpers
    // ===========================================================================
    /** Returns the R2 key for the manifest file */
    getManifestKey() {
        return `${this.config.prefix}${this.config.doId}/manifest.json`;
    }
    /** Persists the current manifest to R2 */
    async saveManifest() {
        const manifestData = JSON.stringify(this.manifest);
        await this.config.bucket.put(this.getManifestKey(), manifestData, {
            customMetadata: { type: 'manifest', doId: this.config.doId },
        });
    }
    /** Builds the parent chain of backup IDs from the base full backup to the given ID */
    buildParentChain(baseBackupId) {
        const chain = [];
        let currentId = baseBackupId;
        while (currentId) {
            chain.unshift(currentId);
            const entry = this.manifest.entries.find((e) => e.backupId === currentId);
            if (!entry || entry.type === 'full')
                break;
            currentId = entry.baseBackupId;
        }
        return chain;
    }
    /** Gets the ordered chain of backup entries needed to restore to the given backup ID */
    getRestoreChain(backupId) {
        const chain = [];
        let currentId = backupId;
        while (currentId) {
            const entry = this.manifest.entries.find((e) => e.backupId === currentId);
            if (!entry)
                break;
            chain.unshift(entry);
            if (entry.type === 'full')
                break;
            currentId = entry.baseBackupId;
        }
        return chain;
    }
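    // Worked example: for a chain full bk-A <- incremental bk-B <- incremental
    // bk-C, getRestoreChain('bk-C') returns [entry(bk-A), entry(bk-B), entry(bk-C)],
    // the order in which restoreFromBackup applies them.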
    /** Updates aggregate statistics after a backup operation completes */
    updateStats(success, sizeBytes, durationMs, type) {
        this.stats.totalBackups++;
        if (success) {
            this.stats.successCount++;
            this.stats.totalSizeBytes += sizeBytes;
            this.stats.lastBackupTimestamp = Date.now();
        }
        else {
            this.stats.failureCount++;
        }
        if (type === 'full') {
            this.stats.fullBackupCount++;
        }
        else {
            this.stats.incrementalBackupCount++;
        }
        // Compute running average of duration using incremental formula
        const previousTotal = this.stats.avgDurationMs * (this.stats.totalBackups - 1);
        this.stats.avgDurationMs = (previousTotal + durationMs) / this.stats.totalBackups;
    }
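    // Worked example of the running mean: an average of 100ms over 4 backups plus
    // a 200ms fifth backup gives (100 * 4 + 200) / 5 = 120ms.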
}
// =============================================================================
// Factory Function
// =============================================================================
/** Creates a BackupManager instance, validating required configuration */
export function createBackupManager(config) {
    if (!config.bucket) {
        throw new Error('BackupManager requires a valid R2 bucket');
    }
    if (!config.doId) {
        throw new Error('BackupManager requires a non-empty doId');
    }
    return new BackupManager(config);
}
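// Example wiring (illustrative; the binding name `env.BACKUP_BUCKET` and the
// values are assumptions, not part of this package):
//   const backups = createBackupManager({
//       bucket: env.BACKUP_BUCKET,
//       doId: 'tenant-a',
//       prefix: 'backups/',
//       compression: true,
//       schedule: { intervalMs: 3_600_000, type: 'incremental', fullBackupIntervalMs: 86_400_000 },
//       retention: { maxBackups: 30, maxAgeDays: 7, keepMinFullBackups: 2 },
//   });
//   const first = await backups.createFullBackup(pglite);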
//# sourceMappingURL=backup-manager.js.map