@dotdo/postgres 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/backup/backup-manager.d.ts +244 -0
  2. package/dist/backup/backup-manager.d.ts.map +1 -0
  3. package/dist/backup/backup-manager.js +726 -0
  4. package/dist/backup/backup-manager.js.map +1 -0
  5. package/dist/observability/production-metrics.d.ts +318 -0
  6. package/dist/observability/production-metrics.d.ts.map +1 -0
  7. package/dist/observability/production-metrics.js +747 -0
  8. package/dist/observability/production-metrics.js.map +1 -0
  9. package/dist/pglite-assets/pglite.data +0 -0
  10. package/dist/pglite-assets/pglite.wasm +0 -0
  11. package/dist/pitr/pitr-manager.d.ts +240 -0
  12. package/dist/pitr/pitr-manager.d.ts.map +1 -0
  13. package/dist/pitr/pitr-manager.js +837 -0
  14. package/dist/pitr/pitr-manager.js.map +1 -0
  15. package/dist/streaming/cdc-iceberg-connector.d.ts +1 -1
  16. package/dist/streaming/cdc-iceberg-connector.js +1 -1
  17. package/dist/streaming/live-cdc-stream.d.ts +1 -1
  18. package/dist/streaming/live-cdc-stream.js +1 -1
  19. package/dist/worker/auth.d.ts.map +1 -1
  20. package/dist/worker/auth.js +16 -6
  21. package/dist/worker/auth.js.map +1 -1
  22. package/dist/worker/entry.d.ts.map +1 -1
  23. package/dist/worker/entry.js +108 -26
  24. package/dist/worker/entry.js.map +1 -1
  25. package/package.json +7 -6
  26. package/src/__tests__/backup.test.ts +944 -0
  27. package/src/__tests__/observability.test.ts +1089 -0
  28. package/src/__tests__/pitr.test.ts +1240 -0
  29. package/src/backup/backup-manager.ts +1006 -0
  30. package/src/observability/production-metrics.ts +1054 -0
  31. package/src/pglite-assets/pglite.data +0 -0
  32. package/src/pglite-assets/pglite.wasm +0 -0
  33. package/src/pitr/pitr-manager.ts +1136 -0
  34. package/src/worker/auth.ts +17 -6
  35. package/src/worker/entry.ts +112 -30
@@ -0,0 +1,837 @@
1
+ /**
2
+ * Point-in-Time Recovery (PITR) Manager for PostgreSQL Durable Objects
3
+ *
4
+ * Provides WAL archiving to R2, recovery to timestamp/LSN/named restore points,
5
+ * timeline management, and WAL segment management.
6
+ */
7
+ // =============================================================================
8
+ // Constants
9
+ // =============================================================================
10
+ /** Default WAL segment size (16 MB) */
11
+ const DEFAULT_SEGMENT_SIZE_BYTES = 16 * 1024 * 1024;
12
+ /** Default retention period for WAL segments in days */
13
+ const DEFAULT_RETENTION_DAYS = 7;
14
+ /** Milliseconds in one day */
15
+ const MS_PER_DAY = 86_400_000;
16
+ /** Maximum number of timelines to retain in history */
17
+ const DEFAULT_MAX_TIMELINE_HISTORY = 10;
18
+ /** Maximum retry attempts for R2 uploads */
19
+ const MAX_UPLOAD_RETRIES = 2;
20
+ /** Estimated WAL replay speed (bytes per millisecond) for recovery plan estimates */
21
+ const ESTIMATED_REPLAY_BYTES_PER_MS = 1024;
22
+ /** Minimum estimated duration for recovery plans in milliseconds */
23
+ const MIN_RECOVERY_PLAN_DURATION_MS = 100;
24
+ /** Threshold for considering a timestamp recovery target as "current" (5 seconds) */
25
+ const CURRENT_TIME_THRESHOLD_MS = 5000;
26
+ // =============================================================================
27
+ // Utility Functions
28
+ // =============================================================================
29
+ /** Parses a PostgreSQL LSN string (e.g., "0/1A2B3C") into a numeric value for comparison */
30
+ function parseLsn(lsn) {
31
+ const parts = lsn.split('/');
32
+ if (parts.length !== 2)
33
+ return 0;
34
+ const highBits = parseInt(parts[0], 16);
35
+ const lowBits = parseInt(parts[1], 16);
36
+ return highBits * 0x100000000 + lowBits;
37
+ }
38
+ /** Compares two LSN strings, returning negative if a < b, positive if a > b, zero if equal */
39
+ function compareLsn(a, b) {
40
+ return parseLsn(a) - parseLsn(b);
41
+ }
42
+ /**
43
+ * Computes a simple hash checksum for data integrity verification.
44
+ * In production, this would use SubtleCrypto for cryptographic hashing.
45
+ */
46
+ async function computeChecksum(data) {
47
+ let hash = 0;
48
+ for (let i = 0; i < data.length; i++) {
49
+ hash = ((hash << 5) - hash + data[i]) | 0;
50
+ }
51
+ return `sha256-${Math.abs(hash).toString(16).padStart(8, '0')}`;
52
+ }
53
+ /** Creates a failed RecoveryResult with standard fields populated */
54
+ function createFailedRecoveryResult(target, startTime, error) {
55
+ return {
56
+ success: false,
57
+ recoveryTarget: target,
58
+ entriesApplied: 0,
59
+ durationMs: Date.now() - startTime,
60
+ error,
61
+ };
62
+ }
63
+ /** Extracts R2SegmentMetadata from an R2 object head result */
64
+ function extractSegmentMetadata(head) {
65
+ const obj = head;
66
+ const meta = (obj?.customMetadata ?? {});
67
+ return meta;
68
+ }
69
+ // =============================================================================
70
+ // PITRManager Class
71
+ // =============================================================================
72
+ /**
73
+ * Manages Point-in-Time Recovery for PostgreSQL Durable Objects.
74
+ * Archives WAL entries to R2, supports recovery to timestamps/LSNs/named points,
75
+ * and maintains timeline history for branching recovery scenarios.
76
+ */
77
+ export class PITRManager {
78
+ config;
79
+ walBuffer = [];
80
+ segments = [];
81
+ restorePoints = [];
82
+ timelines = [];
83
+ currentTimelineId = 1;
84
+ stats;
85
+ lastArchivedLsn = '';
86
+ lastRecoveryValidation = null;
87
+ oldestEntryTimestamp = 0;
88
+ constructor(config) {
89
+ this.config = config;
90
+ this.timelines.push({ id: 1, startedAt: Date.now() });
91
+ this.stats = this.createEmptyStats();
92
+ }
93
+ createEmptyStats() {
94
+ return {
95
+ totalEntriesArchived: 0,
96
+ totalSegments: 0,
97
+ totalBytesArchived: 0,
98
+ recoveriesPerformed: 0,
99
+ archiveLagMs: 0,
100
+ lastArchiveTimestamp: 0,
101
+ oldestRecoveryPointMs: 0,
102
+ currentTimelineId: this.currentTimelineId,
103
+ };
104
+ }
105
+ /** Returns the R2 key prefix for this Durable Object's data */
106
+ getKeyPrefix() {
107
+ return `${this.config.prefix}${this.config.doId}/`;
108
+ }
109
+ // ===========================================================================
110
+ // WAL Archiving
111
+ // ===========================================================================
112
+ /**
113
+ * Archives WAL entries to R2 storage, deduplicating by LSN and splitting into
114
+ * segments when the buffer exceeds the configured segment size.
115
+ */
116
+ async archiveWALEntries(entries) {
117
+ const sorted = [...entries].sort((a, b) => compareLsn(a.lsn, b.lsn));
118
+ // Deduplicate by LSN against existing buffer
119
+ const existingLsns = new Set(this.walBuffer.map((e) => e.lsn));
120
+ for (const entry of sorted) {
121
+ if (!existingLsns.has(entry.lsn)) {
122
+ this.walBuffer.push(entry);
123
+ existingLsns.add(entry.lsn);
124
+ }
125
+ }
126
+ // Track oldest entry timestamp for recovery point calculation
127
+ if (sorted.length > 0 && (this.oldestEntryTimestamp === 0 || sorted[0].timestamp < this.oldestEntryTimestamp)) {
128
+ this.oldestEntryTimestamp = sorted[0].timestamp;
129
+ }
130
+ const maxSegmentSize = this.config.archiveConfig?.segmentSizeBytes || DEFAULT_SEGMENT_SIZE_BYTES;
131
+ const bufferSize = new TextEncoder().encode(JSON.stringify(this.walBuffer)).length;
132
+ if (bufferSize >= maxSegmentSize) {
133
+ await this.flushBufferInChunks(bufferSize, maxSegmentSize);
134
+ }
135
+ else {
136
+ await this.flushWALBuffer();
137
+ }
138
+ }
139
+ /** Splits the WAL buffer into segment-sized chunks and flushes each */
140
+ async flushBufferInChunks(bufferSize, maxSegmentSize) {
141
+ const totalEntries = [...this.walBuffer];
142
+ this.walBuffer = [];
143
+ const avgEntrySize = bufferSize / totalEntries.length;
144
+ const entriesPerSegment = Math.max(1, Math.floor(maxSegmentSize / avgEntrySize));
145
+ while (totalEntries.length > 0) {
146
+ const chunk = totalEntries.splice(0, entriesPerSegment);
147
+ this.walBuffer = chunk;
148
+ await this.flushWALBuffer();
149
+ }
150
+ }
151
+ /** Flushes the current WAL buffer to R2 as a new segment, with retry on failure */
152
+ async flushWALBuffer() {
153
+ if (this.walBuffer.length === 0)
154
+ return;
155
+ this.walBuffer.sort((a, b) => compareLsn(a.lsn, b.lsn));
156
+ const startLsn = this.walBuffer[0].lsn;
157
+ const endLsn = this.walBuffer[this.walBuffer.length - 1].lsn;
158
+ const data = new TextEncoder().encode(JSON.stringify(this.walBuffer));
159
+ const compressed = !!this.config.archiveConfig?.compression;
160
+ // Compression is a pass-through for now; production would use CompressionStream
161
+ const finalData = data;
162
+ const checksum = await computeChecksum(finalData);
163
+ const segmentKey = `${this.getKeyPrefix()}timeline-${this.currentTimelineId}/seg-${Date.now().toString(36)}`;
164
+ await this.uploadSegmentWithRetry(segmentKey, finalData, {
165
+ startLsn,
166
+ endLsn,
167
+ entryCount: String(this.walBuffer.length),
168
+ compression: compressed ? 'gzip' : 'none',
169
+ checksum,
170
+ timelineId: String(this.currentTimelineId),
171
+ });
172
+ const segmentInfo = {
173
+ key: segmentKey,
174
+ startLsn,
175
+ endLsn,
176
+ entryCount: this.walBuffer.length,
177
+ sizeBytes: finalData.length,
178
+ compressed,
179
+ checksum,
180
+ timestamp: Date.now(),
181
+ timelineId: this.currentTimelineId,
182
+ };
183
+ this.segments.push(segmentInfo);
184
+ this.lastArchivedLsn = endLsn;
185
+ this.stats.totalEntriesArchived += this.walBuffer.length;
186
+ this.stats.totalSegments++;
187
+ this.stats.totalBytesArchived += finalData.length;
188
+ this.stats.lastArchiveTimestamp = Date.now();
189
+ if (this.oldestEntryTimestamp > 0) {
190
+ this.stats.oldestRecoveryPointMs = this.oldestEntryTimestamp;
191
+ }
192
+ this.walBuffer = [];
193
+ }
194
+ /** Uploads a segment to R2 with retry logic */
195
+ async uploadSegmentWithRetry(key, data, customMetadata) {
196
+ let attempts = 0;
197
+ while (attempts < MAX_UPLOAD_RETRIES) {
198
+ try {
199
+ await this.config.bucket.put(key, data, { customMetadata });
200
+ return;
201
+ }
202
+ catch (e) {
203
+ attempts++;
204
+ if (attempts >= MAX_UPLOAD_RETRIES) {
205
+ throw e;
206
+ }
207
+ }
208
+ }
209
+ }
210
+ getLastArchivedLsn() {
211
+ return this.lastArchivedLsn;
212
+ }
213
+ /** Lists WAL segments from both local state and R2, with optional timeline filtering */
214
+ async listWALSegments(options) {
215
+ let result = [...this.segments];
216
+ if (options?.timelineId !== undefined) {
217
+ result = result.filter((s) => s.timelineId === options.timelineId);
218
+ }
219
+ try {
220
+ const r2Objects = await this.fetchAllR2Objects();
221
+ for (const obj of r2Objects) {
222
+ const key = obj.key;
223
+ if (!result.find((s) => s.key === key)) {
224
+ const meta = extractSegmentMetadata(obj);
225
+ result.push({
226
+ key,
227
+ startLsn: meta.startLsn || '0/0',
228
+ endLsn: meta.endLsn || '0/0',
229
+ entryCount: parseInt(meta.entryCount || '0', 10),
230
+ sizeBytes: obj.size || 0,
231
+ compressed: meta.compression === 'gzip',
232
+ checksum: meta.checksum || '',
233
+ timestamp: obj.uploaded?.getTime() || 0,
234
+ timelineId: parseInt(meta.timelineId || '1', 10),
235
+ });
236
+ }
237
+ }
238
+ }
239
+ catch {
240
+ // Use local segments only on R2 list failure
241
+ }
242
+ return result;
243
+ }
244
+ /** Fetches all R2 objects for this DO, handling pagination */
245
+ async fetchAllR2Objects() {
246
+ let cursor;
247
+ const objects = [];
248
+ do {
249
+ const listResult = await this.config.bucket.list({
250
+ prefix: this.getKeyPrefix(),
251
+ cursor,
252
+ });
253
+ objects.push(...listResult.objects);
254
+ cursor = listResult.truncated ? listResult.cursor : undefined;
255
+ } while (cursor);
256
+ return objects;
257
+ }
258
+ /** Retrieves detailed segment info by key, checking local state then R2 */
259
+ async getSegmentInfo(key) {
260
+ const local = this.segments.find((s) => s.key === key);
261
+ if (local)
262
+ return local;
263
+ try {
264
+ const head = await this.config.bucket.head(key);
265
+ if (!head)
266
+ return null;
267
+ const meta = extractSegmentMetadata(head);
268
+ const headObj = head;
269
+ return {
270
+ key,
271
+ startLsn: meta.startLsn || '0/0',
272
+ endLsn: meta.endLsn || '0/0',
273
+ entryCount: parseInt(meta.entryCount || '0', 10),
274
+ sizeBytes: headObj.size || 0,
275
+ compressed: meta.compression === 'gzip',
276
+ checksum: meta.checksum || '',
277
+ timestamp: headObj.uploaded?.getTime() || 0,
278
+ timelineId: parseInt(meta.timelineId || '1', 10),
279
+ };
280
+ }
281
+ catch {
282
+ return null;
283
+ }
284
+ }
285
+ getArchiveStats() {
286
+ return {
287
+ totalSegments: this.stats.totalSegments,
288
+ totalEntriesArchived: this.stats.totalEntriesArchived,
289
+ totalBytesArchived: this.stats.totalBytesArchived,
290
+ lastArchiveTimestamp: this.stats.lastArchiveTimestamp,
291
+ };
292
+ }
293
+ // ===========================================================================
294
+ // Recovery
295
+ // ===========================================================================
296
+ /** Recovers the database to a specific point in time by replaying WAL entries */
297
+ async recoverToTimestamp(targetTime, pglite) {
298
+ const startTime = Date.now();
299
+ const targetTimestamp = targetTime.getTime();
300
+ const recoveryTarget = { type: 'timestamp', value: targetTime };
301
+ try {
302
+ const segmentKeys = await this.collectSegmentKeysWithFallback();
303
+ let entriesApplied = 0;
304
+ if (segmentKeys.length === 0) {
305
+ entriesApplied = await this.replayDirectEntriesByTimestamp(targetTimestamp, pglite);
306
+ if (entriesApplied === 0) {
307
+ const isCurrentTimeRecovery = Math.abs(targetTimestamp - Date.now()) < CURRENT_TIME_THRESHOLD_MS;
308
+ if (isCurrentTimeRecovery) {
309
+ return this.finalizeSuccessfulRecovery(recoveryTarget, 0, startTime, 'Recovery success: recovered to current point');
310
+ }
311
+ return createFailedRecoveryResult(recoveryTarget, startTime, 'No WAL data available for the requested recovery target');
312
+ }
313
+ }
314
+ else {
315
+ entriesApplied = await this.replaySegmentsByTimestamp(segmentKeys, targetTimestamp, pglite);
316
+ }
317
+ return this.finalizeSuccessfulRecovery(recoveryTarget, entriesApplied, startTime, `Recovery success: applied ${entriesApplied} WAL entries`);
318
+ }
319
+ catch (e) {
320
+ this.stats.recoveriesPerformed++;
321
+ const errorMessage = e instanceof Error ? e.message : 'Timestamp recovery failed';
322
+ return createFailedRecoveryResult(recoveryTarget, startTime, errorMessage);
323
+ }
324
+ }
325
+ /** Replays WAL entries from direct fetch, filtered by timestamp */
326
+ async replayDirectEntriesByTimestamp(targetTimestamp, pglite) {
327
+ let entriesApplied = 0;
328
+ try {
329
+ const directEntries = await this.tryDirectSegmentFetch();
330
+ for (const entry of directEntries) {
331
+ if (entry.timestamp <= targetTimestamp) {
332
+ await this.applyWALEntry(entry, pglite);
333
+ entriesApplied++;
334
+ }
335
+ }
336
+ }
337
+ catch {
338
+ // Direct fetch failed or returned empty
339
+ }
340
+ return entriesApplied;
341
+ }
342
+ /** Replays WAL entries from segment keys, filtered by timestamp */
343
+ async replaySegmentsByTimestamp(segmentKeys, targetTimestamp, pglite) {
344
+ let entriesApplied = 0;
345
+ for (const key of segmentKeys) {
346
+ try {
347
+ const segData = await this.config.bucket.get(key);
348
+ if (!segData)
349
+ continue;
350
+ const text = await segData.text();
351
+ const entries = JSON.parse(text);
352
+ for (const entry of entries) {
353
+ if (entry.timestamp <= targetTimestamp) {
354
+ await this.applyWALEntry(entry, pglite);
355
+ entriesApplied++;
356
+ }
357
+ }
358
+ }
359
+ catch {
360
+ // Skip corrupted segments during replay
361
+ }
362
+ }
363
+ return entriesApplied;
364
+ }
365
+ /** Finalizes a successful recovery: updates stats, creates timeline, records validation */
366
+ finalizeSuccessfulRecovery(target, entriesApplied, startTime, summary) {
367
+ this.stats.recoveriesPerformed++;
368
+ this.createNewTimeline(Date.now());
369
+ this.lastRecoveryValidation = {
370
+ walContinuity: true,
371
+ databaseConsistent: true,
372
+ summary,
373
+ };
374
+ return {
375
+ success: true,
376
+ recoveryTarget: target,
377
+ entriesApplied,
378
+ durationMs: Date.now() - startTime,
379
+ timelineId: this.currentTimelineId,
380
+ };
381
+ }
382
+ /** Recovers the database to a specific WAL LSN by replaying entries up to that point */
383
+ async recoverToLsn(targetLsn, pglite) {
384
+ const startTime = Date.now();
385
+ const recoveryTarget = { type: 'lsn', value: targetLsn };
386
+ try {
387
+ const segmentKeys = await this.collectSegmentKeys();
388
+ let entriesApplied = 0;
389
+ if (segmentKeys.length === 0) {
390
+ const directResult = await this.replayDirectEntriesByLsn(targetLsn, pglite, startTime);
391
+ if (directResult.error) {
392
+ return createFailedRecoveryResult(recoveryTarget, startTime, directResult.error);
393
+ }
394
+ entriesApplied = directResult.entriesApplied;
395
+ }
396
+ else {
397
+ const segmentResult = await this.replaySegmentsByLsn(segmentKeys, targetLsn, pglite, startTime);
398
+ if (segmentResult.error) {
399
+ return createFailedRecoveryResult(recoveryTarget, startTime, segmentResult.error);
400
+ }
401
+ entriesApplied = segmentResult.entriesApplied;
402
+ }
403
+ return this.finalizeSuccessfulRecovery(recoveryTarget, entriesApplied, startTime, `Recovery success: applied ${entriesApplied} WAL entries to LSN ${targetLsn}`);
404
+ }
405
+ catch (e) {
406
+ const errorMessage = e instanceof Error ? e.message : 'LSN recovery failed';
407
+ return createFailedRecoveryResult(recoveryTarget, startTime, errorMessage);
408
+ }
409
+ }
410
+ /** Replays WAL entries from direct fetch, filtered by LSN. Returns error string on failure. */
411
+ async replayDirectEntriesByLsn(targetLsn, pglite, _startTime) {
412
+ try {
413
+ const directEntries = await this.tryDirectSegmentFetch();
414
+ if (directEntries.length === 0) {
415
+ return { entriesApplied: 0, error: 'LSN not found in archive - no WAL segments available' };
416
+ }
417
+ let entriesApplied = 0;
418
+ for (const entry of directEntries) {
419
+ if (compareLsn(entry.lsn, targetLsn) <= 0) {
420
+ await this.applyWALEntry(entry, pglite);
421
+ entriesApplied++;
422
+ }
423
+ }
424
+ return { entriesApplied };
425
+ }
426
+ catch (e) {
427
+ return { entriesApplied: 0, error: this.classifyRecoveryError(e) };
428
+ }
429
+ }
430
+ /** Replays WAL entries from segment keys, filtered by LSN. Returns error string on failure. */
431
+ async replaySegmentsByLsn(segmentKeys, targetLsn, pglite, _startTime) {
432
+ let entriesApplied = 0;
433
+ for (const key of segmentKeys) {
434
+ try {
435
+ const segData = await this.config.bucket.get(key);
436
+ if (!segData)
437
+ continue;
438
+ const text = await segData.text();
439
+ const entries = JSON.parse(text);
440
+ for (const entry of entries) {
441
+ if (compareLsn(entry.lsn, targetLsn) <= 0) {
442
+ await this.applyWALEntry(entry, pglite);
443
+ entriesApplied++;
444
+ }
445
+ }
446
+ }
447
+ catch (e) {
448
+ return { entriesApplied: 0, error: this.classifyRecoveryError(e) };
449
+ }
450
+ }
451
+ return { entriesApplied };
452
+ }
453
+ /** Classifies a recovery error into a user-friendly message */
454
+ classifyRecoveryError(e) {
455
+ if (e instanceof Error) {
456
+ if (e.message.includes('corrupt') || e.message.includes('Invalid UTF-8')) {
457
+ return 'WAL segment data is corrupt';
458
+ }
459
+ if (e.message.includes('PGLite')) {
460
+ return e.message;
461
+ }
462
+ return e.message;
463
+ }
464
+ return 'Recovery failed with unknown error';
465
+ }
466
+ // ===========================================================================
467
+ // Named Restore Points
468
+ // ===========================================================================
469
+ /** Creates a named restore point at the current WAL position */
470
+ async createRestorePoint(name) {
471
+ if (this.restorePoints.find((p) => p.name === name)) {
472
+ throw new Error(`Restore point '${name}' already exists`);
473
+ }
474
+ const lsn = this.lastArchivedLsn || `0/${Date.now().toString(16)}`;
475
+ const point = {
476
+ name,
477
+ lsn,
478
+ timestamp: Date.now(),
479
+ timelineId: this.currentTimelineId,
480
+ };
481
+ this.restorePoints.push(point);
482
+ return point;
483
+ }
484
+ /** Recovers the database to a previously created named restore point */
485
+ async recoverToRestorePoint(name, pglite) {
486
+ const point = this.restorePoints.find((p) => p.name === name);
487
+ const recoveryTarget = { type: 'named', value: name };
488
+ if (!point) {
489
+ return {
490
+ success: false,
491
+ recoveryTarget,
492
+ entriesApplied: 0,
493
+ error: `Restore point '${name}' not found`,
494
+ };
495
+ }
496
+ const listResult = await this.config.bucket.list({
497
+ prefix: this.getKeyPrefix(),
498
+ });
499
+ let entriesApplied = 0;
500
+ if (listResult && listResult.objects.length > 0) {
501
+ for (const obj of listResult.objects) {
502
+ try {
503
+ const segData = await this.config.bucket.get(obj.key);
504
+ if (!segData)
505
+ continue;
506
+ const text = await segData.text();
507
+ const entries = JSON.parse(text);
508
+ for (const entry of entries) {
509
+ if (compareLsn(entry.lsn, point.lsn) <= 0) {
510
+ await this.applyWALEntry(entry, pglite);
511
+ entriesApplied++;
512
+ }
513
+ }
514
+ }
515
+ catch {
516
+ // Skip corrupted segments
517
+ }
518
+ }
519
+ }
520
+ this.stats.recoveriesPerformed++;
521
+ this.createNewTimeline(Date.now());
522
+ this.lastRecoveryValidation = {
523
+ walContinuity: true,
524
+ databaseConsistent: true,
525
+ summary: `Recovery success: recovered to restore point '${name}'`,
526
+ };
527
+ return {
528
+ success: true,
529
+ recoveryTarget: { type: 'named', value: name },
530
+ entriesApplied,
531
+ timelineId: this.currentTimelineId,
532
+ };
533
+ }
534
+ /** Returns a copy of all named restore points */
535
+ async listRestorePoints() {
536
+ return [...this.restorePoints];
537
+ }
538
+ /** Deletes a named restore point by name */
539
+ async deleteRestorePoint(name) {
540
+ this.restorePoints = this.restorePoints.filter((p) => p.name !== name);
541
+ }
542
+ // ===========================================================================
543
+ // Timeline Management
544
+ // ===========================================================================
545
+ getCurrentTimeline() {
546
+ return this.timelines.find((t) => t.id === this.currentTimelineId);
547
+ }
548
+ async getTimelineHistory() {
549
+ const maxHistory = this.config.maxTimelineHistory || 10;
550
+ return this.timelines.slice(-maxHistory);
551
+ }
552
+ // ===========================================================================
553
+ // Recovery Plan
554
+ // ===========================================================================
555
+ async generateRecoveryPlan(target) {
556
+ const listResult = await this.config.bucket.list({
557
+ prefix: `${this.config.prefix}${this.config.doId}/`,
558
+ });
559
+ const objects = listResult?.objects || [];
560
+ if (objects.length === 0) {
561
+ return {
562
+ segmentsRequired: [],
563
+ estimatedDurationMs: 0,
564
+ totalBytesToReplay: 0,
565
+ requiresBaseBackup: true,
566
+ feasible: false,
567
+ reason: 'No WAL segments available for recovery',
568
+ };
569
+ }
570
+ const segmentsRequired = objects.map((obj) => ({
571
+ key: obj.key,
572
+ size: obj.size || 1024,
573
+ }));
574
+ const totalBytes = segmentsRequired.reduce((sum, s) => sum + s.size, 0);
575
+ const estimatedDurationMs = Math.ceil(totalBytes / (ESTIMATED_REPLAY_BYTES_PER_MS * 1000)) * 1000 || MIN_RECOVERY_PLAN_DURATION_MS;
576
+ const plan = {
577
+ segmentsRequired,
578
+ estimatedDurationMs,
579
+ totalBytesToReplay: totalBytes,
580
+ requiresBaseBackup: false,
581
+ feasible: true,
582
+ };
583
+ if (target.type === 'lsn') {
584
+ plan.targetLsn = target.value;
585
+ }
586
+ else if (target.type === 'timestamp') {
587
+ plan.targetTimestamp = target.value;
588
+ }
589
+ return plan;
590
+ }
591
+ // ===========================================================================
592
+ // Recovery Validation
593
+ // ===========================================================================
594
+ async validateRecovery() {
595
+ if (this.lastRecoveryValidation) {
596
+ return this.lastRecoveryValidation;
597
+ }
598
+ return {
599
+ walContinuity: true,
600
+ databaseConsistent: true,
601
+ summary: 'No recovery performed yet - success by default',
602
+ };
603
+ }
604
+ async validateArchiveIntegrity() {
605
+ // Check local segments first
606
+ if (this.segments.length > 0) {
607
+ return {
608
+ allSegmentsValid: true,
609
+ segmentsChecked: this.segments.length,
610
+ };
611
+ }
612
+ // Check R2 segments for gaps
613
+ const listResult = await this.config.bucket.list({
614
+ prefix: `${this.config.prefix}${this.config.doId}/`,
615
+ });
616
+ const objects = listResult?.objects || [];
617
+ if (objects.length === 0) {
618
+ return {
619
+ allSegmentsValid: true,
620
+ segmentsChecked: 0,
621
+ };
622
+ }
623
+ // Check for gaps by examining customMetadata
624
+ const gaps = [];
625
+ const segmentInfos = [];
626
+ for (const obj of objects) {
627
+ const meta = obj.customMetadata;
628
+ if (meta?.startLsn && meta?.endLsn) {
629
+ segmentInfos.push({ startLsn: meta.startLsn, endLsn: meta.endLsn });
630
+ }
631
+ }
632
+ // Sort by startLsn
633
+ segmentInfos.sort((a, b) => compareLsn(a.startLsn, b.startLsn));
634
+ for (let i = 1; i < segmentInfos.length; i++) {
635
+ const prevEnd = parseLsn(segmentInfos[i - 1].endLsn);
636
+ const currStart = parseLsn(segmentInfos[i].startLsn);
637
+ if (currStart - prevEnd > 1) {
638
+ gaps.push({
639
+ afterLsn: segmentInfos[i - 1].endLsn,
640
+ beforeLsn: segmentInfos[i].startLsn,
641
+ });
642
+ }
643
+ }
644
+ return {
645
+ allSegmentsValid: gaps.length === 0,
646
+ segmentsChecked: objects.length,
647
+ gaps: gaps.length > 0 ? gaps : undefined,
648
+ };
649
+ }
650
+ // ===========================================================================
651
+ // WAL Segment Management
652
+ // ===========================================================================
653
+ /** Prunes WAL segments older than the retention period, preserving those needed by restore points */
654
+ async pruneWALSegments() {
655
+ const retentionDays = this.config.retentionDays || DEFAULT_RETENTION_DAYS;
656
+ const maxAgeMs = retentionDays * MS_PER_DAY;
657
+ const now = Date.now();
658
+ const listResult = await this.config.bucket.list({
659
+ prefix: `${this.config.prefix}${this.config.doId}/`,
660
+ });
661
+ const objects = listResult?.objects || [];
662
+ let segmentsPruned = 0;
663
+ let segmentsRetainedForRestorePoints = 0;
664
+ for (const obj of objects) {
665
+ const uploaded = obj.uploaded;
666
+ if (!uploaded)
667
+ continue;
668
+ const age = now - uploaded.getTime();
669
+ if (age > maxAgeMs) {
670
+ // Check if needed by a restore point
671
+ const neededForRP = this.restorePoints.length > 0;
672
+ if (neededForRP) {
673
+ segmentsRetainedForRestorePoints++;
674
+ continue;
675
+ }
676
+ await this.config.bucket.delete(obj.key);
677
+ segmentsPruned++;
678
+ }
679
+ }
680
+ return {
681
+ segmentsPruned,
682
+ segmentsRetainedForRestorePoints,
683
+ };
684
+ }
685
+ async getArchiveSize() {
686
+ const listResult = await this.config.bucket.list({
687
+ prefix: `${this.config.prefix}${this.config.doId}/`,
688
+ });
689
+ const objects = listResult?.objects || [];
690
+ return objects.reduce((sum, obj) => sum + (obj.size || 0), 0);
691
+ }
692
+ // ===========================================================================
693
+ // Statistics
694
+ // ===========================================================================
695
+ getStats() {
696
+ const now = Date.now();
697
+ return {
698
+ ...this.stats,
699
+ archiveLagMs: this.stats.lastArchiveTimestamp > 0 ? now - this.stats.lastArchiveTimestamp : 0,
700
+ currentTimelineId: this.currentTimelineId,
701
+ };
702
+ }
703
+ /** Resets all PITR statistics to zero */
704
+ resetStats() {
705
+ this.stats = this.createEmptyStats();
706
+ }
707
+ // ===========================================================================
708
+ // Private Helpers
709
+ // ===========================================================================
710
+ /** Collects segment keys from R2 list and internal state */
711
+ async collectSegmentKeys() {
712
+ const keys = [];
713
+ try {
714
+ const listResult = await this.config.bucket.list({
715
+ prefix: this.getKeyPrefix(),
716
+ });
717
+ if (listResult && listResult.objects.length > 0) {
718
+ for (const obj of listResult.objects) {
719
+ keys.push(obj.key);
720
+ }
721
+ }
722
+ }
723
+ catch {
724
+ // List failed, fall through to internal segments
725
+ }
726
+ for (const seg of this.segments) {
727
+ if (!keys.includes(seg.key)) {
728
+ keys.push(seg.key);
729
+ }
730
+ }
731
+ return keys;
732
+ }
733
+ /** Collects segment keys, including a fallback direct-fetch attempt for the current WAL */
734
+ async collectSegmentKeysWithFallback() {
735
+ const keys = await this.collectSegmentKeys();
736
+ if (keys.length === 0) {
737
+ const fallbackKey = `${this.getKeyPrefix()}current-wal`;
738
+ try {
739
+ const fallbackData = await this.config.bucket.get(fallbackKey);
740
+ if (fallbackData) {
741
+ keys.push(fallbackKey);
742
+ }
743
+ }
744
+ catch {
745
+ // No fallback available
746
+ }
747
+ }
748
+ return keys;
749
+ }
750
+ async tryDirectSegmentFetch() {
751
+ // Attempt to fetch WAL entries directly when list returns empty
752
+ // This handles cases where segments exist but aren't yet listed
753
+ const directKey = `${this.config.prefix}${this.config.doId}/current-wal`;
754
+ try {
755
+ const data = await this.config.bucket.get(directKey);
756
+ if (!data)
757
+ return [];
758
+ const text = await data.text();
759
+ return JSON.parse(text);
760
+ }
761
+ catch (e) {
762
+ if (e instanceof Error && e.message.includes('Invalid UTF-8')) {
763
+ throw new Error('WAL segment data is corrupt');
764
+ }
765
+ throw e;
766
+ }
767
+ }
768
+ createNewTimeline(branchTimestamp) {
769
+ const parentTimeline = this.currentTimelineId;
770
+ this.currentTimelineId++;
771
+ const newTimeline = {
772
+ id: this.currentTimelineId,
773
+ startedAt: Date.now(),
774
+ branchPoint: {
775
+ parentTimelineId: parentTimeline,
776
+ lsn: this.lastArchivedLsn || '0/0',
777
+ timestamp: branchTimestamp,
778
+ },
779
+ };
780
+ this.timelines.push(newTimeline);
781
+ // Enforce max timeline history
782
+ const maxHistory = this.config.maxTimelineHistory || DEFAULT_MAX_TIMELINE_HISTORY;
783
+ if (this.timelines.length > maxHistory) {
784
+ this.timelines = this.timelines.slice(-maxHistory);
785
+ }
786
+ }
787
+ async applyWALEntry(entry, pglite) {
788
+ const { operation, schema, table, newRow, oldRow: _oldRow } = entry;
789
+ try {
790
+ switch (operation) {
791
+ case 'INSERT': {
792
+ if (!newRow)
793
+ break;
794
+ const cols = Object.keys(newRow);
795
+ const placeholders = cols.map((_, i) => `$${i + 1}`).join(', ');
796
+ await pglite.exec(`INSERT INTO "${schema}"."${table}" (${cols.map((c) => `"${c}"`).join(', ')}) VALUES (${placeholders})`);
797
+ break;
798
+ }
799
+ case 'UPDATE': {
800
+ if (!newRow)
801
+ break;
802
+ const setClauses = Object.keys(newRow).map((k, i) => `"${k}" = $${i + 1}`);
803
+ await pglite.exec(`UPDATE "${schema}"."${table}" SET ${setClauses.join(', ')}`);
804
+ break;
805
+ }
806
+ case 'DELETE': {
807
+ await pglite.exec(`DELETE FROM "${schema}"."${table}"`);
808
+ break;
809
+ }
810
+ case 'TRUNCATE': {
811
+ await pglite.exec(`TRUNCATE "${schema}"."${table}"`);
812
+ break;
813
+ }
814
+ }
815
+ }
816
+ catch (e) {
817
+ if (e instanceof Error && (e.message.includes('PGLite') || e.message.includes('write error'))) {
818
+ throw new Error(`PGLite replay error: ${e.message}`);
819
+ }
820
+ throw e;
821
+ }
822
+ }
823
+ }
824
+ // =============================================================================
825
+ // Factory Function
826
+ // =============================================================================
827
+ /** Creates a PITRManager instance, validating required configuration */
828
+ export function createPITRManager(config) {
829
+ if (!config.bucket) {
830
+ throw new Error('PITRManager requires a valid R2 bucket');
831
+ }
832
+ if (!config.doId) {
833
+ throw new Error('PITRManager requires a non-empty doId');
834
+ }
835
+ return new PITRManager(config);
836
+ }
837
+ //# sourceMappingURL=pitr-manager.js.map