@dotdo/postgres 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1240 @@
1
+ /**
2
+ * Tests for Point-in-Time Recovery (PITR)
3
+ * Task: postgres-7yr6.4 - Production readiness: Point-in-time recovery
4
+ *
5
+ * RED phase TDD - These tests define the expected API surface for:
6
+ * - WAL archiving to R2
7
+ * - Recovery targets (timestamp, LSN, named restore points)
8
+ * - Timeline management
9
+ * - WAL segment management
10
+ * - Recovery validation
11
+ */
12
+
13
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
14
+
15
+ // These imports define the expected API surface - they will fail until implemented
16
+ import {
17
+ PITRManager,
18
+ createPITRManager,
19
+ type PITRConfig,
20
+ type WALArchiveConfig,
21
+ type RecoveryTarget,
22
+ type RecoveryResult,
23
+ type WALSegmentInfo,
24
+ type TimelineInfo,
25
+ type RestorePoint,
26
+ type PITRStats,
27
+ type WALArchiveStats,
28
+ type RecoveryPlan,
29
+ type RecoveryValidation,
30
+ } from '../pitr/pitr-manager'
31
+
32
+ // =============================================================================
33
+ // Mock Setup
34
+ // =============================================================================
35
+
36
+ const createMockR2Bucket = () => ({
37
+ get: vi.fn(),
38
+ put: vi.fn().mockResolvedValue(undefined),
39
+ delete: vi.fn().mockResolvedValue(undefined),
40
+ list: vi.fn().mockResolvedValue({ objects: [], truncated: false }),
41
+ head: vi.fn(),
42
+ })
43
+
44
+ const createMockDOStorage = () => ({
45
+ get: vi.fn(),
46
+ put: vi.fn().mockResolvedValue(undefined),
47
+ delete: vi.fn().mockResolvedValue(undefined),
48
+ list: vi.fn().mockResolvedValue(new Map()),
49
+ getAlarm: vi.fn().mockResolvedValue(null),
50
+ setAlarm: vi.fn().mockResolvedValue(undefined),
51
+ })
52
+
53
+ const createMockPGLite = () => ({
54
+ query: vi.fn().mockResolvedValue({ rows: [] }),
55
+ exec: vi.fn().mockResolvedValue(undefined),
56
+ close: vi.fn().mockResolvedValue(undefined),
57
+ })
58
+
59
+ // =============================================================================
60
+ // Tests: PITRManager Creation
61
+ // =============================================================================
62
+
63
+ describe('PITRManager', () => {
64
+ let mockBucket: ReturnType<typeof createMockR2Bucket>
65
+ let mockStorage: ReturnType<typeof createMockDOStorage>
66
+ let mockPGLite: ReturnType<typeof createMockPGLite>
67
+
68
+ beforeEach(() => {
69
+ vi.useFakeTimers()
70
+ mockBucket = createMockR2Bucket()
71
+ mockStorage = createMockDOStorage()
72
+ mockPGLite = createMockPGLite()
73
+ })
74
+
75
+ afterEach(() => {
76
+ vi.useRealTimers()
77
+ vi.clearAllMocks()
78
+ })
79
+
80
+ describe('createPITRManager()', () => {
81
+ it('should create a PITRManager instance with minimal config', () => {
82
+ const manager = createPITRManager({
83
+ bucket: mockBucket as unknown as R2Bucket,
84
+ doId: 'test-do-123',
85
+ prefix: 'wal-archive/',
86
+ })
87
+ expect(manager).toBeInstanceOf(PITRManager)
88
+ })
89
+
90
+ it('should create a PITRManager with full configuration', () => {
91
+ const config: PITRConfig = {
92
+ bucket: mockBucket as unknown as R2Bucket,
93
+ doId: 'test-do-123',
94
+ prefix: 'wal-archive/',
95
+ archiveConfig: {
96
+ segmentSizeBytes: 16 * 1024 * 1024, // 16MB
97
+ flushIntervalMs: 60000, // 1 minute
98
+ compression: true,
99
+ checksumValidation: true,
100
+ maxSegmentsInMemory: 5,
101
+ },
102
+ retentionDays: 30,
103
+ maxTimelineHistory: 10,
104
+ enableContinuousArchiving: true,
105
+ }
106
+ const manager = createPITRManager(config)
107
+ expect(manager).toBeInstanceOf(PITRManager)
108
+ })
109
+
110
+ it('should throw if bucket is not provided', () => {
111
+ expect(() =>
112
+ createPITRManager({
113
+ bucket: undefined as unknown as R2Bucket,
114
+ doId: 'test-do-123',
115
+ prefix: 'wal-archive/',
116
+ })
117
+ ).toThrow()
118
+ })
119
+
120
+ it('should throw if doId is empty', () => {
121
+ expect(() =>
122
+ createPITRManager({
123
+ bucket: mockBucket as unknown as R2Bucket,
124
+ doId: '',
125
+ prefix: 'wal-archive/',
126
+ })
127
+ ).toThrow()
128
+ })
129
+ })
130
+
131
+ // ===========================================================================
132
+ // Tests: WAL Archiving
133
+ // ===========================================================================
134
+
135
+ describe('WAL Archiving', () => {
136
+ let manager: InstanceType<typeof PITRManager>
137
+
138
+ beforeEach(() => {
139
+ manager = createPITRManager({
140
+ bucket: mockBucket as unknown as R2Bucket,
141
+ doId: 'test-do-123',
142
+ prefix: 'wal-archive/',
143
+ archiveConfig: {
144
+ segmentSizeBytes: 1024 * 1024, // 1MB for testing
145
+ flushIntervalMs: 5000,
146
+ compression: true,
147
+ checksumValidation: true,
148
+ },
149
+ })
150
+ })
151
+
152
+ it('should archive WAL entries to R2', async () => {
153
+ const walEntries = [
154
+ {
155
+ lsn: '0/1000',
156
+ operation: 'INSERT' as const,
157
+ schema: 'public',
158
+ table: 'users',
159
+ newRow: { id: 1, name: 'Alice' },
160
+ timestamp: Date.now(),
161
+ },
162
+ {
163
+ lsn: '0/2000',
164
+ operation: 'UPDATE' as const,
165
+ schema: 'public',
166
+ table: 'users',
167
+ newRow: { id: 1, name: 'Bob' },
168
+ oldRow: { id: 1, name: 'Alice' },
169
+ timestamp: Date.now(),
170
+ },
171
+ ]
172
+
173
+ await manager.archiveWALEntries(walEntries)
174
+
175
+ expect(mockBucket.put).toHaveBeenCalled()
176
+ })
177
+
178
+ it('should organize WAL segments by LSN range', async () => {
179
+ const entries = Array.from({ length: 100 }, (_, i) => ({
180
+ lsn: `0/${(i + 1) * 1000}`,
181
+ operation: 'INSERT' as const,
182
+ schema: 'public',
183
+ table: 'users',
184
+ newRow: { id: i, name: `user_${i}` },
185
+ timestamp: Date.now() + i,
186
+ }))
187
+
188
+ await manager.archiveWALEntries(entries)
189
+
190
+ const segments = await manager.listWALSegments()
191
+ expect(segments.length).toBeGreaterThan(0)
192
+ expect(segments[0].startLsn).toBeDefined()
193
+ expect(segments[0].endLsn).toBeDefined()
194
+ })
195
+
196
+ it('should flush WAL buffer when segment size exceeded', async () => {
197
+ // Generate enough data to exceed segment size
198
+ const largeEntries = Array.from({ length: 1000 }, (_, i) => ({
199
+ lsn: `0/${(i + 1) * 1000}`,
200
+ operation: 'INSERT' as const,
201
+ schema: 'public',
202
+ table: 'users',
203
+ newRow: { id: i, name: 'x'.repeat(1024) },
204
+ timestamp: Date.now() + i,
205
+ }))
206
+
207
+ await manager.archiveWALEntries(largeEntries)
208
+
209
+ // Should have created multiple segments
210
+ expect(mockBucket.put.mock.calls.length).toBeGreaterThan(1)
211
+ })
212
+
213
+ it('should flush WAL buffer on interval', async () => {
214
+ const entries = [
215
+ {
216
+ lsn: '0/1000',
217
+ operation: 'INSERT' as const,
218
+ schema: 'public',
219
+ table: 'users',
220
+ newRow: { id: 1, name: 'Alice' },
221
+ timestamp: Date.now(),
222
+ },
223
+ ]
224
+
225
+ await manager.archiveWALEntries(entries)
226
+
227
+ // Advance past flush interval
228
+ vi.advanceTimersByTime(6000)
229
+
230
+ await manager.flushWALBuffer()
231
+
232
+ expect(mockBucket.put).toHaveBeenCalled()
233
+ })
234
+
235
+ it('should compress WAL segments when compression enabled', async () => {
236
+ const entries = [
237
+ {
238
+ lsn: '0/1000',
239
+ operation: 'INSERT' as const,
240
+ schema: 'public',
241
+ table: 'users',
242
+ newRow: { id: 1, name: 'Alice' },
243
+ timestamp: Date.now(),
244
+ },
245
+ ]
246
+
247
+ await manager.archiveWALEntries(entries)
248
+ await manager.flushWALBuffer()
249
+
250
+ const segments = await manager.listWALSegments()
251
+ expect(segments[0].compressed).toBe(true)
252
+ })
253
+
254
+ it('should include checksums in WAL segment metadata', async () => {
255
+ const entries = [
256
+ {
257
+ lsn: '0/1000',
258
+ operation: 'INSERT' as const,
259
+ schema: 'public',
260
+ table: 'users',
261
+ newRow: { id: 1, name: 'Alice' },
262
+ timestamp: Date.now(),
263
+ },
264
+ ]
265
+
266
+ await manager.archiveWALEntries(entries)
267
+ await manager.flushWALBuffer()
268
+
269
+ const segments = await manager.listWALSegments()
270
+ expect(segments[0].checksum).toBeDefined()
271
+ })
272
+
273
+ it('should track the last archived LSN', async () => {
274
+ const entries = [
275
+ {
276
+ lsn: '0/5000',
277
+ operation: 'INSERT' as const,
278
+ schema: 'public',
279
+ table: 'users',
280
+ newRow: { id: 1, name: 'Alice' },
281
+ timestamp: Date.now(),
282
+ },
283
+ ]
284
+
285
+ await manager.archiveWALEntries(entries)
286
+ await manager.flushWALBuffer()
287
+
288
+ const lastLsn = manager.getLastArchivedLsn()
289
+ expect(lastLsn).toBe('0/5000')
290
+ })
291
+
292
+ it('should handle archiving failure with retry', async () => {
293
+ mockBucket.put
294
+ .mockRejectedValueOnce(new Error('R2 transient error'))
295
+ .mockResolvedValueOnce(undefined)
296
+
297
+ const entries = [
298
+ {
299
+ lsn: '0/1000',
300
+ operation: 'INSERT' as const,
301
+ schema: 'public',
302
+ table: 'users',
303
+ newRow: { id: 1 },
304
+ timestamp: Date.now(),
305
+ },
306
+ ]
307
+
308
+ await manager.archiveWALEntries(entries)
309
+ await manager.flushWALBuffer()
310
+
311
+ // Should eventually succeed after retry
312
+ expect(mockBucket.put).toHaveBeenCalledTimes(2)
313
+ })
314
+
315
+ it('should maintain WAL ordering guarantee', async () => {
316
+ const entries = [
317
+ { lsn: '0/3000', operation: 'INSERT' as const, schema: 'public', table: 'users', newRow: { id: 3 }, timestamp: 3 },
318
+ { lsn: '0/1000', operation: 'INSERT' as const, schema: 'public', table: 'users', newRow: { id: 1 }, timestamp: 1 },
319
+ { lsn: '0/2000', operation: 'INSERT' as const, schema: 'public', table: 'users', newRow: { id: 2 }, timestamp: 2 },
320
+ ]
321
+
322
+ await manager.archiveWALEntries(entries)
323
+ await manager.flushWALBuffer()
324
+
325
+ const segments = await manager.listWALSegments()
326
+ // Segments should be ordered by LSN
327
+ expect(segments[0].startLsn).toBe('0/1000')
328
+ })
329
+
330
+ it('should provide WAL archive statistics', async () => {
331
+ const entries = [
332
+ {
333
+ lsn: '0/1000',
334
+ operation: 'INSERT' as const,
335
+ schema: 'public',
336
+ table: 'users',
337
+ newRow: { id: 1 },
338
+ timestamp: Date.now(),
339
+ },
340
+ ]
341
+
342
+ await manager.archiveWALEntries(entries)
343
+ await manager.flushWALBuffer()
344
+
345
+ const stats = manager.getArchiveStats()
346
+
347
+ expect(stats.totalSegments).toBeGreaterThan(0)
348
+ expect(stats.totalEntriesArchived).toBeGreaterThan(0)
349
+ expect(stats.totalBytesArchived).toBeGreaterThan(0)
350
+ expect(stats.lastArchiveTimestamp).toBeDefined()
351
+ })
352
+ })
353
+
354
+ // ===========================================================================
355
+ // Tests: Recovery Targets
356
+ // ===========================================================================
357
+
358
+ describe('Recovery Targets', () => {
359
+ let manager: InstanceType<typeof PITRManager>
360
+
361
+ beforeEach(() => {
362
+ manager = createPITRManager({
363
+ bucket: mockBucket as unknown as R2Bucket,
364
+ doId: 'test-do-123',
365
+ prefix: 'wal-archive/',
366
+ })
367
+ })
368
+
369
+ describe('Timestamp-based Recovery', () => {
370
+ it('should recover to a specific timestamp', async () => {
371
+ const targetTime = new Date('2024-06-15T10:30:00Z')
372
+
373
+ // Mock WAL segments covering the target time
374
+ mockBucket.list.mockResolvedValueOnce({
375
+ objects: [
376
+ { key: 'wal-archive/test-do-123/seg-001', size: 1024, uploaded: new Date('2024-06-15T10:00:00Z') },
377
+ { key: 'wal-archive/test-do-123/seg-002', size: 1024, uploaded: new Date('2024-06-15T10:20:00Z') },
378
+ { key: 'wal-archive/test-do-123/seg-003', size: 1024, uploaded: new Date('2024-06-15T10:40:00Z') },
379
+ ],
380
+ truncated: false,
381
+ })
382
+
383
+ const result = await manager.recoverToTimestamp(
384
+ targetTime,
385
+ mockPGLite as any
386
+ )
387
+
388
+ expect(result.success).toBe(true)
389
+ expect(result.recoveryTarget.type).toBe('timestamp')
390
+ expect(result.recoveryTarget.value).toEqual(targetTime)
391
+ })
392
+
393
+ it('should apply WAL entries up to target timestamp', async () => {
394
+ const targetTime = new Date('2024-06-15T10:30:00Z')
395
+
396
+ mockBucket.get.mockResolvedValue({
397
+ bytes: () => Promise.resolve(new Uint8Array()),
398
+ text: () => Promise.resolve(JSON.stringify([
399
+ { lsn: '0/1000', timestamp: new Date('2024-06-15T10:25:00Z').getTime(), operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
400
+ { lsn: '0/2000', timestamp: new Date('2024-06-15T10:35:00Z').getTime(), operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 2 } },
401
+ ])),
402
+ })
403
+
404
+ const result = await manager.recoverToTimestamp(
405
+ targetTime,
406
+ mockPGLite as any
407
+ )
408
+
409
+ expect(result.entriesApplied).toBe(1) // Only entry before target
410
+ })
411
+
412
+ it('should fail if no WAL data covers the target timestamp', async () => {
413
+ const futureTime = new Date('2030-01-01T00:00:00Z')
414
+
415
+ mockBucket.list.mockResolvedValueOnce({ objects: [], truncated: false })
416
+
417
+ const result = await manager.recoverToTimestamp(
418
+ futureTime,
419
+ mockPGLite as any
420
+ )
421
+
422
+ expect(result.success).toBe(false)
423
+ expect(result.error).toContain('No WAL data available')
424
+ })
425
+
426
+ it('should handle timestamp at exact WAL boundary', async () => {
427
+ const exactTime = new Date('2024-06-15T10:30:00.000Z')
428
+ const exactTimestamp = exactTime.getTime()
429
+
430
+ mockBucket.get.mockResolvedValue({
431
+ bytes: () => Promise.resolve(new Uint8Array()),
432
+ text: () => Promise.resolve(JSON.stringify([
433
+ { lsn: '0/1000', timestamp: exactTimestamp, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
434
+ ])),
435
+ })
436
+
437
+ const result = await manager.recoverToTimestamp(
438
+ exactTime,
439
+ mockPGLite as any
440
+ )
441
+
442
+ expect(result.success).toBe(true)
443
+ expect(result.entriesApplied).toBe(1) // Include the exact boundary entry
444
+ })
445
+ })
446
+
447
+ describe('LSN-based Recovery', () => {
448
+ it('should recover to a specific LSN', async () => {
449
+ const targetLsn = '0/5000'
450
+
451
+ mockBucket.get.mockResolvedValue({
452
+ bytes: () => Promise.resolve(new Uint8Array()),
453
+ text: () => Promise.resolve(JSON.stringify([
454
+ { lsn: '0/1000', timestamp: 1000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
455
+ { lsn: '0/3000', timestamp: 2000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 2 } },
456
+ { lsn: '0/5000', timestamp: 3000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 3 } },
457
+ { lsn: '0/7000', timestamp: 4000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 4 } },
458
+ ])),
459
+ })
460
+
461
+ const result = await manager.recoverToLsn(targetLsn, mockPGLite as any)
462
+
463
+ expect(result.success).toBe(true)
464
+ expect(result.recoveryTarget.type).toBe('lsn')
465
+ expect(result.recoveryTarget.value).toBe(targetLsn)
466
+ expect(result.entriesApplied).toBe(3) // Up to and including target LSN
467
+ })
468
+
469
+ it('should fail if target LSN is not found in archive', async () => {
470
+ const targetLsn = '0/99999'
471
+
472
+ mockBucket.list.mockResolvedValueOnce({ objects: [], truncated: false })
473
+
474
+ const result = await manager.recoverToLsn(targetLsn, mockPGLite as any)
475
+
476
+ expect(result.success).toBe(false)
477
+ expect(result.error).toContain('LSN not found')
478
+ })
479
+
480
+ it('should handle LSN comparison correctly', async () => {
481
+ // LSN '0/A000' > '0/9000' in hex comparison
482
+ const targetLsn = '0/A000'
483
+
484
+ mockBucket.get.mockResolvedValue({
485
+ bytes: () => Promise.resolve(new Uint8Array()),
486
+ text: () => Promise.resolve(JSON.stringify([
487
+ { lsn: '0/9000', timestamp: 1000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
488
+ { lsn: '0/A000', timestamp: 2000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 2 } },
489
+ { lsn: '0/B000', timestamp: 3000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 3 } },
490
+ ])),
491
+ })
492
+
493
+ const result = await manager.recoverToLsn(targetLsn, mockPGLite as any)
494
+
495
+ expect(result.success).toBe(true)
496
+ expect(result.entriesApplied).toBe(2) // 0/9000 and 0/A000
497
+ })
498
+ })
499
+
500
+ describe('Named Restore Points', () => {
501
+ it('should create a named restore point', async () => {
502
+ const point = await manager.createRestorePoint('before-migration')
503
+
504
+ expect(point.name).toBe('before-migration')
505
+ expect(point.lsn).toBeDefined()
506
+ expect(point.timestamp).toBeDefined()
507
+ })
508
+
509
+ it('should recover to a named restore point', async () => {
510
+ await manager.createRestorePoint('pre-deploy')
511
+
512
+ mockBucket.get.mockResolvedValue({
513
+ bytes: () => Promise.resolve(new Uint8Array()),
514
+ text: () => Promise.resolve('[]'),
515
+ })
516
+
517
+ const result = await manager.recoverToRestorePoint(
518
+ 'pre-deploy',
519
+ mockPGLite as any
520
+ )
521
+
522
+ expect(result.success).toBe(true)
523
+ expect(result.recoveryTarget.type).toBe('named')
524
+ expect(result.recoveryTarget.value).toBe('pre-deploy')
525
+ })
526
+
527
+ it('should list all named restore points', async () => {
528
+ await manager.createRestorePoint('point-1')
529
+ await manager.createRestorePoint('point-2')
530
+ await manager.createRestorePoint('point-3')
531
+
532
+ const points = await manager.listRestorePoints()
533
+
534
+ expect(points.length).toBe(3)
535
+ expect(points.map((p) => p.name)).toContain('point-1')
536
+ expect(points.map((p) => p.name)).toContain('point-2')
537
+ expect(points.map((p) => p.name)).toContain('point-3')
538
+ })
539
+
540
+ it('should delete a named restore point', async () => {
541
+ await manager.createRestorePoint('temporary-point')
542
+
543
+ await manager.deleteRestorePoint('temporary-point')
544
+
545
+ const points = await manager.listRestorePoints()
546
+ expect(points.find((p) => p.name === 'temporary-point')).toBeUndefined()
547
+ })
548
+
549
+ it('should fail recovery if restore point not found', async () => {
550
+ const result = await manager.recoverToRestorePoint(
551
+ 'non-existent',
552
+ mockPGLite as any
553
+ )
554
+
555
+ expect(result.success).toBe(false)
556
+ expect(result.error).toContain('Restore point not found')
557
+ })
558
+
559
+ it('should prevent duplicate restore point names', async () => {
560
+ await manager.createRestorePoint('unique-name')
561
+
562
+ await expect(
563
+ manager.createRestorePoint('unique-name')
564
+ ).rejects.toThrow('already exists')
565
+ })
566
+ })
567
+ })
568
+
569
+ // ===========================================================================
570
+ // Tests: Timeline Management
571
+ // ===========================================================================
572
+
573
+ describe('Timeline Management', () => {
574
+ let manager: InstanceType<typeof PITRManager>
575
+
576
+ beforeEach(() => {
577
+ manager = createPITRManager({
578
+ bucket: mockBucket as unknown as R2Bucket,
579
+ doId: 'test-do-123',
580
+ prefix: 'wal-archive/',
581
+ maxTimelineHistory: 5,
582
+ })
583
+ })
584
+
585
+ it('should track current timeline ID', () => {
586
+ const timeline = manager.getCurrentTimeline()
587
+
588
+ expect(timeline.id).toBeDefined()
589
+ expect(timeline.id).toBeGreaterThanOrEqual(1)
590
+ })
591
+
592
+ it('should create a new timeline on recovery', async () => {
593
+ const beforeTimeline = manager.getCurrentTimeline()
594
+
595
+ mockBucket.get.mockResolvedValue({
596
+ bytes: () => Promise.resolve(new Uint8Array()),
597
+ text: () => Promise.resolve('[]'),
598
+ })
599
+
600
+ await manager.recoverToTimestamp(
601
+ new Date('2024-01-01T00:00:00Z'),
602
+ mockPGLite as any
603
+ )
604
+
605
+ const afterTimeline = manager.getCurrentTimeline()
606
+
607
+ expect(afterTimeline.id).toBeGreaterThan(beforeTimeline.id)
608
+ })
609
+
610
+ it('should record timeline branch point', async () => {
611
+ mockBucket.get.mockResolvedValue({
612
+ bytes: () => Promise.resolve(new Uint8Array()),
613
+ text: () => Promise.resolve('[]'),
614
+ })
615
+
616
+ await manager.recoverToTimestamp(
617
+ new Date('2024-01-01T00:00:00Z'),
618
+ mockPGLite as any
619
+ )
620
+
621
+ const timeline = manager.getCurrentTimeline()
622
+
623
+ expect(timeline.branchPoint).toBeDefined()
624
+ expect(timeline.branchPoint!.parentTimelineId).toBeDefined()
625
+ expect(timeline.branchPoint!.lsn).toBeDefined()
626
+ expect(timeline.branchPoint!.timestamp).toBeDefined()
627
+ })
628
+
629
+ it('should list timeline history', async () => {
630
+ // Perform multiple recoveries to create timeline history
631
+ mockBucket.get.mockResolvedValue({
632
+ bytes: () => Promise.resolve(new Uint8Array()),
633
+ text: () => Promise.resolve('[]'),
634
+ })
635
+
636
+ await manager.recoverToTimestamp(new Date('2024-01-01'), mockPGLite as any)
637
+ await manager.recoverToTimestamp(new Date('2024-02-01'), mockPGLite as any)
638
+
639
+ const history = await manager.getTimelineHistory()
640
+
641
+ expect(history.length).toBeGreaterThan(1)
642
+ })
643
+
644
+ it('should limit timeline history to maxTimelineHistory', async () => {
645
+ mockBucket.get.mockResolvedValue({
646
+ bytes: () => Promise.resolve(new Uint8Array()),
647
+ text: () => Promise.resolve('[]'),
648
+ })
649
+
650
+ // Create more timelines than the limit
651
+ for (let i = 0; i < 7; i++) {
652
+ await manager.recoverToTimestamp(
653
+ new Date(`2024-0${i + 1}-01`),
654
+ mockPGLite as any
655
+ )
656
+ }
657
+
658
+ const history = await manager.getTimelineHistory()
659
+
660
+ expect(history.length).toBeLessThanOrEqual(5)
661
+ })
662
+
663
+ it('should store WAL segments per timeline', async () => {
664
+ const entries = [
665
+ {
666
+ lsn: '0/1000',
667
+ operation: 'INSERT' as const,
668
+ schema: 'public',
669
+ table: 'users',
670
+ newRow: { id: 1 },
671
+ timestamp: Date.now(),
672
+ },
673
+ ]
674
+
675
+ await manager.archiveWALEntries(entries)
676
+ await manager.flushWALBuffer()
677
+
678
+ const timeline = manager.getCurrentTimeline()
679
+ const segments = await manager.listWALSegments({ timelineId: timeline.id })
680
+
681
+ expect(segments.length).toBeGreaterThan(0)
682
+ expect(segments[0].timelineId).toBe(timeline.id)
683
+ })
684
+
685
+ it('should allow switching to a different timeline for reads', async () => {
686
+ mockBucket.get.mockResolvedValue({
687
+ bytes: () => Promise.resolve(new Uint8Array()),
688
+ text: () => Promise.resolve('[]'),
689
+ })
690
+
691
+ // Create timeline 2
692
+ await manager.recoverToTimestamp(new Date('2024-01-01'), mockPGLite as any)
693
+ const timeline2 = manager.getCurrentTimeline()
694
+
695
+ // Create timeline 3
696
+ await manager.recoverToTimestamp(new Date('2024-02-01'), mockPGLite as any)
697
+
698
+ // Should be able to list segments from timeline 2
699
+ const segments = await manager.listWALSegments({ timelineId: timeline2.id })
700
+ expect(segments).toBeDefined()
701
+ })
702
+ })
703
+
704
+ // ===========================================================================
705
+ // Tests: Recovery Plan
706
+ // ===========================================================================
707
+
708
+ describe('Recovery Plan', () => {
709
+ let manager: InstanceType<typeof PITRManager>
710
+
711
+ beforeEach(() => {
712
+ manager = createPITRManager({
713
+ bucket: mockBucket as unknown as R2Bucket,
714
+ doId: 'test-do-123',
715
+ prefix: 'wal-archive/',
716
+ })
717
+ })
718
+
719
+ it('should generate a recovery plan for timestamp target', async () => {
720
+ mockBucket.list.mockResolvedValueOnce({
721
+ objects: [
722
+ { key: 'seg-001', size: 1024, customMetadata: { startLsn: '0/1000', endLsn: '0/5000' } },
723
+ { key: 'seg-002', size: 2048, customMetadata: { startLsn: '0/5001', endLsn: '0/9000' } },
724
+ ],
725
+ truncated: false,
726
+ })
727
+
728
+ const plan = await manager.generateRecoveryPlan({
729
+ type: 'timestamp',
730
+ value: new Date('2024-06-15T10:30:00Z'),
731
+ })
732
+
733
+ expect(plan.segmentsRequired).toBeDefined()
734
+ expect(plan.segmentsRequired.length).toBeGreaterThan(0)
735
+ expect(plan.estimatedDurationMs).toBeDefined()
736
+ expect(plan.totalBytesToReplay).toBeGreaterThan(0)
737
+ })
738
+
739
+ it('should generate a recovery plan for LSN target', async () => {
740
+ mockBucket.list.mockResolvedValueOnce({
741
+ objects: [
742
+ { key: 'seg-001', size: 1024 },
743
+ ],
744
+ truncated: false,
745
+ })
746
+
747
+ const plan = await manager.generateRecoveryPlan({
748
+ type: 'lsn',
749
+ value: '0/5000',
750
+ })
751
+
752
+ expect(plan.segmentsRequired).toBeDefined()
753
+ expect(plan.targetLsn).toBe('0/5000')
754
+ })
755
+
756
+ it('should estimate recovery time in the plan', async () => {
757
+ mockBucket.list.mockResolvedValueOnce({
758
+ objects: [
759
+ { key: 'seg-001', size: 1024 * 1024 },
760
+ { key: 'seg-002', size: 2 * 1024 * 1024 },
761
+ ],
762
+ truncated: false,
763
+ })
764
+
765
+ const plan = await manager.generateRecoveryPlan({
766
+ type: 'timestamp',
767
+ value: new Date('2024-06-15T10:30:00Z'),
768
+ })
769
+
770
+ expect(plan.estimatedDurationMs).toBeGreaterThan(0)
771
+ })
772
+
773
+ it('should indicate if base backup is needed', async () => {
774
+ const plan = await manager.generateRecoveryPlan({
775
+ type: 'timestamp',
776
+ value: new Date('2024-06-15T10:30:00Z'),
777
+ })
778
+
779
+ expect(plan.requiresBaseBackup).toBeDefined()
780
+ })
781
+
782
+ it('should fail plan generation if recovery is impossible', async () => {
783
+ mockBucket.list.mockResolvedValueOnce({ objects: [], truncated: false })
784
+
785
+ const plan = await manager.generateRecoveryPlan({
786
+ type: 'timestamp',
787
+ value: new Date('2030-01-01'),
788
+ })
789
+
790
+ expect(plan.feasible).toBe(false)
791
+ expect(plan.reason).toBeDefined()
792
+ })
793
+ })
794
+
795
+ // ===========================================================================
796
+ // Tests: Recovery Validation
797
+ // ===========================================================================
798
+
799
+ describe('Recovery Validation', () => {
800
+ let manager: InstanceType<typeof PITRManager>
801
+
802
+ beforeEach(() => {
803
+ manager = createPITRManager({
804
+ bucket: mockBucket as unknown as R2Bucket,
805
+ doId: 'test-do-123',
806
+ prefix: 'wal-archive/',
807
+ })
808
+ })
809
+
810
+ it('should validate WAL continuity after recovery', async () => {
811
+ mockBucket.get.mockResolvedValue({
812
+ bytes: () => Promise.resolve(new Uint8Array()),
813
+ text: () => Promise.resolve(JSON.stringify([
814
+ { lsn: '0/1000', timestamp: 1000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
815
+ { lsn: '0/2000', timestamp: 2000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 2 } },
816
+ ])),
817
+ })
818
+
819
+ await manager.recoverToLsn('0/2000', mockPGLite as any)
820
+
821
+ const validation = await manager.validateRecovery()
822
+
823
+ expect(validation.walContinuity).toBe(true)
824
+ })
825
+
826
+ it('should validate database consistency after recovery', async () => {
827
+ mockBucket.get.mockResolvedValue({
828
+ bytes: () => Promise.resolve(new Uint8Array()),
829
+ text: () => Promise.resolve('[]'),
830
+ })
831
+
832
+ await manager.recoverToTimestamp(
833
+ new Date('2024-01-01'),
834
+ mockPGLite as any
835
+ )
836
+
837
+ mockPGLite.query.mockResolvedValueOnce({
838
+ rows: [{ consistent: true }],
839
+ })
840
+
841
+ const validation = await manager.validateRecovery()
842
+
843
+ expect(validation.databaseConsistent).toBe(true)
844
+ })
845
+
846
+ it('should verify checksum integrity of WAL segments', async () => {
847
+ const entries = [
848
+ {
849
+ lsn: '0/1000',
850
+ operation: 'INSERT' as const,
851
+ schema: 'public',
852
+ table: 'users',
853
+ newRow: { id: 1 },
854
+ timestamp: Date.now(),
855
+ },
856
+ ]
857
+
858
+ await manager.archiveWALEntries(entries)
859
+ await manager.flushWALBuffer()
860
+
861
+ const validation = await manager.validateArchiveIntegrity()
862
+
863
+ expect(validation.allSegmentsValid).toBe(true)
864
+ expect(validation.segmentsChecked).toBeGreaterThan(0)
865
+ })
866
+
867
+ it('should detect WAL gaps', async () => {
868
+ mockBucket.list.mockResolvedValueOnce({
869
+ objects: [
870
+ { key: 'seg-001', customMetadata: { startLsn: '0/1000', endLsn: '0/3000' } },
871
+ // Gap: 0/3001 to 0/6999
872
+ { key: 'seg-003', customMetadata: { startLsn: '0/7000', endLsn: '0/9000' } },
873
+ ],
874
+ truncated: false,
875
+ })
876
+
877
+ const validation = await manager.validateArchiveIntegrity()
878
+
879
+ expect(validation.allSegmentsValid).toBe(false)
880
+ expect(validation.gaps).toBeDefined()
881
+ expect(validation.gaps!.length).toBeGreaterThan(0)
882
+ })
883
+
884
+ it('should report recovery validation summary', async () => {
885
+ mockBucket.get.mockResolvedValue({
886
+ bytes: () => Promise.resolve(new Uint8Array()),
887
+ text: () => Promise.resolve('[]'),
888
+ })
889
+
890
+ await manager.recoverToTimestamp(new Date('2024-01-01'), mockPGLite as any)
891
+
892
+ const validation = await manager.validateRecovery()
893
+
894
+ expect(validation.summary).toBeDefined()
895
+ expect(validation.summary).toContain('success') // or failure reason
896
+ })
897
+ })
898
+
899
+ // ===========================================================================
900
+ // Tests: WAL Segment Management
901
+ // ===========================================================================
902
+
903
+ describe('WAL Segment Management', () => {
904
+ let manager: InstanceType<typeof PITRManager>
905
+
906
+ beforeEach(() => {
907
+ manager = createPITRManager({
908
+ bucket: mockBucket as unknown as R2Bucket,
909
+ doId: 'test-do-123',
910
+ prefix: 'wal-archive/',
911
+ retentionDays: 7,
912
+ })
913
+ })
914
+
915
+ it('should list WAL segments with pagination', async () => {
916
+ mockBucket.list
917
+ .mockResolvedValueOnce({
918
+ objects: [{ key: 'seg-001', size: 1024 }],
919
+ truncated: true,
920
+ cursor: 'page2',
921
+ })
922
+ .mockResolvedValueOnce({
923
+ objects: [{ key: 'seg-002', size: 2048 }],
924
+ truncated: false,
925
+ })
926
+
927
+ const segments = await manager.listWALSegments({ limit: 100 })
928
+
929
+ expect(segments.length).toBe(2)
930
+ })
931
+
932
+ it('should get WAL segment info by key', async () => {
933
+ mockBucket.head.mockResolvedValueOnce({
934
+ size: 1024,
935
+ etag: 'abc123',
936
+ uploaded: new Date('2024-06-15'),
937
+ customMetadata: {
938
+ startLsn: '0/1000',
939
+ endLsn: '0/5000',
940
+ entryCount: '42',
941
+ compression: 'gzip',
942
+ checksum: 'sha256-hash',
943
+ },
944
+ })
945
+
946
+ const info = await manager.getSegmentInfo('seg-001')
947
+
948
+ expect(info).toBeDefined()
949
+ expect(info!.startLsn).toBe('0/1000')
950
+ expect(info!.endLsn).toBe('0/5000')
951
+ expect(info!.entryCount).toBe(42)
952
+ expect(info!.compressed).toBe(true)
953
+ })
954
+
955
+ it('should prune WAL segments older than retention period', async () => {
956
+ mockBucket.list.mockResolvedValueOnce({
957
+ objects: [
958
+ { key: 'seg-old', uploaded: new Date(Date.now() - 10 * 24 * 60 * 60 * 1000) }, // 10 days old
959
+ { key: 'seg-recent', uploaded: new Date(Date.now() - 1 * 24 * 60 * 60 * 1000) }, // 1 day old
960
+ ],
961
+ truncated: false,
962
+ })
963
+
964
+ const pruneResult = await manager.pruneWALSegments()
965
+
966
+ expect(pruneResult.segmentsPruned).toBe(1) // Only old segment
967
+ expect(mockBucket.delete).toHaveBeenCalledTimes(1)
968
+ })
969
+
970
+ it('should not prune segments needed for restore points', async () => {
971
+ await manager.createRestorePoint('important-point')
972
+
973
+ // The segment associated with the restore point should not be pruned
974
+ const pruneResult = await manager.pruneWALSegments()
975
+
976
+ expect(pruneResult.segmentsRetainedForRestorePoints).toBeDefined()
977
+ })
978
+
979
+ it('should calculate total WAL archive size', async () => {
980
+ mockBucket.list.mockResolvedValueOnce({
981
+ objects: [
982
+ { key: 'seg-001', size: 1024 },
983
+ { key: 'seg-002', size: 2048 },
984
+ { key: 'seg-003', size: 4096 },
985
+ ],
986
+ truncated: false,
987
+ })
988
+
989
+ const totalSize = await manager.getArchiveSize()
990
+
991
+ expect(totalSize).toBe(1024 + 2048 + 4096)
992
+ })
993
+ })
994
+
995
+ // ===========================================================================
996
+ // Tests: PITR Statistics
997
+ // ===========================================================================
998
+
999
+ describe('PITR Statistics', () => {
1000
+ let manager: InstanceType<typeof PITRManager>
1001
+
1002
+ beforeEach(() => {
1003
+ manager = createPITRManager({
1004
+ bucket: mockBucket as unknown as R2Bucket,
1005
+ doId: 'test-do-123',
1006
+ prefix: 'wal-archive/',
1007
+ })
1008
+ })
1009
+
1010
+ it('should track total WAL entries archived', async () => {
1011
+ const entries = Array.from({ length: 10 }, (_, i) => ({
1012
+ lsn: `0/${(i + 1) * 1000}`,
1013
+ operation: 'INSERT' as const,
1014
+ schema: 'public',
1015
+ table: 'users',
1016
+ newRow: { id: i },
1017
+ timestamp: Date.now() + i,
1018
+ }))
1019
+
1020
+ await manager.archiveWALEntries(entries)
1021
+ await manager.flushWALBuffer()
1022
+
1023
+ const stats = manager.getStats()
1024
+
1025
+ expect(stats.totalEntriesArchived).toBe(10)
1026
+ })
1027
+
1028
+ it('should track number of recoveries performed', async () => {
1029
+ mockBucket.get.mockResolvedValue({
1030
+ bytes: () => Promise.resolve(new Uint8Array()),
1031
+ text: () => Promise.resolve('[]'),
1032
+ })
1033
+
1034
+ await manager.recoverToTimestamp(new Date('2024-01-01'), mockPGLite as any)
1035
+ await manager.recoverToTimestamp(new Date('2024-02-01'), mockPGLite as any)
1036
+
1037
+ const stats = manager.getStats()
1038
+
1039
+ expect(stats.recoveriesPerformed).toBe(2)
1040
+ })
1041
+
1042
+ it('should track archive lag (time since last archival)', async () => {
1043
+ const entries = [
1044
+ {
1045
+ lsn: '0/1000',
1046
+ operation: 'INSERT' as const,
1047
+ schema: 'public',
1048
+ table: 'users',
1049
+ newRow: { id: 1 },
1050
+ timestamp: Date.now(),
1051
+ },
1052
+ ]
1053
+
1054
+ await manager.archiveWALEntries(entries)
1055
+ await manager.flushWALBuffer()
1056
+
1057
+ vi.advanceTimersByTime(5000) // 5 seconds
1058
+
1059
+ const stats = manager.getStats()
1060
+
1061
+ expect(stats.archiveLagMs).toBeGreaterThanOrEqual(5000)
1062
+ })
1063
+
1064
+ it('should track oldest available recovery point timestamp', async () => {
1065
+ const entries = [
1066
+ {
1067
+ lsn: '0/1000',
1068
+ operation: 'INSERT' as const,
1069
+ schema: 'public',
1070
+ table: 'users',
1071
+ newRow: { id: 1 },
1072
+ timestamp: 1000,
1073
+ },
1074
+ ]
1075
+
1076
+ await manager.archiveWALEntries(entries)
1077
+ await manager.flushWALBuffer()
1078
+
1079
+ const stats = manager.getStats()
1080
+
1081
+ expect(stats.oldestRecoveryPointMs).toBeDefined()
1082
+ })
1083
+
1084
+ it('should track current timeline info in stats', () => {
1085
+ const stats = manager.getStats()
1086
+
1087
+ expect(stats.currentTimelineId).toBeDefined()
1088
+ expect(stats.currentTimelineId).toBeGreaterThanOrEqual(1)
1089
+ })
1090
+
1091
+ it('should reset statistics', () => {
1092
+ manager.resetStats()
1093
+
1094
+ const stats = manager.getStats()
1095
+
1096
+ expect(stats.totalEntriesArchived).toBe(0)
1097
+ expect(stats.recoveriesPerformed).toBe(0)
1098
+ })
1099
+ })
1100
+
1101
  // ===========================================================================
  // Tests: Edge Cases
  // ===========================================================================

  describe('Edge Cases', () => {
    let manager: InstanceType<typeof PITRManager>

    beforeEach(() => {
      manager = createPITRManager({
        bucket: mockBucket as unknown as R2Bucket,
        doId: 'test-do-123',
        prefix: 'wal-archive/',
      })
    })

    // Flushing with nothing buffered must not touch R2 at all.
    it('should handle empty WAL buffer flush', async () => {
      await manager.flushWALBuffer()

      expect(mockBucket.put).not.toHaveBeenCalled()
    })

    it('should handle duplicate WAL entries gracefully', async () => {
      const entry = {
        lsn: '0/1000',
        operation: 'INSERT' as const,
        schema: 'public',
        table: 'users',
        newRow: { id: 1 },
        timestamp: Date.now(),
      }

      await manager.archiveWALEntries([entry, entry]) // Duplicate

      await manager.flushWALBuffer()

      const stats = manager.getStats()
      // Should deduplicate or handle gracefully — the exact count (1 vs 2)
      // is deliberately unspecified; only that stats remain well-formed.
      expect(stats.totalEntriesArchived).toBeDefined()
    })

    it('should handle R2 errors during segment retrieval', async () => {
      mockBucket.get.mockRejectedValueOnce(new Error('R2 unavailable'))

      const result = await manager.recoverToLsn('0/1000', mockPGLite as any)

      // Storage failures surface as a failed RecoveryResult, not a throw.
      expect(result.success).toBe(false)
      expect(result.error).toBeDefined()
    })

    it('should handle corrupted WAL segment data', async () => {
      // Segment whose payload can be fetched but not decoded as text/JSON.
      mockBucket.get.mockResolvedValueOnce({
        bytes: () => Promise.resolve(new Uint8Array([0xFF, 0xFE, 0xFD])), // Invalid data
        text: () => { throw new Error('Invalid UTF-8') },
      })

      const result = await manager.recoverToLsn('0/1000', mockPGLite as any)

      expect(result.success).toBe(false)
      // The error message is expected to mention corruption explicitly.
      expect(result.error).toContain('corrupt')
    })

    it('should handle recovery of very large WAL history', async () => {
      // Mock many segments
      const objects = Array.from({ length: 1000 }, (_, i) => ({
        key: `seg-${String(i).padStart(6, '0')}`,
        size: 1024 * 1024,
      }))

      mockBucket.list.mockResolvedValueOnce({
        objects,
        truncated: false,
      })

      // Planning over 1000 segments must complete and select at least one.
      const plan = await manager.generateRecoveryPlan({
        type: 'timestamp',
        value: new Date(),
      })

      expect(plan.segmentsRequired.length).toBeGreaterThan(0)
    })

    it('should handle concurrent archival and recovery', async () => {
      // Start an archival and a recovery without awaiting either first,
      // so both operations are in flight at the same time.
      const archivePromise = manager.archiveWALEntries([
        {
          lsn: '0/1000',
          operation: 'INSERT' as const,
          schema: 'public',
          table: 'users',
          newRow: { id: 1 },
          timestamp: Date.now(),
        },
      ])

      mockBucket.get.mockResolvedValue({
        bytes: () => Promise.resolve(new Uint8Array()),
        text: () => Promise.resolve('[]'),
      })

      const recoveryPromise = manager.recoverToTimestamp(
        new Date('2024-01-01'),
        mockPGLite as any
      )

      const [archiveResult, recoveryResult] = await Promise.allSettled([
        archivePromise,
        recoveryPromise,
      ])

      // Both should resolve (not throw)
      expect(archiveResult.status).toBe('fulfilled')
      expect(recoveryResult.status).toBe('fulfilled')
    })

    it('should handle PGLite errors during recovery replay', async () => {
      // Archive returns one valid entry to replay…
      mockBucket.get.mockResolvedValue({
        bytes: () => Promise.resolve(new Uint8Array()),
        text: () => Promise.resolve(JSON.stringify([
          { lsn: '0/1000', timestamp: 1000, operation: 'INSERT', schema: 'public', table: 'users', newRow: { id: 1 } },
        ])),
      })

      // …but the replay target rejects; the failure must be reported, not thrown.
      mockPGLite.exec.mockRejectedValueOnce(new Error('PGLite write error'))

      const result = await manager.recoverToLsn('0/1000', mockPGLite as any)

      expect(result.success).toBe(false)
      expect(result.error).toContain('PGLite')
    })

    it('should handle recovery to the current point in time (no-op)', async () => {
      const result = await manager.recoverToTimestamp(
        new Date(), // Current time
        mockPGLite as any
      )

      // Should succeed with 0 entries applied
      expect(result.entriesApplied).toBe(0)
    })
  })
1240
+ })