@powersync/service-module-postgres 0.0.0-dev-20240918092408

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/CHANGELOG.md +18 -0
  2. package/LICENSE +67 -0
  3. package/README.md +3 -0
  4. package/dist/api/PostgresRouteAPIAdapter.d.ts +22 -0
  5. package/dist/api/PostgresRouteAPIAdapter.js +273 -0
  6. package/dist/api/PostgresRouteAPIAdapter.js.map +1 -0
  7. package/dist/auth/SupabaseKeyCollector.d.ts +22 -0
  8. package/dist/auth/SupabaseKeyCollector.js +64 -0
  9. package/dist/auth/SupabaseKeyCollector.js.map +1 -0
  10. package/dist/index.d.ts +3 -0
  11. package/dist/index.js +4 -0
  12. package/dist/index.js.map +1 -0
  13. package/dist/module/PostgresModule.d.ts +14 -0
  14. package/dist/module/PostgresModule.js +108 -0
  15. package/dist/module/PostgresModule.js.map +1 -0
  16. package/dist/replication/ConnectionManagerFactory.d.ts +10 -0
  17. package/dist/replication/ConnectionManagerFactory.js +21 -0
  18. package/dist/replication/ConnectionManagerFactory.js.map +1 -0
  19. package/dist/replication/PgManager.d.ts +25 -0
  20. package/dist/replication/PgManager.js +60 -0
  21. package/dist/replication/PgManager.js.map +1 -0
  22. package/dist/replication/PgRelation.d.ts +6 -0
  23. package/dist/replication/PgRelation.js +27 -0
  24. package/dist/replication/PgRelation.js.map +1 -0
  25. package/dist/replication/PostgresErrorRateLimiter.d.ts +11 -0
  26. package/dist/replication/PostgresErrorRateLimiter.js +43 -0
  27. package/dist/replication/PostgresErrorRateLimiter.js.map +1 -0
  28. package/dist/replication/WalStream.d.ts +53 -0
  29. package/dist/replication/WalStream.js +536 -0
  30. package/dist/replication/WalStream.js.map +1 -0
  31. package/dist/replication/WalStreamReplicationJob.d.ts +27 -0
  32. package/dist/replication/WalStreamReplicationJob.js +131 -0
  33. package/dist/replication/WalStreamReplicationJob.js.map +1 -0
  34. package/dist/replication/WalStreamReplicator.d.ts +13 -0
  35. package/dist/replication/WalStreamReplicator.js +36 -0
  36. package/dist/replication/WalStreamReplicator.js.map +1 -0
  37. package/dist/replication/replication-index.d.ts +5 -0
  38. package/dist/replication/replication-index.js +6 -0
  39. package/dist/replication/replication-index.js.map +1 -0
  40. package/dist/replication/replication-utils.d.ts +32 -0
  41. package/dist/replication/replication-utils.js +272 -0
  42. package/dist/replication/replication-utils.js.map +1 -0
  43. package/dist/types/types.d.ts +76 -0
  44. package/dist/types/types.js +110 -0
  45. package/dist/types/types.js.map +1 -0
  46. package/dist/utils/migration_lib.d.ts +11 -0
  47. package/dist/utils/migration_lib.js +64 -0
  48. package/dist/utils/migration_lib.js.map +1 -0
  49. package/dist/utils/pgwire_utils.d.ts +16 -0
  50. package/dist/utils/pgwire_utils.js +70 -0
  51. package/dist/utils/pgwire_utils.js.map +1 -0
  52. package/dist/utils/populate_test_data.d.ts +8 -0
  53. package/dist/utils/populate_test_data.js +65 -0
  54. package/dist/utils/populate_test_data.js.map +1 -0
  55. package/package.json +49 -0
  56. package/src/api/PostgresRouteAPIAdapter.ts +307 -0
  57. package/src/auth/SupabaseKeyCollector.ts +70 -0
  58. package/src/index.ts +5 -0
  59. package/src/module/PostgresModule.ts +122 -0
  60. package/src/replication/ConnectionManagerFactory.ts +28 -0
  61. package/src/replication/PgManager.ts +70 -0
  62. package/src/replication/PgRelation.ts +31 -0
  63. package/src/replication/PostgresErrorRateLimiter.ts +44 -0
  64. package/src/replication/WalStream.ts +639 -0
  65. package/src/replication/WalStreamReplicationJob.ts +142 -0
  66. package/src/replication/WalStreamReplicator.ts +45 -0
  67. package/src/replication/replication-index.ts +5 -0
  68. package/src/replication/replication-utils.ts +329 -0
  69. package/src/types/types.ts +159 -0
  70. package/src/utils/migration_lib.ts +79 -0
  71. package/src/utils/pgwire_utils.ts +73 -0
  72. package/src/utils/populate_test_data.ts +77 -0
  73. package/test/src/__snapshots__/pg_test.test.ts.snap +256 -0
  74. package/test/src/env.ts +7 -0
  75. package/test/src/large_batch.test.ts +195 -0
  76. package/test/src/pg_test.test.ts +450 -0
  77. package/test/src/schema_changes.test.ts +543 -0
  78. package/test/src/setup.ts +7 -0
  79. package/test/src/slow_tests.test.ts +335 -0
  80. package/test/src/util.ts +105 -0
  81. package/test/src/validation.test.ts +64 -0
  82. package/test/src/wal_stream.test.ts +319 -0
  83. package/test/src/wal_stream_utils.ts +121 -0
  84. package/test/tsconfig.json +28 -0
  85. package/tsconfig.json +31 -0
  86. package/tsconfig.tsbuildinfo +1 -0
  87. package/vitest.config.ts +9 -0
@@ -0,0 +1,543 @@
+ import { compareIds, putOp, removeOp } from '@core-tests/stream_utils.js';
+ import { describe, expect, test } from 'vitest';
+ import { walStreamTest } from './wal_stream_utils.js';
+ import { INITIALIZED_MONGO_STORAGE_FACTORY, StorageFactory } from './util.js';
+
+ describe(
+   'schema changes',
+   function () {
+     defineTests(INITIALIZED_MONGO_STORAGE_FACTORY);
+   },
+   { timeout: 20_000 }
+ );
+
+ const BASIC_SYNC_RULES = `
+ bucket_definitions:
+   global:
+     data:
+       - SELECT id, * FROM "test_data"
+ `;
+
+ const PUT_T1 = putOp('test_data', { id: 't1', description: 'test1' });
+ const PUT_T2 = putOp('test_data', { id: 't2', description: 'test2' });
+ const PUT_T3 = putOp('test_data', { id: 't3', description: 'test3' });
+
+ const REMOVE_T1 = removeOp('test_data', 't1');
+ const REMOVE_T2 = removeOp('test_data', 't2');
+
+ function defineTests(factory: StorageFactory) {
+   test(
+     're-create table',
+     walStreamTest(factory, async (context) => {
+       // Drop a table and re-create it.
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+       const { pool } = context;
+
+       await pool.query(`DROP TABLE IF EXISTS test_data`);
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+       await pool.query(
+         { statement: `DROP TABLE test_data` },
+         { statement: `CREATE TABLE test_data(id text primary key, description text)` },
+         { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       // Initial inserts
+       expect(data.slice(0, 2)).toMatchObject([PUT_T1, PUT_T2]);
+
+       // Truncate - order doesn't matter
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([REMOVE_T1, REMOVE_T2]);
+
+       expect(data.slice(4)).toMatchObject([
+         // Snapshot insert
+         PUT_T3,
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T3
+       ]);
+     })
+   );
+
+   test(
+     'add table',
+     walStreamTest(factory, async (context) => {
+       // Add table after initial replication
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+       const { pool } = context;
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data).toMatchObject([
+         // Snapshot insert
+         PUT_T1,
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T1
+       ]);
+     })
+   );
+
+   test(
+     'rename table (1)',
+     walStreamTest(factory, async (context) => {
+       const { pool } = context;
+
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       // Rename table not in sync rules -> in sync rules
+       await pool.query(`CREATE TABLE test_data_old(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data_old(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data_old RENAME TO test_data` },
+         // We need an operation to detect the change
+         { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2).sort(compareIds)).toMatchObject([
+         // Snapshot insert
+         PUT_T1,
+         PUT_T2
+       ]);
+       expect(data.slice(2)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T2
+       ]);
+     })
+   );
+
+   test(
+     'rename table (2)',
+     walStreamTest(factory, async (context) => {
+       // Rename table in sync rules -> in sync rules
+       const { pool } = context;
+
+       await context.updateSyncRules(`
+         bucket_definitions:
+           global:
+             data:
+               - SELECT id, * FROM "test_data%"
+       `);
+
+       await pool.query(`CREATE TABLE test_data1(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data1(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data1 RENAME TO test_data2` },
+         // We need an operation to detect the change
+         { statement: `INSERT INTO test_data2(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial replication
+         putOp('test_data1', { id: 't1', description: 'test1' }),
+         // Initial truncate
+         removeOp('test_data1', 't1')
+       ]);
+
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+         // Snapshot insert
+         putOp('test_data2', { id: 't1', description: 'test1' }),
+         putOp('test_data2', { id: 't2', description: 'test2' })
+       ]);
+       expect(data.slice(4)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         putOp('test_data2', { id: 't2', description: 'test2' })
+       ]);
+     })
+   );
+
+   test(
+     'rename table (3)',
+     walStreamTest(factory, async (context) => {
+       // Rename table in sync rules -> not in sync rules
+
+       const { pool } = context;
+
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data RENAME TO test_data_na` },
+         // We need an operation to detect the change
+         { statement: `INSERT INTO test_data_na(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data).toMatchObject([
+         // Initial replication
+         PUT_T1,
+         // Truncate
+         REMOVE_T1
+       ]);
+     })
+   );
+
+   test(
+     'change replica id',
+     walStreamTest(factory, async (context) => {
+       // Change replica id from default to full
+       // Causes a re-import of the table.
+
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data REPLICA IDENTITY FULL` },
+         // We need an operation to detect the change
+         { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial inserts
+         PUT_T1,
+         // Truncate
+         REMOVE_T1
+       ]);
+
+       // Snapshot - order doesn't matter
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
+
+       expect(data.slice(4).sort(compareIds)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T2
+       ]);
+     })
+   );
+
+   test(
+     'change full replica id by adding column',
+     walStreamTest(factory, async (context) => {
+       // Change replica id from full by adding column
+       // Causes a re-import of the table.
+       // Other changes such as renaming column would have the same effect
+
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`ALTER TABLE test_data REPLICA IDENTITY FULL`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data ADD COLUMN other TEXT` },
+         { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial inserts
+         PUT_T1,
+         // Truncate
+         REMOVE_T1
+       ]);
+
+       // Snapshot - order doesn't matter
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+         putOp('test_data', { id: 't1', description: 'test1', other: null }),
+         putOp('test_data', { id: 't2', description: 'test2', other: null })
+       ]);
+
+       expect(data.slice(4).sort(compareIds)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         putOp('test_data', { id: 't2', description: 'test2', other: null })
+       ]);
+     })
+   );
+
+   test(
+     'change default replica id by changing column type',
+     walStreamTest(factory, async (context) => {
+       // Change default replica id by changing column type
+       // Causes a re-import of the table.
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data ALTER COLUMN id TYPE varchar` },
+         { statement: `INSERT INTO test_data(id, description) VALUES('t2', 'test2')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial inserts
+         PUT_T1,
+         // Truncate
+         REMOVE_T1
+       ]);
+
+       // Snapshot - order doesn't matter
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2]);
+
+       expect(data.slice(4).sort(compareIds)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T2
+       ]);
+     })
+   );
+
+   test(
+     'change index id by changing column type',
+     walStreamTest(factory, async (context) => {
+       // Change index replica id by changing column type
+       // Causes a re-import of the table.
+       // Secondary functionality tested here is that replica id column order stays
+       // the same between initial and incremental replication.
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text not null)`);
+       await pool.query(`CREATE UNIQUE INDEX i1 ON test_data(description, id)`);
+       await pool.query(`ALTER TABLE test_data REPLICA IDENTITY USING INDEX i1`);
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+       await pool.query(
+         { statement: `ALTER TABLE test_data ALTER COLUMN description TYPE varchar` },
+         { statement: `INSERT INTO test_data(id, description) VALUES('t3', 'test3')` }
+       );
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial snapshot
+         PUT_T1,
+         // Streamed
+         PUT_T2
+       ]);
+
+       expect(data.slice(2, 4).sort(compareIds)).toMatchObject([
+         // Truncate - any order
+         REMOVE_T1,
+         REMOVE_T2
+       ]);
+
+       // Snapshot - order doesn't matter
+       expect(data.slice(4, 7).sort(compareIds)).toMatchObject([PUT_T1, PUT_T2, PUT_T3]);
+
+       expect(data.slice(7).sort(compareIds)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T3
+       ]);
+     })
+   );
+
+   test(
+     'add to publication',
+     walStreamTest(factory, async (context) => {
+       // Add table to publication after initial replication
+       const { pool } = context;
+
+       await pool.query(`DROP PUBLICATION powersync`);
+       await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
+       await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
+
+       const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+       await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_data`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t3', 'test3')`);
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 3).sort(compareIds)).toMatchObject([
+         // Snapshot insert - any order
+         PUT_T1,
+         PUT_T2,
+         PUT_T3
+       ]);
+
+       expect(data.slice(3)).toMatchObject([
+         // Replicated insert
+         // We may eventually be able to de-duplicate this
+         PUT_T3
+       ]);
+
+       const metrics = await storage.factory.getStorageMetrics();
+       expect(metrics.replication_size_bytes).toBeGreaterThan(0);
+     })
+   );
+
+   test(
+     'add to publication (not in sync rules)',
+     walStreamTest(factory, async (context) => {
+       // Add table to publication after initial replication
+       // Since the table is not in sync rules, it should not be replicated.
+       const { pool } = context;
+
+       await pool.query(`DROP PUBLICATION powersync`);
+       await pool.query(`CREATE TABLE test_foo(id text primary key, description text)`);
+       await pool.query(`CREATE PUBLICATION powersync FOR table test_foo`);
+
+       const storage = await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_other(id text primary key, description text)`);
+       await pool.query(`INSERT INTO test_other(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_other(id, description) VALUES('t2', 'test2')`);
+
+       await pool.query(`ALTER PUBLICATION powersync ADD TABLE test_other`);
+       await pool.query(`INSERT INTO test_other(id, description) VALUES('t3', 'test3')`);
+
+       const data = await context.getBucketData('global[]');
+       expect(data).toMatchObject([]);
+
+       const metrics = await storage.factory.getStorageMetrics();
+       expect(metrics.replication_size_bytes).toEqual(0);
+     })
+   );
+
+   test(
+     'replica identity nothing',
+     walStreamTest(factory, async (context) => {
+       // Technically not a schema change, but fits here.
+       // Replica ID works a little differently here - the table doesn't have
+       // one defined, but we generate a unique one for each replicated row.
+
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text primary key, description text)`);
+       await pool.query(`ALTER TABLE test_data REPLICA IDENTITY NOTHING`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+       // Just as an FYI - cannot update or delete here
+       await expect(pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = 't2'`)).rejects.toThrow(
+         'does not have a replica identity and publishes updates'
+       );
+
+       // Testing TRUNCATE is important here - this depends on current_data having unique
+       // ids.
+       await pool.query(`TRUNCATE TABLE test_data`);
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial inserts
+         PUT_T1,
+         PUT_T2
+       ]);
+
+       expect(data.slice(2).sort(compareIds)).toMatchObject([
+         // Truncate
+         REMOVE_T1,
+         REMOVE_T2
+       ]);
+     })
+   );
+
+   test(
+     'replica identity default without PK',
+     walStreamTest(factory, async (context) => {
+       // Same as no replica identity
+       const { pool } = context;
+       await context.updateSyncRules(BASIC_SYNC_RULES);
+
+       await pool.query(`CREATE TABLE test_data(id text, description text)`);
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t1', 'test1')`);
+
+       await context.replicateSnapshot();
+       context.startStreaming();
+
+       await pool.query(`INSERT INTO test_data(id, description) VALUES('t2', 'test2')`);
+
+       // Just as an FYI - cannot update or delete here
+       await expect(pool.query(`UPDATE test_data SET description = 'test2b' WHERE id = 't2'`)).rejects.toThrow(
+         'does not have a replica identity and publishes updates'
+       );
+
+       // Testing TRUNCATE is important here - this depends on current_data having unique
+       // ids.
+       await pool.query(`TRUNCATE TABLE test_data`);
+
+       const data = await context.getBucketData('global[]');
+
+       expect(data.slice(0, 2)).toMatchObject([
+         // Initial inserts
+         PUT_T1,
+         PUT_T2
+       ]);
+
+       expect(data.slice(2).sort(compareIds)).toMatchObject([
+         // Truncate
+         REMOVE_T1,
+         REMOVE_T2
+       ]);
+     })
+   );
+ }
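
Note for readers of this diff: every test above runs through the same walStreamTest harness imported from ./wal_stream_utils.js, and the assertions compare bucket data against operations built by putOp and removeOp from @core-tests/stream_utils.js; neither helper's source appears in this hunk. As rough orientation only, here is a minimal sketch of the shapes these helpers could have. All names, fields, and signatures below are assumptions for illustration, not quotes from the package.

// Hypothetical sketch - the real helpers live in @core-tests/stream_utils.js
// and package/test/src/wal_stream_utils.ts, and may differ in structure.
interface OplogEntry {
  op: 'PUT' | 'REMOVE'; // assumed operation tags
  object_type: string; // source table name
  object_id: string; // row id within the bucket
  data?: Record<string, unknown>; // row contents, present for PUT operations
}

// Assumed shape of putOp: the expected PUT entry for a replicated row.
function putOpSketch(table: string, row: Record<string, unknown> & { id: string }): OplogEntry {
  return { op: 'PUT', object_type: table, object_id: row.id, data: row };
}

// Assumed shape of removeOp: the expected REMOVE entry for a deleted row.
function removeOpSketch(table: string, id: string): OplogEntry {
  return { op: 'REMOVE', object_type: table, object_id: id };
}

// Assumed shape of the walStreamTest wrapper: build a test context, run the
// test body, and always tear the context down, even when an assertion fails.
function walStreamTestSketch<C extends { dispose(): Promise<void> }>(
  createContext: () => Promise<C>,
  fn: (context: C) => Promise<void>
): () => Promise<void> {
  return async () => {
    const context = await createContext();
    try {
      await fn(context);
    } finally {
      await context.dispose(); // e.g. stop streaming, drop the replication slot
    }
  };
}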
@@ -0,0 +1,7 @@
+ import { container } from '@powersync/lib-services-framework';
+ import { beforeAll } from 'vitest';
+
+ beforeAll(() => {
+   // Executes for every test file
+   container.registerDefaults();
+ });
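
This setup file runs before every test file because vitest loads it through the setupFiles option; the package's own vitest.config.ts (9 lines, not shown in this diff) presumably wires it up along these lines. A minimal sketch of such a config, assuming the path and options rather than quoting the actual file:

// Hypothetical vitest.config.ts - the real config in this package is not
// shown in this diff and may differ.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    // Run test/src/setup.ts before each test file, so that
    // container.registerDefaults() executes for every file.
    setupFiles: ['test/src/setup.ts']
  }
});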