@powersync/service-core 0.13.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. package/CHANGELOG.md +31 -0
  2. package/dist/entry/commands/compact-action.js +14 -14
  3. package/dist/entry/commands/compact-action.js.map +1 -1
  4. package/dist/entry/commands/migrate-action.js +15 -4
  5. package/dist/entry/commands/migrate-action.js.map +1 -1
  6. package/dist/index.d.ts +1 -3
  7. package/dist/index.js +1 -3
  8. package/dist/index.js.map +1 -1
  9. package/dist/migrations/PowerSyncMigrationManager.d.ts +17 -0
  10. package/dist/migrations/PowerSyncMigrationManager.js +21 -0
  11. package/dist/migrations/PowerSyncMigrationManager.js.map +1 -0
  12. package/dist/migrations/ensure-automatic-migrations.d.ts +4 -0
  13. package/dist/migrations/ensure-automatic-migrations.js +14 -0
  14. package/dist/migrations/ensure-automatic-migrations.js.map +1 -0
  15. package/dist/migrations/migrations-index.d.ts +2 -3
  16. package/dist/migrations/migrations-index.js +2 -3
  17. package/dist/migrations/migrations-index.js.map +1 -1
  18. package/dist/routes/configure-fastify.d.ts +12 -12
  19. package/dist/routes/endpoints/admin.d.ts +24 -24
  20. package/dist/storage/BucketStorage.d.ts +51 -3
  21. package/dist/storage/BucketStorage.js +26 -0
  22. package/dist/storage/BucketStorage.js.map +1 -1
  23. package/dist/storage/bson.d.ts +24 -0
  24. package/dist/storage/bson.js +73 -0
  25. package/dist/storage/bson.js.map +1 -0
  26. package/dist/storage/storage-index.d.ts +3 -14
  27. package/dist/storage/storage-index.js +3 -14
  28. package/dist/storage/storage-index.js.map +1 -1
  29. package/dist/sync/sync.js +3 -1
  30. package/dist/sync/sync.js.map +1 -1
  31. package/dist/system/ServiceContext.d.ts +3 -0
  32. package/dist/system/ServiceContext.js +11 -3
  33. package/dist/system/ServiceContext.js.map +1 -1
  34. package/dist/util/config/types.d.ts +2 -2
  35. package/dist/util/utils.d.ts +17 -1
  36. package/dist/util/utils.js +49 -1
  37. package/dist/util/utils.js.map +1 -1
  38. package/package.json +7 -8
  39. package/src/entry/commands/compact-action.ts +19 -14
  40. package/src/entry/commands/migrate-action.ts +17 -4
  41. package/src/index.ts +1 -4
  42. package/src/migrations/PowerSyncMigrationManager.ts +42 -0
  43. package/src/migrations/ensure-automatic-migrations.ts +15 -0
  44. package/src/migrations/migrations-index.ts +2 -3
  45. package/src/storage/BucketStorage.ts +59 -3
  46. package/src/storage/bson.ts +78 -0
  47. package/src/storage/storage-index.ts +3 -15
  48. package/src/sync/sync.ts +3 -1
  49. package/src/system/ServiceContext.ts +17 -4
  50. package/src/util/config/types.ts +2 -2
  51. package/src/util/utils.ts +47 -1
  52. package/test/src/env.ts +0 -1
  53. package/tsconfig.tsbuildinfo +1 -1
  54. package/dist/db/db-index.d.ts +0 -1
  55. package/dist/db/db-index.js +0 -2
  56. package/dist/db/db-index.js.map +0 -1
  57. package/dist/db/mongo.d.ts +0 -35
  58. package/dist/db/mongo.js +0 -73
  59. package/dist/db/mongo.js.map +0 -1
  60. package/dist/locks/LockManager.d.ts +0 -10
  61. package/dist/locks/LockManager.js +0 -7
  62. package/dist/locks/LockManager.js.map +0 -1
  63. package/dist/locks/MongoLocks.d.ts +0 -36
  64. package/dist/locks/MongoLocks.js +0 -81
  65. package/dist/locks/MongoLocks.js.map +0 -1
  66. package/dist/locks/locks-index.d.ts +0 -2
  67. package/dist/locks/locks-index.js +0 -3
  68. package/dist/locks/locks-index.js.map +0 -1
  69. package/dist/migrations/db/migrations/1684951997326-init.d.ts +0 -3
  70. package/dist/migrations/db/migrations/1684951997326-init.js +0 -33
  71. package/dist/migrations/db/migrations/1684951997326-init.js.map +0 -1
  72. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +0 -2
  73. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +0 -5
  74. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +0 -1
  75. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +0 -3
  76. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +0 -56
  77. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +0 -1
  78. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +0 -3
  79. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +0 -29
  80. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +0 -1
  81. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.d.ts +0 -3
  82. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js +0 -31
  83. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js.map +0 -1
  84. package/dist/migrations/definitions.d.ts +0 -18
  85. package/dist/migrations/definitions.js +0 -6
  86. package/dist/migrations/definitions.js.map +0 -1
  87. package/dist/migrations/executor.d.ts +0 -16
  88. package/dist/migrations/executor.js +0 -64
  89. package/dist/migrations/executor.js.map +0 -1
  90. package/dist/migrations/migrations.d.ts +0 -18
  91. package/dist/migrations/migrations.js +0 -110
  92. package/dist/migrations/migrations.js.map +0 -1
  93. package/dist/migrations/store/migration-store.d.ts +0 -11
  94. package/dist/migrations/store/migration-store.js +0 -46
  95. package/dist/migrations/store/migration-store.js.map +0 -1
  96. package/dist/storage/MongoBucketStorage.d.ts +0 -48
  97. package/dist/storage/MongoBucketStorage.js +0 -427
  98. package/dist/storage/MongoBucketStorage.js.map +0 -1
  99. package/dist/storage/mongo/MongoBucketBatch.d.ts +0 -74
  100. package/dist/storage/mongo/MongoBucketBatch.js +0 -683
  101. package/dist/storage/mongo/MongoBucketBatch.js.map +0 -1
  102. package/dist/storage/mongo/MongoCompactor.d.ts +0 -40
  103. package/dist/storage/mongo/MongoCompactor.js +0 -310
  104. package/dist/storage/mongo/MongoCompactor.js.map +0 -1
  105. package/dist/storage/mongo/MongoIdSequence.d.ts +0 -12
  106. package/dist/storage/mongo/MongoIdSequence.js +0 -21
  107. package/dist/storage/mongo/MongoIdSequence.js.map +0 -1
  108. package/dist/storage/mongo/MongoPersistedSyncRules.d.ts +0 -9
  109. package/dist/storage/mongo/MongoPersistedSyncRules.js +0 -9
  110. package/dist/storage/mongo/MongoPersistedSyncRules.js.map +0 -1
  111. package/dist/storage/mongo/MongoPersistedSyncRulesContent.d.ts +0 -20
  112. package/dist/storage/mongo/MongoPersistedSyncRulesContent.js +0 -26
  113. package/dist/storage/mongo/MongoPersistedSyncRulesContent.js.map +0 -1
  114. package/dist/storage/mongo/MongoStorageProvider.d.ts +0 -5
  115. package/dist/storage/mongo/MongoStorageProvider.js +0 -26
  116. package/dist/storage/mongo/MongoStorageProvider.js.map +0 -1
  117. package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +0 -38
  118. package/dist/storage/mongo/MongoSyncBucketStorage.js +0 -534
  119. package/dist/storage/mongo/MongoSyncBucketStorage.js.map +0 -1
  120. package/dist/storage/mongo/MongoSyncRulesLock.d.ts +0 -16
  121. package/dist/storage/mongo/MongoSyncRulesLock.js +0 -65
  122. package/dist/storage/mongo/MongoSyncRulesLock.js.map +0 -1
  123. package/dist/storage/mongo/MongoWriteCheckpointAPI.d.ts +0 -20
  124. package/dist/storage/mongo/MongoWriteCheckpointAPI.js +0 -104
  125. package/dist/storage/mongo/MongoWriteCheckpointAPI.js.map +0 -1
  126. package/dist/storage/mongo/OperationBatch.d.ts +0 -35
  127. package/dist/storage/mongo/OperationBatch.js +0 -119
  128. package/dist/storage/mongo/OperationBatch.js.map +0 -1
  129. package/dist/storage/mongo/PersistedBatch.d.ts +0 -46
  130. package/dist/storage/mongo/PersistedBatch.js +0 -223
  131. package/dist/storage/mongo/PersistedBatch.js.map +0 -1
  132. package/dist/storage/mongo/config.d.ts +0 -19
  133. package/dist/storage/mongo/config.js +0 -26
  134. package/dist/storage/mongo/config.js.map +0 -1
  135. package/dist/storage/mongo/db.d.ts +0 -36
  136. package/dist/storage/mongo/db.js +0 -47
  137. package/dist/storage/mongo/db.js.map +0 -1
  138. package/dist/storage/mongo/models.d.ts +0 -163
  139. package/dist/storage/mongo/models.js +0 -27
  140. package/dist/storage/mongo/models.js.map +0 -1
  141. package/dist/storage/mongo/util.d.ts +0 -54
  142. package/dist/storage/mongo/util.js +0 -190
  143. package/dist/storage/mongo/util.js.map +0 -1
  144. package/src/db/db-index.ts +0 -1
  145. package/src/db/mongo.ts +0 -81
  146. package/src/locks/LockManager.ts +0 -16
  147. package/src/locks/MongoLocks.ts +0 -142
  148. package/src/locks/locks-index.ts +0 -2
  149. package/src/migrations/db/migrations/1684951997326-init.ts +0 -38
  150. package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +0 -5
  151. package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +0 -102
  152. package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +0 -34
  153. package/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +0 -37
  154. package/src/migrations/definitions.ts +0 -21
  155. package/src/migrations/executor.ts +0 -87
  156. package/src/migrations/migrations.ts +0 -142
  157. package/src/migrations/store/migration-store.ts +0 -63
  158. package/src/storage/MongoBucketStorage.ts +0 -541
  159. package/src/storage/mongo/MongoBucketBatch.ts +0 -900
  160. package/src/storage/mongo/MongoCompactor.ts +0 -393
  161. package/src/storage/mongo/MongoIdSequence.ts +0 -24
  162. package/src/storage/mongo/MongoPersistedSyncRules.ts +0 -16
  163. package/src/storage/mongo/MongoPersistedSyncRulesContent.ts +0 -50
  164. package/src/storage/mongo/MongoStorageProvider.ts +0 -31
  165. package/src/storage/mongo/MongoSyncBucketStorage.ts +0 -640
  166. package/src/storage/mongo/MongoSyncRulesLock.ts +0 -85
  167. package/src/storage/mongo/MongoWriteCheckpointAPI.ts +0 -154
  168. package/src/storage/mongo/OperationBatch.ts +0 -131
  169. package/src/storage/mongo/PersistedBatch.ts +0 -285
  170. package/src/storage/mongo/config.ts +0 -40
  171. package/src/storage/mongo/db.ts +0 -88
  172. package/src/storage/mongo/models.ts +0 -187
  173. package/src/storage/mongo/util.ts +0 -203
  174. package/test/src/__snapshots__/sync.test.ts.snap +0 -332
  175. package/test/src/bucket_validation.test.ts +0 -143
  176. package/test/src/bucket_validation.ts +0 -60
  177. package/test/src/compacting.test.ts +0 -295
  178. package/test/src/data_storage.test.ts +0 -1569
  179. package/test/src/stream_utils.ts +0 -42
  180. package/test/src/sync.test.ts +0 -511
  181. package/test/src/util.ts +0 -150
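
Taken together, the list above shows the MongoDB-specific storage, locks, and migration-store code (`storage/mongo/*`, `locks/*`, `migrations/db/*`, `MongoBucketStorage`) leaving `@powersync/service-core`, while abstract replacements (`BucketStorage`, `PowerSyncMigrationManager`, `ensure-automatic-migrations`) grow. The hunk below is the deletion of `package/test/src/data_storage.test.ts` (entry 178; the header `@@ -1,1569 +0,0 @@` records 1569 lines removed and none added, i.e. the whole file). That suite was already written against a storage factory rather than against MongoDB directly, which is what lets it move out of this package together with the backend. A minimal sketch of the pattern, with a stub factory standing in for the real `MONGO_STORAGE_FACTORY` from `./util.js` (the stub is illustrative, not part of the package API):

    import { describe, expect, test } from 'vitest';

    // The whole suite is written once against this factory signature...
    type StorageFactory = () => Promise<{ getInstance(rules: unknown): unknown }>;

    function defineDataStorageTests(factory: StorageFactory) {
      test('factory produces a storage instance', async () => {
        const bucketFactory = await factory();
        expect(bucketFactory.getInstance({})).toBeDefined();
      });
      // ...the real file defines ~20 such tests against the BucketStorage API.
    }

    // ...and bound to a concrete backend in one place, so another storage
    // module can reuse the suite by supplying its own factory.
    const STUB_STORAGE_FACTORY: StorageFactory = async () => ({ getInstance: () => ({}) });

    describe('store - stub', function () {
      defineDataStorageTests(STUB_STORAGE_FACTORY);
    });
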
@@ -1,1569 +0,0 @@
- import { BucketDataBatchOptions, SaveOperationTag } from '@/storage/BucketStorage.js';
- import { getUuidReplicaIdentityBson } from '@/util/util-index.js';
- import { RequestParameters } from '@powersync/service-sync-rules';
- import { describe, expect, test } from 'vitest';
- import { fromAsync, oneFromAsync } from './stream_utils.js';
- import {
- BATCH_OPTIONS,
- getBatchData,
- getBatchMeta,
- makeTestTable,
- MONGO_STORAGE_FACTORY,
- PARSE_OPTIONS,
- rid,
- StorageFactory,
- testRules
- } from './util.js';
-
- const TEST_TABLE = makeTestTable('test', ['id']);
-
- describe('store - mongodb', function () {
- defineDataStorageTests(MONGO_STORAGE_FACTORY);
- });
-
- function defineDataStorageTests(factory: StorageFactory) {
- test('save and load parameters', async () => {
- const sync_rules = testRules(`
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
- data: []
- `);
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 't2',
- id1: 'user3',
- id2: 'user4',
- group_id: 'group2a'
- },
- afterReplicaId: rid('t2')
- });
-
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 't1',
- id1: 'user1',
- id2: 'user2',
- group_id: 'group1a'
- },
- afterReplicaId: rid('t1')
- });
- });
-
- const parameters = await storage.getParameterSets(result!.flushed_op, [['mybucket', '1', 'user1']]);
- expect(parameters).toEqual([
- {
- group_id: 'group1a'
- }
- ]);
- });
-
- test('it should use the latest version', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT group_id FROM test WHERE id = token_parameters.user_id
- data: []
- `
- );
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'user1',
- group_id: 'group1'
- },
- afterReplicaId: rid('user1')
- });
- });
- const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'user1',
- group_id: 'group2'
- },
- afterReplicaId: rid('user1')
- });
- });
-
- const parameters = await storage.getParameterSets(result2!.flushed_op, [['mybucket', '1', 'user1']]);
- expect(parameters).toEqual([
- {
- group_id: 'group2'
- }
- ]);
-
- // Use the checkpoint to get older data if relevant
- const parameters2 = await storage.getParameterSets(result1!.flushed_op, [['mybucket', '1', 'user1']]);
- expect(parameters2).toEqual([
- {
- group_id: 'group1'
- }
- ]);
- });
-
- test('it should use the latest version after updates', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT id AS todo_id
- FROM todos
- WHERE list_id IN token_parameters.list_id
- data: []
- `
- );
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const table = makeTestTable('todos', ['id', 'list_id']);
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Create two todos which initially belong to different lists
- await batch.save({
- sourceTable: table,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'todo1',
- list_id: 'list1'
- },
- afterReplicaId: rid('todo1')
- });
- await batch.save({
- sourceTable: table,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'todo2',
- list_id: 'list2'
- },
- afterReplicaId: rid('todo2')
- });
- });
-
- const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Update the second todo item to now belong to list 1
- await batch.save({
- sourceTable: table,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'todo2',
- list_id: 'list1'
- },
- afterReplicaId: rid('todo2')
- });
- });
-
- // We specifically request the todo_ids for both lists.
- // The removal operation for the association of `list2`::`todo2` should not interfere with the new
- // association of `list1`::`todo2`
- const parameters = await storage.getParameterSets(BigInt(result2!.flushed_op).toString(), [
- ['mybucket', '1', 'list1'],
- ['mybucket', '1', 'list2']
- ]);
-
- expect(parameters.sort((a, b) => (a.todo_id as string).localeCompare(b.todo_id as string))).toEqual([
- {
- todo_id: 'todo1'
- },
- {
- todo_id: 'todo2'
- }
- ]);
- });
-
- test('save and load parameters with different number types', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT group_id FROM test WHERE n1 = token_parameters.n1 and f2 = token_parameters.f2 and f3 = token_parameters.f3
- data: []
- `
- );
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 't1',
- group_id: 'group1',
- n1: 314n,
- f2: 314,
- f3: 3.14
- },
- afterReplicaId: rid('t1')
- });
- });
-
- const TEST_PARAMS = { group_id: 'group1' };
-
- const checkpoint = result!.flushed_op;
-
- const parameters1 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3.14]]);
- expect(parameters1).toEqual([TEST_PARAMS]);
- const parameters2 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314, 314n, 3.14]]);
- expect(parameters2).toEqual([TEST_PARAMS]);
- const parameters3 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 314n, 314, 3]]);
- expect(parameters3).toEqual([]);
- });
-
- test('save and load parameters with large numbers', async () => {
- // This ensures serialization / deserialization of "current_data" is done correctly.
- // This specific case tested here cannot happen with postgres in practice, but we still
- // test this to ensure correct deserialization.
-
- const sync_rules = testRules(
- `
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT group_id FROM test WHERE n1 = token_parameters.n1
- data: []
- `
- );
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 't1',
- group_id: 'group1',
- n1: 1152921504606846976n // 2^60
- },
- afterReplicaId: rid('t1')
- });
-
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 't1',
- group_id: 'group1',
- // Simulate a TOAST value, even though it can't happen for values like this
- // in practice.
- n1: undefined
- },
- afterReplicaId: rid('t1')
- });
- });
-
- const TEST_PARAMS = { group_id: 'group1' };
-
- const checkpoint = result!.flushed_op;
-
- const parameters1 = await storage.getParameterSets(checkpoint, [['mybucket', '1', 1152921504606846976n]]);
- expect(parameters1).toEqual([TEST_PARAMS]);
- });
-
- test('removing row', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1'
- },
- afterReplicaId: rid('test1')
- });
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- beforeReplicaId: rid('test1')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- checksum: d.checksum
- };
- });
-
- const c1 = 2871785649;
- const c2 = 2872534815;
-
- expect(data).toEqual([
- { op: 'PUT', object_id: 'test1', checksum: c1 },
- { op: 'REMOVE', object_id: 'test1', checksum: c2 }
- ]);
-
- const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()];
- expect(checksums).toEqual([
- {
- bucket: 'global[]',
- checksum: (c1 + c2) & 0xffffffff,
- count: 2
- }
- ]);
- });
-
- test('save and load parameters with workspaceId', async () => {
- const WORKSPACE_TABLE = makeTestTable('workspace', ['id']);
-
- const sync_rules_content = testRules(
- `
- bucket_definitions:
- by_workspace:
- parameters:
- - SELECT id as workspace_id FROM workspace WHERE
- workspace."userId" = token_parameters.user_id
- data: []
- `
- );
- const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules;
-
- const storage = (await factory()).getInstance(sync_rules_content);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- userId: 'u1'
- },
- afterReplicaId: rid('workspace1')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const parameters = new RequestParameters({ sub: 'u1' }, {});
-
- const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
-
- const lookups = q1.getLookups(parameters);
- expect(lookups).toEqual([['by_workspace', '1', 'u1']]);
-
- const parameter_sets = await storage.getParameterSets(checkpoint, lookups);
- expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }]);
-
- const buckets = await sync_rules.queryBucketIds({
- getParameterSets(lookups) {
- return storage.getParameterSets(checkpoint, lookups);
- },
- parameters
- });
- expect(buckets).toEqual(['by_workspace["workspace1"]']);
- });
-
- test('save and load parameters with dynamic global buckets', async () => {
- const WORKSPACE_TABLE = makeTestTable('workspace');
-
- const sync_rules_content = testRules(
- `
- bucket_definitions:
- by_public_workspace:
- parameters:
- - SELECT id as workspace_id FROM workspace WHERE
- workspace.visibility = 'public'
- data: []
- `
- );
- const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules;
-
- const storage = (await factory()).getInstance(sync_rules_content);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- visibility: 'public'
- },
- afterReplicaId: rid('workspace1')
- });
-
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace2',
- visibility: 'private'
- },
- afterReplicaId: rid('workspace2')
- });
-
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace3',
- visibility: 'public'
- },
- afterReplicaId: rid('workspace3')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const parameters = new RequestParameters({ sub: 'unknown' }, {});
-
- const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
-
- const lookups = q1.getLookups(parameters);
- expect(lookups).toEqual([['by_public_workspace', '1']]);
-
- const parameter_sets = await storage.getParameterSets(checkpoint, lookups);
- parameter_sets.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
- expect(parameter_sets).toEqual([{ workspace_id: 'workspace1' }, { workspace_id: 'workspace3' }]);
-
- const buckets = await sync_rules.queryBucketIds({
- getParameterSets(lookups) {
- return storage.getParameterSets(checkpoint, lookups);
- },
- parameters
- });
- buckets.sort();
- expect(buckets).toEqual(['by_public_workspace["workspace1"]', 'by_public_workspace["workspace3"]']);
- });
-
- test('multiple parameter queries', async () => {
- const WORKSPACE_TABLE = makeTestTable('workspace');
-
- const sync_rules_content = testRules(
- `
- bucket_definitions:
- by_workspace:
- parameters:
- - SELECT id as workspace_id FROM workspace WHERE
- workspace.visibility = 'public'
- - SELECT id as workspace_id FROM workspace WHERE
- workspace.user_id = token_parameters.user_id
- data: []
- `
- );
- const sync_rules = sync_rules_content.parsed(PARSE_OPTIONS).sync_rules;
-
- const storage = (await factory()).getInstance(sync_rules_content);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace1',
- visibility: 'public'
- },
- afterReplicaId: rid('workspace1')
- });
-
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace2',
- visibility: 'private'
- },
- afterReplicaId: rid('workspace2')
- });
-
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace3',
- user_id: 'u1',
- visibility: 'private'
- },
- afterReplicaId: rid('workspace3')
- });
-
- await batch.save({
- sourceTable: WORKSPACE_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'workspace4',
- user_id: 'u2',
- visibility: 'private'
- },
- afterReplicaId: rid('workspace4')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const parameters = new RequestParameters({ sub: 'u1' }, {});
-
- // Test intermediate values - could be moved to sync_rules.test.ts
- const q1 = sync_rules.bucket_descriptors[0].parameter_queries[0];
- const lookups1 = q1.getLookups(parameters);
- expect(lookups1).toEqual([['by_workspace', '1']]);
-
- const parameter_sets1 = await storage.getParameterSets(checkpoint, lookups1);
- parameter_sets1.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
- expect(parameter_sets1).toEqual([{ workspace_id: 'workspace1' }]);
-
- const q2 = sync_rules.bucket_descriptors[0].parameter_queries[1];
- const lookups2 = q2.getLookups(parameters);
- expect(lookups2).toEqual([['by_workspace', '2', 'u1']]);
-
- const parameter_sets2 = await storage.getParameterSets(checkpoint, lookups2);
- parameter_sets2.sort((a, b) => JSON.stringify(a).localeCompare(JSON.stringify(b)));
- expect(parameter_sets2).toEqual([{ workspace_id: 'workspace3' }]);
-
- // Test final values - the important part
- const buckets = await sync_rules.queryBucketIds({
- getParameterSets(lookups) {
- return storage.getParameterSets(checkpoint, lookups);
- },
- parameters
- });
- buckets.sort();
- expect(buckets).toEqual(['by_workspace["workspace1"]', 'by_workspace["workspace3"]']);
- });
-
- test('changing client ids', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT client_id as id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- const sourceTable = TEST_TABLE;
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- client_id: 'client1a',
- description: 'test1a'
- },
- afterReplicaId: rid('test1')
- });
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test1',
- client_id: 'client1b',
- description: 'test1b'
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test2',
- client_id: 'client2',
- description: 'test2'
- },
- afterReplicaId: rid('test2')
- });
- });
- const checkpoint = result!.flushed_op;
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id
- };
- });
-
- expect(data).toEqual([
- { op: 'PUT', object_id: 'client1a' },
- { op: 'PUT', object_id: 'client1b' },
- { op: 'REMOVE', object_id: 'client1a' },
- { op: 'PUT', object_id: 'client2' }
- ]);
- });
-
- test('re-apply delete', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1'
- },
- afterReplicaId: rid('test1')
- });
- });
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- beforeReplicaId: rid('test1')
- });
- });
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- beforeReplicaId: rid('test1')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- checksum: d.checksum
- };
- });
-
- const c1 = 2871785649;
- const c2 = 2872534815;
-
- expect(data).toEqual([
- { op: 'PUT', object_id: 'test1', checksum: c1 },
- { op: 'REMOVE', object_id: 'test1', checksum: c2 }
- ]);
-
- const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()];
- expect(checksums).toEqual([
- {
- bucket: 'global[]',
- checksum: (c1 + c2) & 0xffffffff,
- count: 2
- }
- ]);
- });
-
- test('re-apply update + delete', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1'
- },
- afterReplicaId: rid('test1')
- });
- });
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test1',
- description: undefined
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test1',
- description: undefined
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- beforeReplicaId: rid('test1')
- });
- });
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test1',
- description: undefined
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test1',
- description: undefined
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- beforeReplicaId: rid('test1')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']])));
-
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- checksum: d.checksum
- };
- });
-
- const c1 = 2871785649;
- const c2 = 2872534815;
-
- expect(data).toEqual([
- { op: 'PUT', object_id: 'test1', checksum: c1 },
- { op: 'PUT', object_id: 'test1', checksum: c1 },
- { op: 'PUT', object_id: 'test1', checksum: c1 },
- { op: 'REMOVE', object_id: 'test1', checksum: c2 }
- ]);
-
- const checksums = [...(await storage.getChecksums(checkpoint, ['global[]'])).values()];
- expect(checksums).toEqual([
- {
- bucket: 'global[]',
- checksum: (c1 + c1 + c1 + c2) & 0xffffffff,
- count: 4
- }
- ]);
- });
-
- test('truncate parameters', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- mybucket:
- parameters:
- - SELECT group_id FROM test WHERE id1 = token_parameters.user_id OR id2 = token_parameters.user_id
- data: []
- `
- );
-
- const storage = (await factory()).getInstance(sync_rules);
-
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable: TEST_TABLE,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 't2',
- id1: 'user3',
- id2: 'user4',
- group_id: 'group2a'
- },
- afterReplicaId: rid('t2')
- });
-
- await batch.truncate([TEST_TABLE]);
- });
-
- const { checkpoint } = await storage.getCheckpoint();
-
- const parameters = await storage.getParameterSets(checkpoint, [['mybucket', '1', 'user1']]);
- expect(parameters).toEqual([]);
- });
-
- test('batch with overlapping replica ids', async () => {
- // This test checks that we get the correct output when processing rows with:
- // 1. changing replica ids
- // 2. overlapping with replica ids of other rows in the same transaction (at different times)
- // If operations are not processed in input order, this breaks easily.
- // It can break at two places:
- // 1. Not getting the correct "current_data" state for each operation.
- // 2. Output order not being correct.
-
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "test"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- // Pre-setup
- const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1a'
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test2',
- description: 'test2a'
- },
- afterReplicaId: rid('test2')
- });
- });
-
- const checkpoint1 = result1?.flushed_op ?? '0';
-
- // Test batch
- const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
- // b
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1b'
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- before: {
- id: 'test1'
- },
- beforeReplicaId: rid('test1'),
- after: {
- id: 'test2',
- description: 'test2b'
- },
- afterReplicaId: rid('test2')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- before: {
- id: 'test2'
- },
- beforeReplicaId: rid('test2'),
- after: {
- id: 'test3',
- description: 'test3b'
- },
-
- afterReplicaId: rid('test3')
- });
-
- // c
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- after: {
- id: 'test2',
- description: 'test2c'
- },
- afterReplicaId: rid('test2')
- });
-
- // d
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test4',
- description: 'test4d'
- },
- afterReplicaId: rid('test4')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- before: {
- id: 'test4'
- },
- beforeReplicaId: rid('test4'),
- after: {
- id: 'test5',
- description: 'test5d'
- },
- afterReplicaId: rid('test5')
- });
- });
-
- const checkpoint2 = result2!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint2, new Map([['global[]', checkpoint1]])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- data: d.data
- };
- });
-
- // Operations must be in this order
- expect(data).toEqual([
- // b
- { op: 'PUT', object_id: 'test1', data: JSON.stringify({ id: 'test1', description: 'test1b' }) },
- { op: 'REMOVE', object_id: 'test1', data: null },
- { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2b' }) },
- { op: 'REMOVE', object_id: 'test2', data: null },
- { op: 'PUT', object_id: 'test3', data: JSON.stringify({ id: 'test3', description: 'test3b' }) },
-
- // c
- { op: 'PUT', object_id: 'test2', data: JSON.stringify({ id: 'test2', description: 'test2c' }) },
-
- // d
- { op: 'PUT', object_id: 'test4', data: JSON.stringify({ id: 'test4', description: 'test4d' }) },
- { op: 'REMOVE', object_id: 'test4', data: null },
- { op: 'PUT', object_id: 'test5', data: JSON.stringify({ id: 'test5', description: 'test5d' }) }
- ]);
- });
-
- test('changed data with replica identity full', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "test"
- `
- );
- function rid2(id: string, description: string) {
- return getUuidReplicaIdentityBson({ id, description }, [
- { name: 'id', type: 'VARCHAR', typeId: 25 },
- { name: 'description', type: 'VARCHAR', typeId: 25 }
- ]);
- }
- const storage = (await factory()).getInstance(sync_rules);
-
- const sourceTable = makeTestTable('test', ['id', 'description']);
-
- // Pre-setup
- const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1a'
- },
- afterReplicaId: rid2('test1', 'test1a')
- });
- });
-
- const checkpoint1 = result1?.flushed_op ?? '0';
-
- const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Unchanged, but has a before id
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- before: {
- id: 'test1',
- description: 'test1a'
- },
- beforeReplicaId: rid2('test1', 'test1a'),
- after: {
- id: 'test1',
- description: 'test1b'
- },
- afterReplicaId: rid2('test1', 'test1b')
- });
- });
-
- const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Delete
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- before: {
- id: 'test1',
- description: 'test1b'
- },
- beforeReplicaId: rid2('test1', 'test1b'),
- after: undefined
- });
- });
-
- const checkpoint3 = result3!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- data: d.data,
- subkey: d.subkey
- };
- });
-
- // Operations must be in this order
- expect(data).toEqual([
- // 2
- // The REMOVE is expected because the subkey changes
- {
- op: 'REMOVE',
- object_id: 'test1',
- data: null,
- subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
- },
- {
- op: 'PUT',
- object_id: 'test1',
- data: JSON.stringify({ id: 'test1', description: 'test1b' }),
- subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
- },
- // 3
- {
- op: 'REMOVE',
- object_id: 'test1',
- data: null,
- subkey: '6544e3899293153fa7b38331/500e9b68-a2fd-51ff-9c00-313e2fb9f562'
- }
- ]);
- });
-
- test('unchanged data with replica identity full', async () => {
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "test"
- `
- );
- function rid2(id: string, description: string) {
- return getUuidReplicaIdentityBson({ id, description }, [
- { name: 'id', type: 'VARCHAR', typeId: 25 },
- { name: 'description', type: 'VARCHAR', typeId: 25 }
- ]);
- }
-
- const storage = (await factory()).getInstance(sync_rules);
-
- const sourceTable = makeTestTable('test', ['id', 'description']);
-
- // Pre-setup
- const result1 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1a'
- },
- afterReplicaId: rid2('test1', 'test1a')
- });
- });
-
- const checkpoint1 = result1?.flushed_op ?? '0';
-
- const result2 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Unchanged, but has a before id
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.UPDATE,
- before: {
- id: 'test1',
- description: 'test1a'
- },
- beforeReplicaId: rid2('test1', 'test1a'),
- after: {
- id: 'test1',
- description: 'test1a'
- },
- afterReplicaId: rid2('test1', 'test1a')
- });
- });
-
- const result3 = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- // Delete
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.DELETE,
- before: {
- id: 'test1',
- description: 'test1a'
- },
- beforeReplicaId: rid2('test1', 'test1a'),
- after: undefined
- });
- });
-
- const checkpoint3 = result3!.flushed_op;
-
- const batch = await fromAsync(storage.getBucketDataBatch(checkpoint3, new Map([['global[]', checkpoint1]])));
- const data = batch[0].batch.data.map((d) => {
- return {
- op: d.op,
- object_id: d.object_id,
- data: d.data,
- subkey: d.subkey
- };
- });
-
- // Operations must be in this order
- expect(data).toEqual([
- // 2
- {
- op: 'PUT',
- object_id: 'test1',
- data: JSON.stringify({ id: 'test1', description: 'test1a' }),
- subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
- },
- // 3
- {
- op: 'REMOVE',
- object_id: 'test1',
- data: null,
- subkey: '6544e3899293153fa7b38331/740ba9f2-8b0f-53e3-bb17-5f38a9616f0e'
- }
- ]);
- });
-
- test('large batch', async () => {
- // Test syncing a batch of data that is small in count,
- // but large enough in size to be split over multiple returned batches.
- // The specific batch splits are an implementation detail of the storage driver,
- // and the test will have to be updated when other implementations are added.
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- const largeDescription = '0123456789'.repeat(12_000_00);
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1'
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'large1',
- description: largeDescription
- },
- afterReplicaId: rid('large1')
- });
-
- // Large enough to split the returned batch
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'large2',
- description: largeDescription
- },
- afterReplicaId: rid('large2')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test3',
- description: 'test3'
- },
- afterReplicaId: rid('test3')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const options: BucketDataBatchOptions = {
- chunkLimitBytes: 16 * 1024 * 1024
- };
-
- const batch1 = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options));
- expect(getBatchData(batch1)).toEqual([
- { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
- { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 454746904 }
- ]);
- expect(getBatchMeta(batch1)).toEqual({
- after: '0',
- has_more: true,
- next_after: '2'
- });
-
- const batch2 = await fromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
- );
- expect(getBatchData(batch2)).toEqual([
- { op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1795508474 },
- { op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }
- ]);
- expect(getBatchMeta(batch2)).toEqual({
- after: '2',
- has_more: false,
- next_after: '4'
- });
-
- const batch3 = await fromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
- );
- expect(getBatchData(batch3)).toEqual([]);
- expect(getBatchMeta(batch3)).toEqual(null);
- });
-
- test('large batch (2)', async () => {
- // Test syncing a batch of data that is small in count,
- // but large enough in size to be split over multiple returned chunks.
- // Similar to the above test, but splits over 1MB chunks.
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- const largeDescription = '0123456789'.repeat(2_000_00);
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test1',
- description: 'test1'
- },
- afterReplicaId: rid('test1')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'large1',
- description: largeDescription
- },
- afterReplicaId: rid('large1')
- });
-
- // Large enough to split the returned batch
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'large2',
- description: largeDescription
- },
- afterReplicaId: rid('large2')
- });
-
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: 'test3',
- description: 'test3'
- },
- afterReplicaId: rid('test3')
- });
- });
-
- const checkpoint = result!.flushed_op;
-
- const options: BucketDataBatchOptions = {};
-
- const batch1 = await fromAsync(storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), options));
- expect(getBatchData(batch1)).toEqual([
- { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
- { op_id: '2', op: 'PUT', object_id: 'large1', checksum: 1178768505 }
- ]);
- expect(getBatchMeta(batch1)).toEqual({
- after: '0',
- has_more: true,
- next_after: '2'
- });
-
- const batch2 = await fromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1[0].batch.next_after]]), options)
- );
- expect(getBatchData(batch2)).toEqual([{ op_id: '3', op: 'PUT', object_id: 'large2', checksum: 1607205872 }]);
- expect(getBatchMeta(batch2)).toEqual({
- after: '2',
- has_more: true,
- next_after: '3'
- });
-
- const batch3 = await fromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2[0].batch.next_after]]), options)
- );
- expect(getBatchData(batch3)).toEqual([{ op_id: '4', op: 'PUT', object_id: 'test3', checksum: 1359888332 }]);
- expect(getBatchMeta(batch3)).toEqual({
- after: '3',
- has_more: false,
- next_after: '4'
- });
- });
-
- test('long batch', async () => {
- // Test syncing a batch of data that is limited by count.
- const sync_rules = testRules(
- `
- bucket_definitions:
- global:
- data:
- - SELECT id, description FROM "%"
- `
- );
- const storage = (await factory()).getInstance(sync_rules);
-
- const result = await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- const sourceTable = TEST_TABLE;
-
- for (let i = 1; i <= 6; i++) {
- await batch.save({
- sourceTable,
- tag: SaveOperationTag.INSERT,
- after: {
- id: `test${i}`,
- description: `test${i}`
- },
- afterReplicaId: `test${i}`
- });
- }
- });
-
- const checkpoint = result!.flushed_op;
-
- const batch1 = await oneFromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', '0']]), { limit: 4 })
- );
-
- expect(getBatchData(batch1)).toEqual([
- { op_id: '1', op: 'PUT', object_id: 'test1', checksum: 2871785649 },
- { op_id: '2', op: 'PUT', object_id: 'test2', checksum: 730027011 },
- { op_id: '3', op: 'PUT', object_id: 'test3', checksum: 1359888332 },
- { op_id: '4', op: 'PUT', object_id: 'test4', checksum: 2049153252 }
- ]);
-
- expect(getBatchMeta(batch1)).toEqual({
- after: '0',
- has_more: true,
- next_after: '4'
- });
-
- const batch2 = await oneFromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch1.batch.next_after]]), {
- limit: 4
- })
- );
- expect(getBatchData(batch2)).toEqual([
- { op_id: '5', op: 'PUT', object_id: 'test5', checksum: 3686902721 },
- { op_id: '6', op: 'PUT', object_id: 'test6', checksum: 1974820016 }
- ]);
-
- expect(getBatchMeta(batch2)).toEqual({
- after: '4',
- has_more: false,
- next_after: '6'
- });
-
- const batch3 = await fromAsync(
- storage.getBucketDataBatch(checkpoint, new Map([['global[]', batch2.batch.next_after]]), {
- limit: 4
- })
- );
- expect(getBatchData(batch3)).toEqual([]);
-
- expect(getBatchMeta(batch3)).toEqual(null);
- });
-
- test('batch should be disposed automatically', async () => {
- const sync_rules = testRules(`
- bucket_definitions:
- global:
- data: []
- `);
-
- const storage = (await factory()).getInstance(sync_rules);
-
- let isDisposed = false;
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- batch.registerListener({
- disposed: () => {
- isDisposed = true;
- }
- });
- });
- expect(isDisposed).true;
-
- isDisposed = false;
- let errorCaught = false;
- try {
- await storage.startBatch(BATCH_OPTIONS, async (batch) => {
- batch.registerListener({
- disposed: () => {
- isDisposed = true;
- }
- });
- throw new Error(`Testing exceptions`);
- });
- } catch (ex) {
- errorCaught = true;
- expect(ex.message.includes('Testing')).true;
- }
- expect(errorCaught).true;
- expect(isDisposed).true;
- });
-
- test('empty storage metrics', async () => {
- const f = await factory({ dropAll: true });
-
- const metrics = await f.getStorageMetrics();
- expect(metrics).toEqual({
- operations_size_bytes: 0,
- parameters_size_bytes: 0,
- replication_size_bytes: 0
- });
-
- const r = await f.configureSyncRules('bucket_definitions: {}');
- const storage = f.getInstance(r.persisted_sync_rules!);
- await storage.autoActivate();
-
- const metrics2 = await f.getStorageMetrics();
- expect(metrics2).toEqual({
- operations_size_bytes: 0,
- parameters_size_bytes: 0,
- replication_size_bytes: 0
- });
- });
-
- test('invalidate cached parsed sync rules', async () => {
- const sync_rules_content = testRules(
- `
- bucket_definitions:
- by_workspace:
- parameters:
- - SELECT id as workspace_id FROM workspace WHERE
- workspace."userId" = token_parameters.user_id
- data: []
- `
- );
-
- const bucketStorageFactory = await factory();
- const syncBucketStorage = bucketStorageFactory.getInstance(sync_rules_content);
-
- const parsedSchema1 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'public'
- });
-
- const parsedSchema2 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'public'
- });
-
- // These should be cached, this will be the same instance
- expect(parsedSchema2).equals(parsedSchema1);
- expect(parsedSchema1.getSourceTables()[0].schema).equals('public');
-
- const parsedSchema3 = syncBucketStorage.getParsedSyncRules({
- defaultSchema: 'databasename'
- });
-
- // The cache should not be used
- expect(parsedSchema3).not.equals(parsedSchema2);
- expect(parsedSchema3.getSourceTables()[0].schema).equals('databasename');
- });
- }