@powersync/service-module-postgres-storage 0.0.0-dev-20250122110924 → 0.0.0-dev-20250227082606
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +120 -12
- package/README.md +7 -1
- package/dist/.tsbuildinfo +1 -1
- package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +8 -13
- package/dist/@types/storage/PostgresCompactor.d.ts +0 -6
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +9 -3
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +7 -6
- package/dist/@types/types/codecs.d.ts +2 -2
- package/dist/@types/types/models/BucketData.d.ts +1 -1
- package/dist/@types/types/models/BucketParameters.d.ts +2 -2
- package/dist/@types/types/models/CurrentData.d.ts +3 -3
- package/dist/@types/types/models/SourceTable.d.ts +1 -1
- package/dist/@types/types/types.d.ts +5 -0
- package/dist/storage/PostgresBucketStorageFactory.js +68 -158
- package/dist/storage/PostgresBucketStorageFactory.js.map +1 -1
- package/dist/storage/PostgresCompactor.js +19 -6
- package/dist/storage/PostgresCompactor.js.map +1 -1
- package/dist/storage/PostgresSyncRulesStorage.js +221 -123
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -1
- package/dist/storage/batch/PostgresBucketBatch.js +31 -19
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -1
- package/dist/storage/batch/PostgresPersistedBatch.js +39 -81
- package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -1
- package/dist/types/types.js.map +1 -1
- package/package.json +9 -9
- package/src/storage/PostgresBucketStorageFactory.ts +73 -179
- package/src/storage/PostgresCompactor.ts +19 -14
- package/src/storage/PostgresSyncRulesStorage.ts +231 -38
- package/src/storage/batch/PostgresBucketBatch.ts +39 -22
- package/src/storage/batch/PostgresPersistedBatch.ts +39 -81
- package/src/types/models/SourceTable.ts +1 -1
- package/src/types/types.ts +1 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +138 -0
- package/test/src/storage.test.ts +1 -1
- package/test/src/storage_compacting.test.ts +1 -1
- package/tsconfig.json +0 -2
package/src/storage/PostgresBucketStorageFactory.ts

@@ -1,11 +1,8 @@
 import * as framework from '@powersync/lib-services-framework';
-import { storage,
+import { GetIntanceOptions, storage, SyncRulesBucketStorage, UpdateSyncRulesOptions } from '@powersync/service-core';
 import * as pg_wire from '@powersync/service-jpgwire';
 import * as sync_rules from '@powersync/service-sync-rules';
 import crypto from 'crypto';
-import { wrapWithAbort } from 'ix/asynciterable/operators/withabort.js';
-import { LRUCache } from 'lru-cache/min';
-import * as timers from 'timers/promises';
 import * as uuid from 'uuid';
 
 import * as lib_postgres from '@powersync/lib-service-postgres';
@@ -22,38 +19,13 @@ export type PostgresBucketStorageOptions = {
 };
 
 export class PostgresBucketStorageFactory
-  extends framework.
+  extends framework.BaseObserver<storage.BucketStorageFactoryListener>
   implements storage.BucketStorageFactory
 {
   readonly db: lib_postgres.DatabaseClient;
   public readonly slot_name_prefix: string;
 
-  private
-
-  private readonly storageCache = new LRUCache<number, storage.SyncRulesBucketStorage>({
-    max: 3,
-    fetchMethod: async (id) => {
-      const syncRulesRow = await this.db.sql`
-        SELECT
-          *
-        FROM
-          sync_rules
-        WHERE
-          id = ${{ value: id, type: 'int4' }}
-      `
-        .decoded(models.SyncRules)
-        .first();
-      if (syncRulesRow == null) {
-        // Deleted in the meantime?
-        return undefined;
-      }
-      const rules = new PostgresPersistedSyncRulesContent(this.db, syncRulesRow);
-      return this.getInstance(rules);
-    },
-    dispose: (storage) => {
-      storage[Symbol.dispose]();
-    }
-  });
+  private activeStorageCache: storage.SyncRulesBucketStorage | undefined;
 
   constructor(protected options: PostgresBucketStorageOptions) {
     super();
@@ -70,7 +42,6 @@ export class PostgresBucketStorageFactory
   }
 
   async [Symbol.asyncDispose]() {
-    super[Symbol.dispose]();
     await this.db[Symbol.asyncDispose]();
   }
 
@@ -79,18 +50,22 @@ export class PostgresBucketStorageFactory
     // This has not been implemented yet.
   }
 
-  getInstance(
+  getInstance(
+    syncRules: storage.PersistedSyncRulesContent,
+    options?: GetIntanceOptions
+  ): storage.SyncRulesBucketStorage {
     const storage = new PostgresSyncRulesStorage({
       factory: this,
       db: this.db,
       sync_rules: syncRules,
       batchLimits: this.options.config.batch_limits
     });
-
+    if (!options?.skipLifecycleHooks) {
+      this.iterateListeners((cb) => cb.syncStorageCreated?.(storage));
+    }
     storage.registerListener({
       batchStarted: (batch) => {
-
-        batch.registerManagedListener(storage, {
+        batch.registerListener({
           replicationEvent: (payload) => this.iterateListeners((cb) => cb.replicationEvent?.(payload))
         });
       }
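Note: the reworked `getInstance` accepts an optional `GetIntanceOptions` argument. A minimal usage sketch, assuming a constructed factory; `factory` and `persistedContent` are hypothetical names:

```typescript
// Hypothetical caller: `factory` is a PostgresBucketStorageFactory and
// `persistedContent` a storage.PersistedSyncRulesContent row.
// skipLifecycleHooks suppresses the syncStorageCreated listener notification;
// the storage instance itself is constructed either way.
const instance = factory.getInstance(persistedContent, { skipLifecycleHooks: true });
```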
@@ -157,11 +132,19 @@ export class PostgresBucketStorageFactory
     return newInstanceRow!.id;
   }
 
+  async getSystemIdentifier(): Promise<storage.BucketStorageSystemIdentifier> {
+    const id = lib_postgres.utils.encodePostgresSystemIdentifier(
+      await lib_postgres.utils.queryPostgresSystemIdentifier(this.db.pool)
+    );
+
+    return {
+      id,
+      type: lib_postgres.POSTGRES_CONNECTION_TYPE
+    };
+  }
+
   // TODO possibly share implementation in abstract class
-  async configureSyncRules(
-    sync_rules: string,
-    options?: { lock?: boolean }
-  ): Promise<{
+  async configureSyncRules(options: UpdateSyncRulesOptions): Promise<{
     updated: boolean;
     persisted_sync_rules?: storage.PersistedSyncRulesContent;
     lock?: storage.ReplicationLock;
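Note: the new `getSystemIdentifier` exposes a stable identity for the storage database. A hedged usage sketch; `factory` is a hypothetical name, and the result shape is taken from the code above:

```typescript
// Hypothetical usage: identify which Postgres database backs this storage,
// e.g. to detect that persisted state belongs to a different instance.
const ident = await factory.getSystemIdentifier();
// ident.id   - encoded Postgres system identifier
// ident.type - lib_postgres.POSTGRES_CONNECTION_TYPE
```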
@@ -169,31 +152,32 @@ export class PostgresBucketStorageFactory
     const next = await this.getNextSyncRulesContent();
     const active = await this.getActiveSyncRulesContent();
 
-    if (next?.sync_rules_content ==
+    if (next?.sync_rules_content == options.content) {
       framework.logger.info('Sync rules from configuration unchanged');
       return { updated: false };
-    } else if (next == null && active?.sync_rules_content ==
+    } else if (next == null && active?.sync_rules_content == options.content) {
       framework.logger.info('Sync rules from configuration unchanged');
       return { updated: false };
     } else {
       framework.logger.info('Sync rules updated from configuration');
-      const persisted_sync_rules = await this.updateSyncRules(
-        content: sync_rules,
-        lock: options?.lock
-      });
+      const persisted_sync_rules = await this.updateSyncRules(options);
       return { updated: true, persisted_sync_rules, lock: persisted_sync_rules.current_lock ?? undefined };
     }
   }
 
   async updateSyncRules(options: storage.UpdateSyncRulesOptions): Promise<PostgresPersistedSyncRulesContent> {
     // TODO some shared implementation for this might be nice
-
-
-
-
-
-
-
+    if (options.validate) {
+      // Parse and validate before applying any changes
+      sync_rules.SqlSyncRules.fromYaml(options.content, {
+        // No schema-based validation at this point
+        schema: undefined,
+        defaultSchema: 'not_applicable', // Not needed for validation
+        throwOnError: true
+      });
+    } else {
+      // Apply unconditionally. Any errors will be reported via the diagnostics API.
+    }
 
     return this.db.transaction(async (db) => {
       await db.sql`
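Note: `configureSyncRules` now takes a single `UpdateSyncRulesOptions` object, and `updateSyncRules` can parse-and-validate the YAML before persisting anything. A sketch of the new call shape, assuming the options object carries `content`, `validate`, and `lock` (the YAML variable is hypothetical):

```typescript
// Hypothetical usage; assumes UpdateSyncRulesOptions has content/validate/lock.
const result = await factory.configureSyncRules({
  content: syncRulesYaml, // sync rules YAML from configuration
  validate: true, // runs SqlSyncRules.fromYaml with throwOnError before any write
  lock: true
});
if (result.updated) {
  // A new sync_rules row was persisted; result.lock is the replication lock, if taken.
}
```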
@@ -246,16 +230,17 @@ export class PostgresBucketStorageFactory
     });
   }
 
-  async
+  async restartReplication(sync_rules_group_id: number): Promise<void> {
     const next = await this.getNextSyncRulesContent();
     const active = await this.getActiveSyncRulesContent();
 
     // In both the below cases, we create a new sync rules instance.
-    // The current one will continue
-    if (next != null && next.
+    // The current one will continue serving sync requests until the next one has finished processing.
+    if (next != null && next.id == sync_rules_group_id) {
       // We need to redo the "next" sync rules
       await this.updateSyncRules({
-        content: next.sync_rules_content
+        content: next.sync_rules_content,
+        validate: false
       });
       // Pro-actively stop replicating
       await this.db.sql`
@@ -266,17 +251,30 @@ export class PostgresBucketStorageFactory
           id = ${{ value: next.id, type: 'int4' }}
           AND state = ${{ value: storage.SyncRuleState.PROCESSING, type: 'varchar' }}
       `.execute();
-    } else if (next == null && active?.
+    } else if (next == null && active?.id == sync_rules_group_id) {
       // Slot removed for "active" sync rules, while there is no "next" one.
       await this.updateSyncRules({
-        content: active.sync_rules_content
+        content: active.sync_rules_content,
+        validate: false
       });
 
-      // Pro-actively stop replicating
+      // Pro-actively stop replicating, but still serve clients with existing data
       await this.db.sql`
         UPDATE sync_rules
         SET
-          state = ${{ value: storage.SyncRuleState.
+          state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
+        WHERE
+          id = ${{ value: active.id, type: 'int4' }}
+          AND state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+      `.execute();
+    } else if (next != null && active?.id == sync_rules_group_id) {
+      // Already have "next" sync rules - don't update any.
+
+      // Pro-actively stop replicating, but still serve clients with existing data
+      await this.db.sql`
+        UPDATE sync_rules
+        SET
+          state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
         WHERE
           id = ${{ value: active.id, type: 'int4' }}
           AND state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
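Note: `restartReplication` now distinguishes three cases, keyed on whether the given group id matches the "next" or "active" sync rules row. A classification sketch of the branch conditions above; illustrative only, not the actual implementation:

```typescript
// Sketch of the branching in restartReplication; names are illustrative.
type RestartCase = 'redo-next' | 'replace-active' | 'keep-next-stop-active' | 'no-op';

function classify(nextId: number | null, activeId: number | null, groupId: number): RestartCase {
  if (nextId != null && nextId === groupId) {
    // Re-insert the "next" rules and stop the old PROCESSING row.
    return 'redo-next';
  } else if (nextId == null && activeId === groupId) {
    // Re-insert the active rules and mark the ACTIVE row ERRORED (still serving reads).
    return 'replace-active';
  } else if (nextId != null && activeId === groupId) {
    // A "next" row already exists; only mark the ACTIVE row ERRORED.
    return 'keep-next-stop-active';
  }
  return 'no-op';
}
```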
@@ -298,6 +296,7 @@ export class PostgresBucketStorageFactory
         sync_rules
       WHERE
         state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
+        OR state = ${{ value: storage.SyncRuleState.ERRORED, type: 'varchar' }}
       ORDER BY
         id DESC
       LIMIT
@@ -371,126 +370,21 @@ export class PostgresBucketStorageFactory
     return rows.map((row) => new PostgresPersistedSyncRulesContent(this.db, row));
   }
 
-  async
-    const
-
-
-        last_checkpoint,
-        last_checkpoint_lsn
-      FROM
-        sync_rules
-      WHERE
-        state = ${{ value: storage.SyncRuleState.ACTIVE, type: 'varchar' }}
-      ORDER BY
-        id DESC
-      LIMIT
-        1
-    `
-      .decoded(models.ActiveCheckpoint)
-      .first();
-
-    return this.makeActiveCheckpoint(activeCheckpoint);
-  }
-
-  async *watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable<storage.WriteCheckpoint> {
-    let lastCheckpoint: utils.OpId | null = null;
-    let lastWriteCheckpoint: bigint | null = null;
-
-    const iter = wrapWithAbort(this.sharedIterator, signal);
-    for await (const cp of iter) {
-      const { checkpoint, lsn } = cp;
-
-      // lsn changes are not important by itself.
-      // What is important is:
-      // 1. checkpoint (op_id) changes.
-      // 2. write checkpoint changes for the specific user
-      const bucketStorage = await cp.getBucketStorage();
-      if (!bucketStorage) {
-        continue;
-      }
-
-      const lsnFilters: Record<string, string> = lsn ? { 1: lsn } : {};
-
-      const currentWriteCheckpoint = await bucketStorage.lastWriteCheckpoint({
-        user_id,
-        heads: {
-          ...lsnFilters
-        }
-      });
-
-      if (currentWriteCheckpoint == lastWriteCheckpoint && checkpoint == lastCheckpoint) {
-        // No change - wait for next one
-        // In some cases, many LSNs may be produced in a short time.
-        // Add a delay to throttle the write checkpoint lookup a bit.
-        await timers.setTimeout(20 + 10 * Math.random());
-        continue;
-      }
-
-      lastWriteCheckpoint = currentWriteCheckpoint;
-      lastCheckpoint = checkpoint;
-
-      yield { base: cp, writeCheckpoint: currentWriteCheckpoint };
+  async getActiveStorage(): Promise<SyncRulesBucketStorage | null> {
+    const content = await this.getActiveSyncRulesContent();
+    if (content == null) {
+      return null;
     }
-  }
 
-
-
-
-
-
-
-
-
-      state = ${{ type: 'varchar', value: storage.SyncRuleState.ACTIVE }}
-      LIMIT
-        1
-    `
-      .decoded(models.ActiveCheckpoint)
-      .first();
-
-    const sink = new sync.LastValueSink<string>(undefined);
-
-    const disposeListener = this.db.registerListener({
-      notification: (notification) => sink.next(notification.payload)
-    });
-
-    signal.addEventListener('aborted', async () => {
-      disposeListener();
-      sink.complete();
-    });
-
-    yield this.makeActiveCheckpoint(doc);
-
-    let lastOp: storage.ActiveCheckpoint | null = null;
-    for await (const payload of sink.withSignal(signal)) {
-      if (signal.aborted) {
-        return;
-      }
-
-      const notification = models.ActiveCheckpointNotification.decode(payload);
-      const activeCheckpoint = this.makeActiveCheckpoint(notification.active_checkpoint);
-
-      if (lastOp == null || activeCheckpoint.lsn != lastOp.lsn || activeCheckpoint.checkpoint != lastOp.checkpoint) {
-        lastOp = activeCheckpoint;
-        yield activeCheckpoint;
-      }
+    // It is important that this instance is cached.
+    // Not for the instance construction itself, but to ensure that internal caches on the instance
+    // are re-used properly.
+    if (this.activeStorageCache?.group_id == content.id) {
+      return this.activeStorageCache;
+    } else {
+      const instance = this.getInstance(content);
+      this.activeStorageCache = instance;
+      return instance;
     }
   }
-
-  private makeActiveCheckpoint(row: models.ActiveCheckpointDecoded | null) {
-    return {
-      checkpoint: utils.timestampToOpId(row?.last_checkpoint ?? 0n),
-      lsn: row?.last_checkpoint_lsn ?? null,
-      hasSyncRules() {
-        return row != null;
-      },
-      getBucketStorage: async () => {
-        if (row == null) {
-          return null;
-        }
-        return (await this.storageCache.fetch(Number(row.id))) ?? null;
-      }
-    } satisfies storage.ActiveCheckpoint;
-  }
 }
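Note: `getActiveStorage` replaces the old checkpoint helpers and the `LRUCache`-based `storageCache`: a single instance is cached, keyed by group id, so its internal caches are reused. A hedged usage sketch; `factory` is a hypothetical name:

```typescript
// Hypothetical caller; `factory` is a constructed PostgresBucketStorageFactory.
const active = await factory.getActiveStorage();
if (active == null) {
  // No ACTIVE (or, per the query change above, ERRORED) sync_rules row exists yet.
} else {
  // While the active group id stays the same, repeated calls return the
  // same cached instance, keeping per-instance caches warm.
  console.log(active.group_id);
}
```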
package/src/storage/PostgresCompactor.ts

@@ -35,14 +35,7 @@ interface CurrentBucketState {
 /**
  * Additional options, primarily for testing.
  */
-export interface PostgresCompactOptions extends storage.CompactOptions {
-  /** Minimum of 2 */
-  clearBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchLimit?: number;
-  /** Minimum of 1 */
-  moveBatchQueryLimit?: number;
-}
+export interface PostgresCompactOptions extends storage.CompactOptions {}
 
 const DEFAULT_CLEAR_BATCH_LIMIT = 5000;
 const DEFAULT_MOVE_BATCH_LIMIT = 2000;
@@ -99,15 +92,19 @@ export class PostgresCompactor {
 
     let bucketLower: string | null = null;
     let bucketUpper: string | null = null;
+    const MAX_CHAR = String.fromCodePoint(0xffff);
 
-    if (bucket
+    if (bucket == null) {
+      bucketLower = '';
+      bucketUpper = MAX_CHAR;
+    } else if (bucket?.includes('[')) {
       // Exact bucket name
       bucketLower = bucket;
       bucketUpper = bucket;
     } else if (bucket) {
       // Bucket definition name
       bucketLower = `${bucket}[`;
-      bucketUpper = `${bucket}[
+      bucketUpper = `${bucket}[${MAX_CHAR}`;
     }
 
     let upperOpIdLimit = BIGINT_MAX;
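Note: serialized bucket names have the shape `definition[params]`, so the bounds above select all buckets, one exact bucket, or every bucket of one definition. A sketch of the three ranges; the example names are hypothetical:

```typescript
// Sketch: the (bucketLower, bucketUpper) pairs produced above.
const MAX_CHAR = String.fromCodePoint(0xffff);
// bucket == null:            ['', '\uffff']                       -> all buckets
// bucket == 'by_user["u1"]': ['by_user["u1"]', 'by_user["u1"]']   -> one exact bucket
// bucket == 'by_user':       ['by_user[', 'by_user[\uffff']       -> all by_user[...] buckets
```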
@@ -126,10 +123,16 @@
         bucket_data
       WHERE
         group_id = ${{ type: 'int4', value: this.group_id }}
-        AND bucket_name
-        AND
+        AND bucket_name >= ${{ type: 'varchar', value: bucketLower }}
+        AND (
+          (
+            bucket_name = ${{ type: 'varchar', value: bucketUpper }}
+            AND op_id < ${{ type: 'int8', value: upperOpIdLimit }}
+          )
+          OR bucket_name < ${{ type: 'varchar', value: bucketUpper }} COLLATE "C" -- Use binary comparison
+        )
       ORDER BY
-        bucket_name,
+        bucket_name DESC,
         op_id DESC
       LIMIT
         ${{ type: 'int4', value: this.moveBatchQueryLimit }}
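Note: the rewritten query turns the scan into descending keyset pagination over `(bucket_name, op_id)`; the cursor itself is advanced in the next hunk. The predicate the SQL encodes, as a sketch where JavaScript string comparison stands in for the binary "C" collation:

```typescript
// A row (name, opId) belongs to the next batch iff it sorts strictly before the
// cursor (bucketUpper, upperOpIdLimit) under ORDER BY bucket_name DESC, op_id DESC,
// subject to the separate lower bound bucket_name >= bucketLower.
function beforeCursor(name: string, opId: bigint, bucketUpper: string, upperOpIdLimit: bigint): boolean {
  return name < bucketUpper || (name === bucketUpper && opId < upperOpIdLimit);
}
```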
@@ -145,7 +148,9 @@ export class PostgresCompactor {
       }
 
       // Set upperBound for the next batch
-
+      const lastBatchItem = batch[batch.length - 1];
+      upperOpIdLimit = lastBatchItem.op_id;
+      bucketUpper = lastBatchItem.bucket_name;
 
       for (const doc of batch) {
         if (currentState == null || doc.bucket_name != currentState.bucket) {
|