@powersync/service-core 0.18.1 → 1.7.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/CHANGELOG.md +16 -0
- package/dist/api/RouteAPI.d.ts +1 -1
- package/dist/api/diagnostics.js +107 -169
- package/dist/api/diagnostics.js.map +1 -1
- package/dist/entry/commands/compact-action.js +10 -73
- package/dist/entry/commands/compact-action.js.map +1 -1
- package/dist/modules/AbstractModule.d.ts +1 -1
- package/dist/replication/AbstractReplicator.js +2 -65
- package/dist/replication/AbstractReplicator.js.map +1 -1
- package/dist/routes/endpoints/checkpointing.js +3 -2
- package/dist/routes/endpoints/checkpointing.js.map +1 -1
- package/dist/routes/endpoints/socket-route.js +5 -5
- package/dist/routes/endpoints/socket-route.js.map +1 -1
- package/dist/routes/endpoints/sync-stream.js +5 -5
- package/dist/routes/endpoints/sync-stream.js.map +1 -1
- package/dist/runner/teardown.js +3 -65
- package/dist/runner/teardown.js.map +1 -1
- package/dist/storage/BucketStorage.d.ts +1 -442
- package/dist/storage/BucketStorage.js +0 -9
- package/dist/storage/BucketStorage.js.map +1 -1
- package/dist/storage/BucketStorageBatch.d.ts +130 -0
- package/dist/storage/BucketStorageBatch.js +10 -0
- package/dist/storage/BucketStorageBatch.js.map +1 -0
- package/dist/storage/BucketStorageFactory.d.ts +136 -0
- package/dist/storage/BucketStorageFactory.js +2 -0
- package/dist/storage/BucketStorageFactory.js.map +1 -0
- package/dist/storage/ChecksumCache.js.map +1 -1
- package/dist/storage/PersistedSyncRulesContent.d.ts +20 -0
- package/dist/storage/PersistedSyncRulesContent.js +2 -0
- package/dist/storage/PersistedSyncRulesContent.js.map +1 -0
- package/dist/storage/ReplicationEventPayload.d.ts +1 -1
- package/dist/storage/ReplicationLock.d.ts +4 -0
- package/dist/storage/ReplicationLock.js +2 -0
- package/dist/storage/ReplicationLock.js.map +1 -0
- package/dist/storage/StorageEngine.d.ts +4 -4
- package/dist/storage/StorageEngine.js +2 -2
- package/dist/storage/StorageEngine.js.map +1 -1
- package/dist/storage/StorageProvider.d.ts +4 -1
- package/dist/storage/SyncRulesBucketStorage.d.ts +201 -0
- package/dist/storage/SyncRulesBucketStorage.js +7 -0
- package/dist/storage/SyncRulesBucketStorage.js.map +1 -0
- package/dist/storage/bson.d.ts +11 -3
- package/dist/storage/bson.js +24 -2
- package/dist/storage/bson.js.map +1 -1
- package/dist/storage/storage-index.d.ts +5 -0
- package/dist/storage/storage-index.js +5 -0
- package/dist/storage/storage-index.js.map +1 -1
- package/dist/sync/BucketChecksumState.d.ts +91 -0
- package/dist/sync/BucketChecksumState.js +313 -0
- package/dist/sync/BucketChecksumState.js.map +1 -0
- package/dist/sync/sync-index.d.ts +1 -0
- package/dist/sync/sync-index.js +1 -0
- package/dist/sync/sync-index.js.map +1 -1
- package/dist/sync/sync.d.ts +7 -3
- package/dist/sync/sync.js +131 -135
- package/dist/sync/sync.js.map +1 -1
- package/dist/sync/util.d.ts +9 -0
- package/dist/sync/util.js +44 -0
- package/dist/sync/util.js.map +1 -1
- package/dist/util/checkpointing.d.ts +1 -1
- package/dist/util/checkpointing.js +15 -78
- package/dist/util/checkpointing.js.map +1 -1
- package/dist/util/protocol-types.d.ts +13 -4
- package/package.json +4 -4
- package/src/api/RouteAPI.ts +1 -1
- package/src/api/diagnostics.ts +1 -1
- package/src/entry/commands/compact-action.ts +2 -3
- package/src/modules/AbstractModule.ts +1 -1
- package/src/replication/AbstractReplicator.ts +1 -2
- package/src/routes/endpoints/checkpointing.ts +3 -3
- package/src/routes/endpoints/socket-route.ts +7 -5
- package/src/routes/endpoints/sync-stream.ts +8 -5
- package/src/runner/teardown.ts +1 -1
- package/src/storage/BucketStorage.ts +1 -552
- package/src/storage/BucketStorageBatch.ts +158 -0
- package/src/storage/BucketStorageFactory.ts +156 -0
- package/src/storage/ChecksumCache.ts +1 -0
- package/src/storage/PersistedSyncRulesContent.ts +26 -0
- package/src/storage/ReplicationEventPayload.ts +1 -1
- package/src/storage/ReplicationLock.ts +5 -0
- package/src/storage/StorageEngine.ts +4 -4
- package/src/storage/StorageProvider.ts +4 -1
- package/src/storage/SyncRulesBucketStorage.ts +256 -0
- package/src/storage/bson.ts +28 -4
- package/src/storage/storage-index.ts +5 -0
- package/src/sync/BucketChecksumState.ts +392 -0
- package/src/sync/sync-index.ts +1 -0
- package/src/sync/sync.ts +173 -157
- package/src/sync/util.ts +54 -0
- package/src/util/checkpointing.ts +4 -6
- package/src/util/protocol-types.ts +16 -4
- package/test/src/auth.test.ts +5 -5
- package/test/src/sync/BucketChecksumState.test.ts +565 -0
- package/test/src/sync/util.test.ts +34 -0
- package/tsconfig.tsbuildinfo +1 -1
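
Most of this release is a refactor of `package/src/storage/BucketStorage.ts` (+1 -552): its interfaces move into the new `BucketStorageBatch.ts`, `BucketStorageFactory.ts`, `PersistedSyncRulesContent.ts`, `ReplicationLock.ts` and `SyncRulesBucketStorage.ts` modules, and `storage-index.ts` gains exactly five lines. A plausible sketch of those five lines, inferred from the new file names rather than confirmed by this diff:

```ts
// Hypothetical contents of the five lines added to storage-index.ts —
// inferred from the new modules listed above; actual order and paths may differ.
export * from './BucketStorageBatch.js';
export * from './BucketStorageFactory.js';
export * from './PersistedSyncRulesContent.js';
export * from './ReplicationLock.js';
export * from './SyncRulesBucketStorage.js';
```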
package/src/storage/BucketStorage.ts

```diff
@@ -1,33 +1,4 @@
-import {
-  AsyncDisposableObserverClient,
-  DisposableListener,
-  DisposableObserverClient
-} from '@powersync/lib-services-framework';
-import {
-  EvaluatedParameters,
-  EvaluatedRow,
-  SqlSyncRules,
-  SqliteJsonRow,
-  SqliteJsonValue,
-  SqliteRow,
-  ToastableSqliteRow
-} from '@powersync/service-sync-rules';
-import { BSON } from 'bson';
-import * as util from '../util/util-index.js';
-import { ReplicationEventPayload } from './ReplicationEventPayload.js';
-import { SourceEntityDescriptor } from './SourceEntity.js';
-import { SourceTable } from './SourceTable.js';
-import { BatchedCustomWriteCheckpointOptions } from './storage-index.js';
-import { SyncStorageWriteCheckpointAPI } from './WriteCheckpointAPI.js';
-
-/**
- * Replica id uniquely identifying a row on the source database.
- *
- * Can be any value serializable to BSON.
- *
- * If the value is an entire document, the data serialized to a v5 UUID may be a good choice here.
- */
-export type ReplicaId = BSON.UUID | BSON.Document | any;
+import { ToastableSqliteRow } from '@powersync/service-sync-rules';
 
 export enum SyncRuleState {
   /**
```
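
The `ReplicaId` type and its doc comment move out of this file; the comment suggests hashing a full document into a v5 UUID when no simpler key exists. A minimal sketch of that suggestion, assuming the `uuid` package and an arbitrary namespace (neither is part of this package):

```ts
import { BSON } from 'bson';
import { v5 as uuidv5 } from 'uuid';

// Arbitrary namespace for illustration only — any fixed UUID works,
// as long as it stays stable across replications.
const REPLICA_ID_NAMESPACE = '6ba7b810-9dad-11d1-80b4-00c04fd430c8';

// Derive a deterministic ReplicaId by hashing the BSON-serialized
// document into a v5 UUID, per the removed doc comment's suggestion.
function replicaIdForDocument(doc: BSON.Document): BSON.UUID {
  const serialized = BSON.serialize(doc);
  return new BSON.UUID(uuidv5(serialized, REPLICA_ID_NAMESPACE));
}
```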
```diff
@@ -55,479 +26,10 @@ export enum SyncRuleState {
    */
   TERMINATED = 'TERMINATED'
 }
-export interface BucketStorageFactoryListener extends DisposableListener {
-  syncStorageCreated: (storage: SyncRulesBucketStorage) => void;
-  replicationEvent: (event: ReplicationEventPayload) => void;
-}
-
-export interface BucketStorageSystemIdentifier {
-  /**
-   * A unique identifier for the system used for storage.
-   * For Postgres this can be the cluster `system_identifier` and database name.
-   * For MongoDB this can be the replica set name.
-   */
-  id: string;
-  /**
-   * A unique type for the storage implementation.
-   * e.g. `mongodb`, `postgresql`.
-   */
-  type: string;
-}
-
-export interface BucketStorageFactory extends AsyncDisposableObserverClient<BucketStorageFactoryListener> {
-  /**
-   * Update sync rules from configuration, if changed.
-   */
-  configureSyncRules(
-    options: UpdateSyncRulesOptions
-  ): Promise<{ updated: boolean; persisted_sync_rules?: PersistedSyncRulesContent; lock?: ReplicationLock }>;
-
-  /**
-   * Get a storage instance to query sync data for specific sync rules.
-   */
-  getInstance(options: PersistedSyncRulesContent): SyncRulesBucketStorage;
-
-  /**
-   * Deploy new sync rules.
-   *
-   * Similar to configureSyncRules, but applies the update unconditionally.
-   */
-  updateSyncRules(options: UpdateSyncRulesOptions): Promise<PersistedSyncRulesContent>;
-
-  /**
-   * Indicate that a slot was removed, and we should re-sync by creating
-   * a new sync rules instance.
-   *
-   * This is roughly the same as deploying a new version of the current sync
-   * rules, but also accounts for cases where the current sync rules are not
-   * the latest ones.
-   *
-   * Replication should be restarted after this.
-   *
-   * @param slot_name The removed slot
-   */
-  slotRemoved(slot_name: string): Promise<void>;
-
-  /**
-   * Get the sync rules used for querying.
-   */
-  getActiveSyncRules(options: ParseSyncRulesOptions): Promise<PersistedSyncRules | null>;
-
-  /**
-   * Get the sync rules used for querying.
-   */
-  getActiveSyncRulesContent(): Promise<PersistedSyncRulesContent | null>;
-
-  /**
-   * Get the sync rules that will be active next once done with initial replication.
-   */
-  getNextSyncRules(options: ParseSyncRulesOptions): Promise<PersistedSyncRules | null>;
-
-  /**
-   * Get the sync rules that will be active next once done with initial replication.
-   */
-  getNextSyncRulesContent(): Promise<PersistedSyncRulesContent | null>;
-
-  /**
-   * Get all sync rules currently replicating. Typically this is the "active" and "next" sync rules.
-   */
-  getReplicatingSyncRules(): Promise<PersistedSyncRulesContent[]>;
-
-  /**
-   * Get all sync rules stopped but not terminated yet.
-   */
-  getStoppedSyncRules(): Promise<PersistedSyncRulesContent[]>;
-
-  /**
-   * Same as:
-   * getInstance(await getActiveSyncRules()).getCheckpoint().
-   */
-  getActiveCheckpoint(): Promise<ActiveCheckpoint>;
-
-  /**
-   * Yields the latest user write checkpoint whenever the sync checkpoint updates.
-   */
-  watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable<WriteCheckpoint>;
-
-  /**
-   * Get storage size of active sync rules.
-   */
-  getStorageMetrics(): Promise<StorageMetrics>;
-
-  /**
-   * Get the unique identifier for this instance of PowerSync.
-   */
-  getPowerSyncInstanceId(): Promise<string>;
-
-  /**
-   * Get a unique identifier for the system used for storage.
-   */
-  getSystemIdentifier(): Promise<BucketStorageSystemIdentifier>;
-}
-
-export interface ReplicationCheckpoint {
-  readonly checkpoint: util.OpId;
-  readonly lsn: string | null;
-}
-
-export interface ActiveCheckpoint extends ReplicationCheckpoint {
-  hasSyncRules(): boolean;
-
-  getBucketStorage(): Promise<SyncRulesBucketStorage | null>;
-}
-
-export interface WriteCheckpoint {
-  base: ActiveCheckpoint;
-  writeCheckpoint: bigint | null;
-}
-
-export interface StorageMetrics {
-  /**
-   * Size of operations (bucket_data)
-   */
-  operations_size_bytes: number;
-
-  /**
-   * Size of parameter storage.
-   *
-   * Replication storage -> raw data as received from Postgres.
-   */
-  parameters_size_bytes: number;
-
-  /**
-   * Size of current_data.
-   */
-  replication_size_bytes: number;
-}
-
-export interface ParseSyncRulesOptions {
-  defaultSchema: string;
-}
-
-export interface PersistedSyncRulesContent {
-  readonly id: number;
-  readonly sync_rules_content: string;
-  readonly slot_name: string;
-
-  readonly last_fatal_error?: string | null;
-  readonly last_keepalive_ts?: Date | null;
-  readonly last_checkpoint_ts?: Date | null;
-
-  parsed(options: ParseSyncRulesOptions): PersistedSyncRules;
-
-  lock(): Promise<ReplicationLock>;
-}
-
-export interface ReplicationLock {
-  sync_rules_id: number;
-
-  release(): Promise<void>;
-}
-
-export interface PersistedSyncRules {
-  readonly id: number;
-  readonly sync_rules: SqlSyncRules;
-  readonly slot_name: string;
-}
-
-export interface UpdateSyncRulesOptions {
-  content: string;
-  lock?: boolean;
-  validate?: boolean;
-}
-
-export interface SyncRulesBucketStorageOptions {
-  sync_rules: SqlSyncRules;
-  group_id: number;
-}
 
 export const DEFAULT_DOCUMENT_BATCH_LIMIT = 1000;
 export const DEFAULT_DOCUMENT_CHUNK_LIMIT_BYTES = 1 * 1024 * 1024;
 
-export interface BucketDataBatchOptions {
-  /** Limit number of documents returned. Defaults to 1000. */
-  limit?: number;
-
-  /**
-   * Limit size of chunks returned. Defaults to 1MB.
-   *
-   * This is a lower bound, not an upper bound. As soon as the chunk size goes over this limit,
-   * it is returned.
-   *
-   * Note that an individual data row can be close to 16MB in size, so this does not help in
-   * extreme cases.
-   */
-  chunkLimitBytes?: number;
-}
-
-export interface StartBatchOptions extends ParseSyncRulesOptions {
-  zeroLSN: string;
-  /**
-   * Whether or not to store a copy of the current data.
-   *
-   * This is needed if we need to apply partial updates, for example
-   * when we get TOAST values from Postgres.
-   *
-   * This is not needed when we get the full document from the source
-   * database, for example from MongoDB.
-   */
-  storeCurrentData: boolean;
-
-  /**
-   * Set to true for initial replication.
-   *
-   * This will avoid creating new operations for rows previously replicated.
-   */
-  skipExistingRows?: boolean;
-}
-
-export interface SyncRulesBucketStorageListener extends DisposableListener {
-  batchStarted: (batch: BucketStorageBatch) => void;
-}
-
-export interface SyncRulesBucketStorage
-  extends DisposableObserverClient<SyncRulesBucketStorageListener>,
-    SyncStorageWriteCheckpointAPI {
-  readonly group_id: number;
-  readonly slot_name: string;
-
-  readonly factory: BucketStorageFactory;
-
-  resolveTable(options: ResolveTableOptions): Promise<ResolveTableResult>;
-
-  startBatch(
-    options: StartBatchOptions,
-    callback: (batch: BucketStorageBatch) => Promise<void>
-  ): Promise<FlushedResult | null>;
-
-  getCheckpoint(): Promise<ReplicationCheckpoint>;
-
-  getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules;
-
-  getParameterSets(checkpoint: util.OpId, lookups: SqliteJsonValue[][]): Promise<SqliteJsonRow[]>;
-
-  /**
-   * Get a "batch" of data for a checkpoint.
-   *
-   * The results will be split into separate SyncBucketData chunks to:
-   * 1. Separate buckets.
-   * 2. Limit the size of each individual chunk according to options.batchSizeLimitBytes.
-   *
-   * @param checkpoint the checkpoint
-   * @param dataBuckets current bucket states
-   * @param options batch size options
-   */
-  getBucketDataBatch(
-    checkpoint: util.OpId,
-    dataBuckets: Map<string, string>,
-    options?: BucketDataBatchOptions
-  ): AsyncIterable<SyncBucketDataBatch>;
-
-  /**
-   * Compute checksums for a given list of buckets.
-   *
-   * Returns zero checksums for any buckets not found.
-   */
-  getChecksums(checkpoint: util.OpId, buckets: string[]): Promise<util.ChecksumMap>;
-
-  /**
-   * Terminate the sync rules.
-   *
-   * This clears the storage, and sets state to TERMINATED.
-   *
-   * Must only be called on stopped sync rules.
-   */
-  terminate(options?: TerminateOptions): Promise<void>;
-
-  getStatus(): Promise<SyncRuleStatus>;
-
-  /**
-   * Clear the storage, without changing state.
-   */
-  clear(): Promise<void>;
-
-  autoActivate(): Promise<void>;
-
-  /**
-   * Record a replication error.
-   *
-   * This could be a recoverable error (e.g. temporary network failure),
-   * or a permanent error (e.g. missing toast data).
-   *
-   * Errors are cleared on commit.
-   */
-  reportError(e: any): Promise<void>;
-
-  compact(options?: CompactOptions): Promise<void>;
-}
-
-export interface SyncRuleStatus {
-  checkpoint_lsn: string | null;
-  active: boolean;
-  snapshot_done: boolean;
-}
-export interface ResolveTableOptions {
-  group_id: number;
-  connection_id: number;
-  connection_tag: string;
-  entity_descriptor: SourceEntityDescriptor;
-
-  sync_rules: SqlSyncRules;
-}
-
-export interface ResolveTableResult {
-  table: SourceTable;
-  dropTables: SourceTable[];
-}
-
-export interface FlushedResult {
-  flushed_op: string;
-}
-
-export interface BucketBatchStorageListener extends DisposableListener {
-  replicationEvent: (payload: ReplicationEventPayload) => void;
-}
-
-export interface BucketBatchCommitOptions {
-  /**
-   * Creates a new checkpoint even if there were no persisted operations.
-   * Defaults to true.
-   */
-  createEmptyCheckpoints?: boolean;
-}
-
-export type ResolvedBucketBatchCommitOptions = Required<BucketBatchCommitOptions>;
-
-export const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: ResolvedBucketBatchCommitOptions = {
-  createEmptyCheckpoints: true
-};
-
-export interface BucketStorageBatch extends DisposableObserverClient<BucketBatchStorageListener> {
-  /**
-   * Save an op, and potentially flush.
-   *
-   * This can be an insert, update or delete op.
-   */
-  save(record: SaveOptions): Promise<FlushedResult | null>;
-
-  /**
-   * Replicate a truncate op - deletes all data in the specified tables.
-   */
-  truncate(sourceTables: SourceTable[]): Promise<FlushedResult | null>;
-
-  /**
-   * Drop one or more tables.
-   *
-   * This is the same as truncate, but additionally removes the SourceTable record.
-   */
-  drop(sourceTables: SourceTable[]): Promise<FlushedResult | null>;
-
-  /**
-   * Explicitly flush all pending changes in the batch.
-   *
-   * This does not create a new checkpoint until `commit()` is called. This means it's
-   * safe to flush multiple times in the middle of a large transaction.
-   *
-   * @returns null if there are no changes to flush.
-   */
-  flush(): Promise<FlushedResult | null>;
-
-  /**
-   * Flush and commit any saved ops. This creates a new checkpoint by default.
-   *
-   * Only call this after a transaction.
-   */
-  commit(lsn: string, options?: BucketBatchCommitOptions): Promise<boolean>;
-
-  /**
-   * Advance the checkpoint LSN position, without any associated op.
-   *
-   * This must only be called when not inside a transaction.
-   *
-   * @returns true if the checkpoint was advanced, false if this was a no-op
-   */
-  keepalive(lsn: string): Promise<boolean>;
-
-  /**
-   * Get the last checkpoint LSN, from either commit or keepalive.
-   */
-  lastCheckpointLsn: string | null;
-
-  markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise<SourceTable[]>;
-
-  /**
-   * Queues the creation of a custom Write Checkpoint. This will be persisted after operations are flushed.
-   */
-  addCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): void;
-}
-
-export interface SaveParameterData {
-  sourceTable: SourceTable;
-  /** UUID */
-  sourceKey: string;
-  evaluated: EvaluatedParameters[];
-}
-
-export interface SaveBucketData {
-  sourceTable: SourceTable;
-  /** UUID */
-  sourceKey: string;
-
-  evaluated: EvaluatedRow[];
-}
-
-export type SaveOp = 'insert' | 'update' | 'delete';
-
-export type SaveOptions = SaveInsert | SaveUpdate | SaveDelete;
-
-export enum SaveOperationTag {
-  INSERT = 'insert',
-  UPDATE = 'update',
-  DELETE = 'delete'
-}
-
-export interface SaveInsert {
-  tag: SaveOperationTag.INSERT;
-  sourceTable: SourceTable;
-  before?: undefined;
-  beforeReplicaId?: undefined;
-  after: SqliteRow;
-  afterReplicaId: ReplicaId;
-}
-
-export interface SaveUpdate {
-  tag: SaveOperationTag.UPDATE;
-  sourceTable: SourceTable;
-
-  /**
-   * This is only present when the id has changed, and will only contain replica identity columns.
-   */
-  before?: SqliteRow;
-  beforeReplicaId?: ReplicaId;
-
-  /**
-   * A null value means null column.
-   *
-   * An undefined value means it's a TOAST value - must be copied from another record.
-   */
-  after: ToastableSqliteRow;
-  afterReplicaId: ReplicaId;
-}
-
-export interface SaveDelete {
-  tag: SaveOperationTag.DELETE;
-  sourceTable: SourceTable;
-  before?: SqliteRow;
-  beforeReplicaId: ReplicaId;
-  after?: undefined;
-  afterReplicaId?: undefined;
-}
-
-export interface SyncBucketDataBatch {
-  batch: util.SyncBucketData;
-  targetOp: bigint | null;
-}
-
 export function mergeToast(record: ToastableSqliteRow, persisted: ToastableSqliteRow): ToastableSqliteRow {
   const newRecord: ToastableSqliteRow = {};
   for (let key in record) {
```
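
The `BucketStorageBatch` contract removed above (now in `BucketStorageBatch.ts`) separates `flush()` — persist pending ops without creating a checkpoint, safe mid-transaction — from `commit()` — create a checkpoint after a completed source transaction. A sketch of that lifecycle using the interfaces as documented; the import path and all values are assumptions, not taken from the package:

```ts
import {
  BucketStorageBatch,
  SaveOperationTag,
  SourceTable,
  SyncRulesBucketStorage
} from '@powersync/service-core'; // assumed re-export path
import { SqliteRow } from '@powersync/service-sync-rules';

// Replicate a single inserted row, then checkpoint.
// LSN strings are Postgres-style placeholders.
async function replicateInsert(
  storage: SyncRulesBucketStorage,
  table: SourceTable,
  row: SqliteRow,
  replicaId: any // ReplicaId: any BSON-serializable value
) {
  await storage.startBatch(
    { zeroLSN: '0/0', defaultSchema: 'public', storeCurrentData: true },
    async (batch: BucketStorageBatch) => {
      // save() buffers the op and may flush on its own.
      await batch.save({
        tag: SaveOperationTag.INSERT,
        sourceTable: table,
        after: row,
        afterReplicaId: replicaId
      });
      // flush() persists pending ops without creating a checkpoint.
      await batch.flush();
      // commit() creates the checkpoint; per the docs, only call it
      // after the source transaction has completed.
      await batch.commit('0/1500');
    }
  );
}
```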
```diff
@@ -539,56 +41,3 @@ export function mergeToast(record: ToastableSqliteRow, persisted: ToastableSqlit
   }
   return newRecord;
 }
-
-export interface CompactOptions {
-  /**
-   * Heap memory limit for the compact process.
-   *
-   * Add around 64MB to this to determine the "--max-old-space-size" argument.
-   * Add another 80MB to get RSS usage / memory limits.
-   */
-  memoryLimitMB?: number;
-
-  /**
-   * If specified, ignore any operations newer than this when compacting.
-   *
-   * This is primarily for tests, where we want to test compacting at a specific
-   * point.
-   *
-   * This can also be used to create a "safe buffer" of recent operations that should
-   * not be compacted, to avoid invalidating checkpoints in use.
-   */
-  maxOpId?: bigint;
-
-  /**
-   * If specified, compact only the specific buckets.
-   *
-   * If not specified, compacts all buckets.
-   *
-   * These can be individual bucket names, or bucket definition names.
-   */
-  compactBuckets?: string[];
-}
-
-export interface TerminateOptions {
-  /**
-   * If true, also clear the storage before terminating.
-   */
-  clearStorage: boolean;
-}
-
-/**
- * Helper for tests.
- * This is not in the `service-core-tests` package in order for storage modules
- * to provide relevant factories without requiring `service-core-tests` as a direct dependency.
- */
-export interface TestStorageOptions {
-  /**
-   * By default, collections are only cleared.
-   * Setting this to true will drop the collections completely.
-   */
-  dropAll?: boolean;
-
-  doNotClear?: boolean;
-}
-export type TestStorageFactory = (options?: TestStorageOptions) => Promise<BucketStorageFactory>;
```
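
`mergeToast` is the one helper that stays behind in `BucketStorage.ts`. Combined with the `SaveUpdate` doc comment above (`undefined` marks a TOAST column that must be copied from another record), its expected behavior is roughly the following; the values and the import path are illustrative assumptions:

```ts
import { ToastableSqliteRow } from '@powersync/service-sync-rules';
import { mergeToast } from '@powersync/service-core'; // assumed re-export path

// A persisted copy of the row, and an incoming update where `blob`
// is an unchanged TOAST column (undefined).
const persisted: ToastableSqliteRow = { id: 1n, name: 'old', blob: 'large value' };
const update: ToastableSqliteRow = { id: 1n, name: 'new', blob: undefined };

// Expected (assuming the loop body elided by the diff fills undefined
// keys from the persisted row): { id: 1n, name: 'new', blob: 'large value' }
const merged = mergeToast(update, persisted);
```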