@powersync/service-core 0.18.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/CHANGELOG.md +16 -0
  2. package/dist/api/RouteAPI.d.ts +1 -1
  3. package/dist/api/diagnostics.js +107 -169
  4. package/dist/api/diagnostics.js.map +1 -1
  5. package/dist/entry/commands/compact-action.js +10 -73
  6. package/dist/entry/commands/compact-action.js.map +1 -1
  7. package/dist/modules/AbstractModule.d.ts +1 -1
  8. package/dist/replication/AbstractReplicator.js +2 -65
  9. package/dist/replication/AbstractReplicator.js.map +1 -1
  10. package/dist/routes/endpoints/checkpointing.js +3 -2
  11. package/dist/routes/endpoints/checkpointing.js.map +1 -1
  12. package/dist/routes/endpoints/socket-route.js +5 -5
  13. package/dist/routes/endpoints/socket-route.js.map +1 -1
  14. package/dist/routes/endpoints/sync-stream.js +5 -5
  15. package/dist/routes/endpoints/sync-stream.js.map +1 -1
  16. package/dist/runner/teardown.js +3 -65
  17. package/dist/runner/teardown.js.map +1 -1
  18. package/dist/storage/BucketStorage.d.ts +1 -442
  19. package/dist/storage/BucketStorage.js +0 -9
  20. package/dist/storage/BucketStorage.js.map +1 -1
  21. package/dist/storage/BucketStorageBatch.d.ts +130 -0
  22. package/dist/storage/BucketStorageBatch.js +10 -0
  23. package/dist/storage/BucketStorageBatch.js.map +1 -0
  24. package/dist/storage/BucketStorageFactory.d.ts +136 -0
  25. package/dist/storage/BucketStorageFactory.js +2 -0
  26. package/dist/storage/BucketStorageFactory.js.map +1 -0
  27. package/dist/storage/ChecksumCache.js.map +1 -1
  28. package/dist/storage/PersistedSyncRulesContent.d.ts +20 -0
  29. package/dist/storage/PersistedSyncRulesContent.js +2 -0
  30. package/dist/storage/PersistedSyncRulesContent.js.map +1 -0
  31. package/dist/storage/ReplicationEventPayload.d.ts +1 -1
  32. package/dist/storage/ReplicationLock.d.ts +4 -0
  33. package/dist/storage/ReplicationLock.js +2 -0
  34. package/dist/storage/ReplicationLock.js.map +1 -0
  35. package/dist/storage/StorageEngine.d.ts +4 -4
  36. package/dist/storage/StorageEngine.js +2 -2
  37. package/dist/storage/StorageEngine.js.map +1 -1
  38. package/dist/storage/StorageProvider.d.ts +4 -1
  39. package/dist/storage/SyncRulesBucketStorage.d.ts +201 -0
  40. package/dist/storage/SyncRulesBucketStorage.js +7 -0
  41. package/dist/storage/SyncRulesBucketStorage.js.map +1 -0
  42. package/dist/storage/bson.d.ts +11 -3
  43. package/dist/storage/bson.js +24 -2
  44. package/dist/storage/bson.js.map +1 -1
  45. package/dist/storage/storage-index.d.ts +5 -0
  46. package/dist/storage/storage-index.js +5 -0
  47. package/dist/storage/storage-index.js.map +1 -1
  48. package/dist/sync/BucketChecksumState.d.ts +91 -0
  49. package/dist/sync/BucketChecksumState.js +313 -0
  50. package/dist/sync/BucketChecksumState.js.map +1 -0
  51. package/dist/sync/sync-index.d.ts +1 -0
  52. package/dist/sync/sync-index.js +1 -0
  53. package/dist/sync/sync-index.js.map +1 -1
  54. package/dist/sync/sync.d.ts +7 -3
  55. package/dist/sync/sync.js +131 -135
  56. package/dist/sync/sync.js.map +1 -1
  57. package/dist/sync/util.d.ts +9 -0
  58. package/dist/sync/util.js +44 -0
  59. package/dist/sync/util.js.map +1 -1
  60. package/dist/util/checkpointing.d.ts +1 -1
  61. package/dist/util/checkpointing.js +15 -78
  62. package/dist/util/checkpointing.js.map +1 -1
  63. package/dist/util/protocol-types.d.ts +13 -4
  64. package/package.json +4 -4
  65. package/src/api/RouteAPI.ts +1 -1
  66. package/src/api/diagnostics.ts +1 -1
  67. package/src/entry/commands/compact-action.ts +2 -3
  68. package/src/modules/AbstractModule.ts +1 -1
  69. package/src/replication/AbstractReplicator.ts +1 -2
  70. package/src/routes/endpoints/checkpointing.ts +3 -3
  71. package/src/routes/endpoints/socket-route.ts +7 -5
  72. package/src/routes/endpoints/sync-stream.ts +8 -5
  73. package/src/runner/teardown.ts +1 -1
  74. package/src/storage/BucketStorage.ts +1 -552
  75. package/src/storage/BucketStorageBatch.ts +158 -0
  76. package/src/storage/BucketStorageFactory.ts +156 -0
  77. package/src/storage/ChecksumCache.ts +1 -0
  78. package/src/storage/PersistedSyncRulesContent.ts +26 -0
  79. package/src/storage/ReplicationEventPayload.ts +1 -1
  80. package/src/storage/ReplicationLock.ts +5 -0
  81. package/src/storage/StorageEngine.ts +4 -4
  82. package/src/storage/StorageProvider.ts +4 -1
  83. package/src/storage/SyncRulesBucketStorage.ts +256 -0
  84. package/src/storage/bson.ts +28 -4
  85. package/src/storage/storage-index.ts +5 -0
  86. package/src/sync/BucketChecksumState.ts +392 -0
  87. package/src/sync/sync-index.ts +1 -0
  88. package/src/sync/sync.ts +173 -157
  89. package/src/sync/util.ts +54 -0
  90. package/src/util/checkpointing.ts +4 -6
  91. package/src/util/protocol-types.ts +16 -4
  92. package/test/src/auth.test.ts +5 -5
  93. package/test/src/sync/BucketChecksumState.test.ts +565 -0
  94. package/test/src/sync/util.test.ts +34 -0
  95. package/tsconfig.tsbuildinfo +1 -1
package/src/storage/BucketStorageBatch.ts
@@ -0,0 +1,158 @@
+ import { ObserverClient } from '@powersync/lib-services-framework';
+ import { EvaluatedParameters, EvaluatedRow, SqliteRow, ToastableSqliteRow } from '@powersync/service-sync-rules';
+ import { BSON } from 'bson';
+ import { ReplicationEventPayload } from './ReplicationEventPayload.js';
+ import { SourceTable } from './SourceTable.js';
+ import { BatchedCustomWriteCheckpointOptions } from './storage-index.js';
+
+ export const DEFAULT_BUCKET_BATCH_COMMIT_OPTIONS: ResolvedBucketBatchCommitOptions = {
+   createEmptyCheckpoints: true
+ };
+
+ export interface BucketStorageBatch extends ObserverClient<BucketBatchStorageListener>, AsyncDisposable {
+   /**
+    * Save an op, and potentially flush.
+    *
+    * This can be an insert, update or delete op.
+    */
+   save(record: SaveOptions): Promise<FlushedResult | null>;
+
+   /**
+    * Replicate a truncate op - deletes all data in the specified tables.
+    */
+   truncate(sourceTables: SourceTable[]): Promise<FlushedResult | null>;
+
+   /**
+    * Drop one or more tables.
+    *
+    * This is the same as truncate, but additionally removes the SourceTable record.
+    */
+   drop(sourceTables: SourceTable[]): Promise<FlushedResult | null>;
+
+   /**
+    * Explicitly flush all pending changes in the batch.
+    *
+    * This does not create a new checkpoint until `commit()` is called. This means it's
+    * safe to flush multiple times in the middle of a large transaction.
+    *
+    * @returns null if there are no changes to flush.
+    */
+   flush(): Promise<FlushedResult | null>;
+
+   /**
+    * Flush and commit any saved ops. This creates a new checkpoint by default.
+    *
+    * Only call this after a transaction.
+    */
+   commit(lsn: string, options?: BucketBatchCommitOptions): Promise<boolean>;
+
+   /**
+    * Advance the checkpoint LSN position, without any associated op.
+    *
+    * This must only be called when not inside a transaction.
+    *
+    * @returns true if the checkpoint was advanced, false if this was a no-op
+    */
+   keepalive(lsn: string): Promise<boolean>;
+
+   /**
+    * Get the last checkpoint LSN, from either commit or keepalive.
+    */
+   lastCheckpointLsn: string | null;
+
+   markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string): Promise<SourceTable[]>;
+
+   /**
+    * Queues the creation of a custom Write Checkpoint. This will be persisted after operations are flushed.
+    */
+   addCustomWriteCheckpoint(checkpoint: BatchedCustomWriteCheckpointOptions): void;
+ }
+
+ /**
+  * Replica id uniquely identifying a row on the source database.
+  *
+  * Can be any value serializable to BSON.
+  *
+  * If the value is an entire document, a v5 UUID computed from the serialized data may be a good choice here.
+  */
+ export type ReplicaId = BSON.UUID | BSON.Document | any;
+
+ export interface SaveParameterData {
+   sourceTable: SourceTable;
+   /** UUID */
+   sourceKey: string;
+   evaluated: EvaluatedParameters[];
+ }
+
+ export interface SaveBucketData {
+   sourceTable: SourceTable;
+   /** UUID */
+   sourceKey: string;
+
+   evaluated: EvaluatedRow[];
+ }
+
+ export type SaveOp = 'insert' | 'update' | 'delete';
+
+ export type SaveOptions = SaveInsert | SaveUpdate | SaveDelete;
+
+ export enum SaveOperationTag {
+   INSERT = 'insert',
+   UPDATE = 'update',
+   DELETE = 'delete'
+ }
+
+ export interface SaveInsert {
+   tag: SaveOperationTag.INSERT;
+   sourceTable: SourceTable;
+   before?: undefined;
+   beforeReplicaId?: undefined;
+   after: SqliteRow;
+   afterReplicaId: ReplicaId;
+ }
+
+ export interface SaveUpdate {
+   tag: SaveOperationTag.UPDATE;
+   sourceTable: SourceTable;
+
+   /**
+    * This is only present when the id has changed, and will only contain replica identity columns.
+    */
+   before?: SqliteRow;
+   beforeReplicaId?: ReplicaId;
+
+   /**
+    * A null value means null column.
+    *
+    * An undefined value means it's a TOAST value - must be copied from another record.
+    */
+   after: ToastableSqliteRow;
+   afterReplicaId: ReplicaId;
+ }
+
+ export interface SaveDelete {
+   tag: SaveOperationTag.DELETE;
+   sourceTable: SourceTable;
+   before?: SqliteRow;
+   beforeReplicaId: ReplicaId;
+   after?: undefined;
+   afterReplicaId?: undefined;
+ }
+
+ export interface BucketBatchStorageListener {
+   replicationEvent: (payload: ReplicationEventPayload) => void;
+ }
+
+ export interface FlushedResult {
+   flushed_op: string;
+ }
+
+ export interface BucketBatchCommitOptions {
+   /**
+    * Creates a new checkpoint even if there were no persisted operations.
+    * Defaults to true.
+    */
+   createEmptyCheckpoints?: boolean;
+ }
+
+ export type ResolvedBucketBatchCommitOptions = Required<BucketBatchCommitOptions>;
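
Note: the save/flush/commit contract documented above implies a particular call pattern during replication. The following is a minimal illustrative sketch, not code from this package; applyTransaction, ops, and commitLsn are hypothetical placeholders.

// Illustrative only: a hypothetical replication loop over the
// BucketStorageBatch interface above. `ops` and `commitLsn` are placeholders.
async function applyTransaction(batch: BucketStorageBatch, ops: SaveOptions[], commitLsn: string) {
  for (const op of ops) {
    // save() persists an insert/update/delete op and may flush internally.
    await batch.save(op);
  }
  // Safe in the middle of a large transaction: persists pending ops
  // without creating a checkpoint.
  await batch.flush();
  // Only after the source transaction completes: flushes and, by default,
  // creates a new checkpoint.
  await batch.commit(commitLsn);
}
// Outside a transaction, advance the checkpoint LSN without any op:
//   await batch.keepalive(lsn);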
package/src/storage/BucketStorageFactory.ts
@@ -0,0 +1,156 @@
+ import { ObserverClient } from '@powersync/lib-services-framework';
+ import { ParseSyncRulesOptions, PersistedSyncRules, PersistedSyncRulesContent } from './PersistedSyncRulesContent.js';
+ import { ReplicationEventPayload } from './ReplicationEventPayload.js';
+ import { ReplicationLock } from './ReplicationLock.js';
+ import { SyncRulesBucketStorage } from './SyncRulesBucketStorage.js';
+
+ /**
+  * Represents a configured storage provider.
+  *
+  * The provider can handle multiple copies of sync rules concurrently, each with their own storage.
+  * This is to handle replication of a new version of sync rules, while the old version is still active.
+  *
+  * Storage APIs for a specific copy of sync rules are provided by the `SyncRulesBucketStorage` instances.
+  */
+ export interface BucketStorageFactory extends ObserverClient<BucketStorageFactoryListener>, AsyncDisposable {
+   /**
+    * Update sync rules from configuration, if changed.
+    */
+   configureSyncRules(
+     options: UpdateSyncRulesOptions
+   ): Promise<{ updated: boolean; persisted_sync_rules?: PersistedSyncRulesContent; lock?: ReplicationLock }>;
+
+   /**
+    * Get a storage instance to query sync data for specific sync rules.
+    */
+   getInstance(options: PersistedSyncRulesContent): SyncRulesBucketStorage;
+
+   /**
+    * Deploy new sync rules.
+    */
+   updateSyncRules(options: UpdateSyncRulesOptions): Promise<PersistedSyncRulesContent>;
+
+   /**
+    * Indicate that a slot was removed, and we should re-sync by creating
+    * a new sync rules instance.
+    *
+    * This is roughly the same as deploying a new version of the current sync
+    * rules, but also accounts for cases where the current sync rules are not
+    * the latest ones.
+    *
+    * Replication should be restarted after this.
+    *
+    * @param slot_name The removed slot
+    */
+   slotRemoved(slot_name: string): Promise<void>;
+
+   /**
+    * Get the sync rules used for querying.
+    */
+   getActiveSyncRules(options: ParseSyncRulesOptions): Promise<PersistedSyncRules | null>;
+
+   /**
+    * Get the sync rules used for querying.
+    */
+   getActiveSyncRulesContent(): Promise<PersistedSyncRulesContent | null>;
+
+   /**
+    * Get the sync rules that will be active next once done with initial replication.
+    */
+   getNextSyncRules(options: ParseSyncRulesOptions): Promise<PersistedSyncRules | null>;
+
+   /**
+    * Get the sync rules that will be active next once done with initial replication.
+    */
+   getNextSyncRulesContent(): Promise<PersistedSyncRulesContent | null>;
+
+   /**
+    * Get all sync rules currently replicating. Typically this is the "active" and "next" sync rules.
+    */
+   getReplicatingSyncRules(): Promise<PersistedSyncRulesContent[]>;
+
+   /**
+    * Get all sync rules stopped but not terminated yet.
+    */
+   getStoppedSyncRules(): Promise<PersistedSyncRulesContent[]>;
+
+   /**
+    * Get the active storage instance.
+    */
+   getActiveStorage(): Promise<SyncRulesBucketStorage | null>;
+
+   /**
+    * Get storage size of active sync rules.
+    */
+   getStorageMetrics(): Promise<StorageMetrics>;
+
+   /**
+    * Get the unique identifier for this instance of PowerSync.
+    */
+   getPowerSyncInstanceId(): Promise<string>;
+
+   /**
+    * Get a unique identifier for the system used for storage.
+    */
+   getSystemIdentifier(): Promise<BucketStorageSystemIdentifier>;
+ }
+
+ export interface BucketStorageFactoryListener {
+   syncStorageCreated: (storage: SyncRulesBucketStorage) => void;
+   replicationEvent: (event: ReplicationEventPayload) => void;
+ }
+
+ export interface StorageMetrics {
+   /**
+    * Size of operations (bucket_data)
+    */
+   operations_size_bytes: number;
+
+   /**
+    * Size of parameter storage.
+    *
+    * Replication storage -> raw data as received from Postgres.
+    */
+   parameters_size_bytes: number;
+
+   /**
+    * Size of current_data.
+    */
+   replication_size_bytes: number;
+ }
+
+ export interface UpdateSyncRulesOptions {
+   content: string;
+   lock?: boolean;
+   validate?: boolean;
+ }
+
+ export interface BucketStorageSystemIdentifier {
+   /**
+    * A unique identifier for the system used for storage.
+    * For Postgres this can be the cluster `system_identifier` and database name.
+    * For MongoDB this can be the replica set name.
+    */
+   id: string;
+   /**
+    * A unique type for the storage implementation.
+    * e.g. `mongodb`, `postgresql`.
+    */
+   type: string;
+ }
+
+ /**
+  * Helper for tests.
+  * This is not in the `service-core-tests` package in order for storage modules
+  * to provide relevant factories without requiring `service-core-tests` as a direct dependency.
+  */
+ export interface TestStorageOptions {
+   /**
+    * By default, collections are only cleared.
+    * Setting this to true will drop the collections completely.
+    */
+   dropAll?: boolean;
+
+   doNotClear?: boolean;
+ }
+ export type TestStorageFactory = (options?: TestStorageOptions) => Promise<BucketStorageFactory>;
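
Note: since the factory tracks several copies of sync rules at once, callers resolve a SyncRulesBucketStorage through the factory. A hypothetical sketch using only the methods declared above; resolveStorage is not part of this package.

// Hypothetical helper: prefer the sync rules currently serving queries,
// fall back to rules that are still replicating. Not part of this package.
async function resolveStorage(factory: BucketStorageFactory): Promise<SyncRulesBucketStorage | null> {
  const active = await factory.getActiveSyncRulesContent();
  if (active != null) {
    return factory.getInstance(active);
  }
  const replicating = await factory.getReplicatingSyncRules();
  return replicating.length > 0 ? factory.getInstance(replicating[0]) : null;
}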
package/src/storage/ChecksumCache.ts
@@ -2,6 +2,7 @@ import { BucketChecksum, OpId } from '../util/protocol-types.js';
  import { ChecksumMap, addBucketChecksums } from '../util/utils.js';
  import { LRUCache } from 'lru-cache/min';
  import { OrderedSet } from '@js-sdsl/ordered-set';
+ import { BucketPriority } from '@powersync/service-sync-rules';

  interface ChecksumFetchContext {
    fetch(bucket: string): Promise<BucketChecksum>;
package/src/storage/PersistedSyncRulesContent.ts
@@ -0,0 +1,26 @@
+ import { SqlSyncRules } from '@powersync/service-sync-rules';
+ import { ReplicationLock } from './ReplicationLock.js';
+
+ export interface ParseSyncRulesOptions {
+   defaultSchema: string;
+ }
+
+ export interface PersistedSyncRulesContent {
+   readonly id: number;
+   readonly sync_rules_content: string;
+   readonly slot_name: string;
+
+   readonly last_fatal_error?: string | null;
+   readonly last_keepalive_ts?: Date | null;
+   readonly last_checkpoint_ts?: Date | null;
+
+   parsed(options: ParseSyncRulesOptions): PersistedSyncRules;
+
+   lock(): Promise<ReplicationLock>;
+ }
+
+ export interface PersistedSyncRules {
+   readonly id: number;
+   readonly sync_rules: SqlSyncRules;
+   readonly slot_name: string;
+ }
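
Note: the intended usage appears to be taking the replication lock before acting on a persisted row, then parsing the content. A hypothetical sketch; the 'public' defaultSchema is an assumed value, and ReplicationLock is declared later in this diff.

// Hypothetical usage sketch; `defaultSchema: 'public'` is an assumption.
async function withParsedRules(content: PersistedSyncRulesContent): Promise<void> {
  const lock = await content.lock();
  try {
    const parsed: PersistedSyncRules = content.parsed({ defaultSchema: 'public' });
    // ... replicate using parsed.sync_rules and parsed.slot_name ...
  } finally {
    await lock.release();
  }
}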
package/src/storage/ReplicationEventPayload.ts
@@ -1,6 +1,6 @@
  import * as sync_rules from '@powersync/service-sync-rules';
- import { BucketStorageBatch, SaveOp } from './BucketStorage.js';
  import { SourceTable } from './SourceTable.js';
+ import { BucketStorageBatch, SaveOp } from './BucketStorageBatch.js';

  export type EventData = {
    op: SaveOp;
package/src/storage/ReplicationLock.ts
@@ -0,0 +1,5 @@
+ export interface ReplicationLock {
+   sync_rules_id: number;
+
+   release(): Promise<void>;
+ }
package/src/storage/StorageEngine.ts
@@ -1,17 +1,17 @@
- import { DisposableListener, DisposableObserver, logger } from '@powersync/lib-services-framework';
+ import { BaseObserver, logger } from '@powersync/lib-services-framework';
  import { ResolvedPowerSyncConfig } from '../util/util-index.js';
- import { BucketStorageFactory } from './BucketStorage.js';
  import { ActiveStorage, BucketStorageProvider } from './StorageProvider.js';
+ import { BucketStorageFactory } from './BucketStorageFactory.js';

  export type StorageEngineOptions = {
    configuration: ResolvedPowerSyncConfig;
  };

- export interface StorageEngineListener extends DisposableListener {
+ export interface StorageEngineListener {
    storageActivated: (storage: BucketStorageFactory) => void;
  }

- export class StorageEngine extends DisposableObserver<StorageEngineListener> {
+ export class StorageEngine extends BaseObserver<StorageEngineListener> {
    // TODO: This will need to be revisited when we actually support multiple storage providers.
    private storageProviders: Map<string, BucketStorageProvider> = new Map();
    private currentActiveStorage: ActiveStorage | null = null;
package/src/storage/StorageProvider.ts
@@ -1,5 +1,5 @@
  import * as util from '../util/util-index.js';
- import { BucketStorageFactory } from './BucketStorage.js';
+ import { BucketStorageFactory } from './BucketStorageFactory.js';

  export interface ActiveStorage {
    storage: BucketStorageFactory;
@@ -16,6 +16,9 @@ export interface GetStorageOptions {
    resolvedConfig: util.ResolvedPowerSyncConfig;
  }

+ /**
+  * Represents a provider that can create a storage instance for a specific storage type from configuration.
+  */
  export interface BucketStorageProvider {
    /**
     * The storage type that this provider provides.
package/src/storage/SyncRulesBucketStorage.ts
@@ -0,0 +1,256 @@
+ import { ObserverClient } from '@powersync/lib-services-framework';
+ import { SqlSyncRules, SqliteJsonRow, SqliteJsonValue } from '@powersync/service-sync-rules';
+ import * as util from '../util/util-index.js';
+ import { BucketStorageBatch, FlushedResult } from './BucketStorageBatch.js';
+ import { BucketStorageFactory } from './BucketStorageFactory.js';
+ import { ParseSyncRulesOptions } from './PersistedSyncRulesContent.js';
+ import { SourceEntityDescriptor } from './SourceEntity.js';
+ import { SourceTable } from './SourceTable.js';
+ import { SyncStorageWriteCheckpointAPI } from './WriteCheckpointAPI.js';
+
+ /**
+  * Storage for a specific copy of sync rules.
+  */
+ export interface SyncRulesBucketStorage
+   extends ObserverClient<SyncRulesBucketStorageListener>,
+     SyncStorageWriteCheckpointAPI {
+   readonly group_id: number;
+   readonly slot_name: string;
+
+   readonly factory: BucketStorageFactory;
+
+   /**
+    * Resolve a table, keeping track of it internally.
+    */
+   resolveTable(options: ResolveTableOptions): Promise<ResolveTableResult>;
+
+   /**
+    * Use this to get access to update storage data.
+    */
+   startBatch(
+     options: StartBatchOptions,
+     callback: (batch: BucketStorageBatch) => Promise<void>
+   ): Promise<FlushedResult | null>;
+
+   getParsedSyncRules(options: ParseSyncRulesOptions): SqlSyncRules;
+
+   /**
+    * Terminate the sync rules.
+    *
+    * This clears the storage, and sets state to TERMINATED.
+    *
+    * Must only be called on stopped sync rules.
+    */
+   terminate(options?: TerminateOptions): Promise<void>;
+
+   getStatus(): Promise<SyncRuleStatus>;
+
+   /**
+    * Clear the storage, without changing state.
+    */
+   clear(): Promise<void>;
+
+   autoActivate(): Promise<void>;
+
+   /**
+    * Record a replication error.
+    *
+    * This could be a recoverable error (e.g. temporary network failure),
+    * or a permanent error (e.g. missing toast data).
+    *
+    * Errors are cleared on commit.
+    */
+   reportError(e: any): Promise<void>;
+
+   compact(options?: CompactOptions): Promise<void>;
+
+   // ## Read operations
+
+   getCheckpoint(): Promise<ReplicationCheckpoint>;
+
+   /**
+    * Used to resolve "dynamic" parameter queries.
+    */
+   getParameterSets(checkpoint: util.OpId, lookups: SqliteJsonValue[][]): Promise<SqliteJsonRow[]>;
+
+   getCheckpointChanges(options: GetCheckpointChangesOptions): Promise<CheckpointChanges>;
+
+   /**
+    * Yields the latest user write checkpoint whenever the sync checkpoint updates.
+    *
+    * The stream stops or errors if this is not the active sync rules (anymore).
+    */
+   watchWriteCheckpoint(options: WatchWriteCheckpointOptions): AsyncIterable<StorageCheckpointUpdate>;
+
+   /**
+    * Get a "batch" of data for a checkpoint.
+    *
+    * The results will be split into separate SyncBucketData chunks to:
+    * 1. Separate buckets.
+    * 2. Limit the size of each individual chunk according to options.batchSizeLimitBytes.
+    *
+    * @param checkpoint the checkpoint
+    * @param dataBuckets current bucket states
+    * @param options batch size options
+    */
+   getBucketDataBatch(
+     checkpoint: util.OpId,
+     dataBuckets: Map<string, string>,
+     options?: BucketDataBatchOptions
+   ): AsyncIterable<SyncBucketDataBatch>;
+
+   /**
+    * Compute checksums for a given list of buckets.
+    *
+    * Returns zero checksums for any buckets not found.
+    */
+   getChecksums(checkpoint: util.OpId, buckets: string[]): Promise<util.ChecksumMap>;
+ }
+
+ export interface SyncRulesBucketStorageListener {
+   batchStarted: (batch: BucketStorageBatch) => void;
+ }
+
+ export interface SyncRuleStatus {
+   checkpoint_lsn: string | null;
+   active: boolean;
+   snapshot_done: boolean;
+ }
+ export interface ResolveTableOptions {
+   group_id: number;
+   connection_id: number;
+   connection_tag: string;
+   entity_descriptor: SourceEntityDescriptor;
+
+   sync_rules: SqlSyncRules;
+ }
+
+ export interface ResolveTableResult {
+   table: SourceTable;
+   dropTables: SourceTable[];
+ }
+
+ export interface StartBatchOptions extends ParseSyncRulesOptions {
+   zeroLSN: string;
+   /**
+    * Whether or not to store a copy of the current data.
+    *
+    * This is needed if we need to apply partial updates, for example
+    * when we get TOAST values from Postgres.
+    *
+    * This is not needed when we get the full document from the source
+    * database, for example from MongoDB.
+    */
+   storeCurrentData: boolean;
+
+   /**
+    * Set to true for initial replication.
+    *
+    * This will avoid creating new operations for rows previously replicated.
+    */
+   skipExistingRows?: boolean;
+ }
+
+ export interface CompactOptions {
+   /**
+    * Heap memory limit for the compact process.
+    *
+    * Add around 64MB to this to determine the "--max-old-space-size" argument.
+    * Add another 80MB to get RSS usage / memory limits.
+    */
+   memoryLimitMB?: number;
+
+   /**
+    * If specified, ignore any operations newer than this when compacting.
+    *
+    * This is primarily for tests, where we want to test compacting at a specific
+    * point.
+    *
+    * This can also be used to create a "safe buffer" of recent operations that should
+    * not be compacted, to avoid invalidating checkpoints in use.
+    */
+   maxOpId?: bigint;
+
+   /**
+    * If specified, compact only the specific buckets.
+    *
+    * If not specified, compacts all buckets.
+    *
+    * These can be individual bucket names, or bucket definition names.
+    */
+   compactBuckets?: string[];
+ }
+
+ export interface TerminateOptions {
+   /**
+    * If true, also clear the storage before terminating.
+    */
+   clearStorage: boolean;
+ }
+
+ export interface BucketDataBatchOptions {
+   /** Limit number of documents returned. Defaults to 1000. */
+   limit?: number;
+
+   /**
+    * Limit size of chunks returned. Defaults to 1MB.
+    *
+    * This is a lower bound, not an upper bound. As soon as the chunk size goes over this limit,
+    * it is returned.
+    *
+    * Note that an individual data row can be close to 16MB in size, so this does not help in
+    * extreme cases.
+    */
+   chunkLimitBytes?: number;
+ }
+
+ export interface SyncBucketDataBatch {
+   batch: util.SyncBucketData;
+   targetOp: bigint | null;
+ }
+
+ export interface ReplicationCheckpoint {
+   readonly checkpoint: util.OpId;
+   readonly lsn: string | null;
+ }
+
+ export interface WatchWriteCheckpointOptions {
+   /** user_id and client_id combined. */
+   user_id: string;
+
+   signal: AbortSignal;
+ }
+
+ export interface WatchFilterEvent {
+   changedDataBucket?: string;
+   changedParameterBucketDefinition?: string;
+   invalidate?: boolean;
+ }
+
+ export interface WriteCheckpoint {
+   base: ReplicationCheckpoint;
+   writeCheckpoint: bigint | null;
+ }
+
+ export interface StorageCheckpointUpdate extends WriteCheckpoint {
+   update: CheckpointChanges;
+ }
+
+ export interface GetCheckpointChangesOptions {
+   lastCheckpoint: util.OpId;
+   nextCheckpoint: util.OpId;
+ }
+
+ export interface CheckpointChanges {
+   updatedDataBuckets: string[];
+   invalidateDataBuckets: boolean;
+   updatedParameterBucketDefinitions: string[];
+   invalidateParameterBuckets: boolean;
+ }
+
+ export const CHECKPOINT_INVALIDATE_ALL: CheckpointChanges = {
+   updatedDataBuckets: [],
+   invalidateDataBuckets: true,
+   updatedParameterBucketDefinitions: [],
+   invalidateParameterBuckets: true
+ };
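
Note: the read operations above compose into a checkpoint-then-stream flow. A minimal illustrative sketch; readBuckets and the contents of the buckets map are hypothetical, not code from this package.

// Hypothetical read path: resolve the current checkpoint, compute checksums,
// then stream per-bucket data chunks for that checkpoint.
async function readBuckets(storage: SyncRulesBucketStorage, buckets: Map<string, string>) {
  const { checkpoint } = await storage.getCheckpoint();
  // Zero checksums are returned for buckets that do not exist.
  const checksums = await storage.getChecksums(checkpoint, [...buckets.keys()]);
  for await (const { batch, targetOp } of storage.getBucketDataBatch(checkpoint, buckets, {
    chunkLimitBytes: 1024 * 1024 // a lower bound per chunk, per the doc comment above
  })) {
    // Consume `batch` (one util.SyncBucketData chunk for a single bucket)
    // and `targetOp`, e.g. to forward data to a sync client along with
    // the checksums computed above.
  }
}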