@powersync/common 1.40.0 → 1.41.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundle.cjs +10809 -22
- package/dist/bundle.cjs.map +1 -0
- package/dist/bundle.mjs +10730 -22
- package/dist/bundle.mjs.map +1 -0
- package/dist/bundle.node.cjs +10809 -0
- package/dist/bundle.node.cjs.map +1 -0
- package/dist/bundle.node.mjs +10730 -0
- package/dist/bundle.node.mjs.map +1 -0
- package/dist/index.d.cts +5 -1
- package/lib/client/AbstractPowerSyncDatabase.js +1 -0
- package/lib/client/AbstractPowerSyncDatabase.js.map +1 -0
- package/lib/client/AbstractPowerSyncOpenFactory.js +1 -0
- package/lib/client/AbstractPowerSyncOpenFactory.js.map +1 -0
- package/lib/client/ConnectionManager.js +1 -0
- package/lib/client/ConnectionManager.js.map +1 -0
- package/lib/client/CustomQuery.js +1 -0
- package/lib/client/CustomQuery.js.map +1 -0
- package/lib/client/Query.js +1 -0
- package/lib/client/Query.js.map +1 -0
- package/lib/client/SQLOpenFactory.js +1 -0
- package/lib/client/SQLOpenFactory.js.map +1 -0
- package/lib/client/compilableQueryWatch.js +1 -0
- package/lib/client/compilableQueryWatch.js.map +1 -0
- package/lib/client/connection/PowerSyncBackendConnector.js +1 -0
- package/lib/client/connection/PowerSyncBackendConnector.js.map +1 -0
- package/lib/client/connection/PowerSyncCredentials.js +1 -0
- package/lib/client/connection/PowerSyncCredentials.js.map +1 -0
- package/lib/client/constants.js +1 -0
- package/lib/client/constants.js.map +1 -0
- package/lib/client/runOnSchemaChange.js +1 -0
- package/lib/client/runOnSchemaChange.js.map +1 -0
- package/lib/client/sync/bucket/BucketStorageAdapter.js +1 -0
- package/lib/client/sync/bucket/BucketStorageAdapter.js.map +1 -0
- package/lib/client/sync/bucket/CrudBatch.js +1 -0
- package/lib/client/sync/bucket/CrudBatch.js.map +1 -0
- package/lib/client/sync/bucket/CrudEntry.js +1 -0
- package/lib/client/sync/bucket/CrudEntry.js.map +1 -0
- package/lib/client/sync/bucket/CrudTransaction.js +1 -0
- package/lib/client/sync/bucket/CrudTransaction.js.map +1 -0
- package/lib/client/sync/bucket/OpType.js +1 -0
- package/lib/client/sync/bucket/OpType.js.map +1 -0
- package/lib/client/sync/bucket/OplogEntry.js +1 -0
- package/lib/client/sync/bucket/OplogEntry.js.map +1 -0
- package/lib/client/sync/bucket/SqliteBucketStorage.js +1 -0
- package/lib/client/sync/bucket/SqliteBucketStorage.js.map +1 -0
- package/lib/client/sync/bucket/SyncDataBatch.js +1 -0
- package/lib/client/sync/bucket/SyncDataBatch.js.map +1 -0
- package/lib/client/sync/bucket/SyncDataBucket.js +1 -0
- package/lib/client/sync/bucket/SyncDataBucket.js.map +1 -0
- package/lib/client/sync/stream/AbstractRemote.d.ts +5 -0
- package/lib/client/sync/stream/AbstractRemote.js +9 -2
- package/lib/client/sync/stream/AbstractRemote.js.map +1 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +1 -0
- package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js.map +1 -0
- package/lib/client/sync/stream/WebsocketClientTransport.js +1 -0
- package/lib/client/sync/stream/WebsocketClientTransport.js.map +1 -0
- package/lib/client/sync/stream/core-instruction.js +1 -0
- package/lib/client/sync/stream/core-instruction.js.map +1 -0
- package/lib/client/sync/stream/streaming-sync-types.js +1 -0
- package/lib/client/sync/stream/streaming-sync-types.js.map +1 -0
- package/lib/client/sync/sync-streams.js +1 -0
- package/lib/client/sync/sync-streams.js.map +1 -0
- package/lib/client/triggers/TriggerManager.js +1 -0
- package/lib/client/triggers/TriggerManager.js.map +1 -0
- package/lib/client/triggers/TriggerManagerImpl.js +1 -0
- package/lib/client/triggers/TriggerManagerImpl.js.map +1 -0
- package/lib/client/triggers/sanitizeSQL.js +1 -0
- package/lib/client/triggers/sanitizeSQL.js.map +1 -0
- package/lib/client/watched/GetAllQuery.js +1 -0
- package/lib/client/watched/GetAllQuery.js.map +1 -0
- package/lib/client/watched/WatchedQuery.js +1 -0
- package/lib/client/watched/WatchedQuery.js.map +1 -0
- package/lib/client/watched/processors/AbstractQueryProcessor.js +1 -0
- package/lib/client/watched/processors/AbstractQueryProcessor.js.map +1 -0
- package/lib/client/watched/processors/DifferentialQueryProcessor.js +1 -0
- package/lib/client/watched/processors/DifferentialQueryProcessor.js.map +1 -0
- package/lib/client/watched/processors/OnChangeQueryProcessor.js +1 -0
- package/lib/client/watched/processors/OnChangeQueryProcessor.js.map +1 -0
- package/lib/client/watched/processors/comparators.js +1 -0
- package/lib/client/watched/processors/comparators.js.map +1 -0
- package/lib/db/DBAdapter.js +1 -0
- package/lib/db/DBAdapter.js.map +1 -0
- package/lib/db/crud/SyncProgress.js +1 -0
- package/lib/db/crud/SyncProgress.js.map +1 -0
- package/lib/db/crud/SyncStatus.js +1 -0
- package/lib/db/crud/SyncStatus.js.map +1 -0
- package/lib/db/crud/UploadQueueStatus.js +1 -0
- package/lib/db/crud/UploadQueueStatus.js.map +1 -0
- package/lib/db/schema/Column.js +1 -0
- package/lib/db/schema/Column.js.map +1 -0
- package/lib/db/schema/Index.js +1 -0
- package/lib/db/schema/Index.js.map +1 -0
- package/lib/db/schema/IndexedColumn.js +1 -0
- package/lib/db/schema/IndexedColumn.js.map +1 -0
- package/lib/db/schema/RawTable.js +1 -0
- package/lib/db/schema/RawTable.js.map +1 -0
- package/lib/db/schema/Schema.d.ts +0 -1
- package/lib/db/schema/Schema.js +4 -8
- package/lib/db/schema/Schema.js.map +1 -0
- package/lib/db/schema/Table.js +1 -0
- package/lib/db/schema/Table.js.map +1 -0
- package/lib/db/schema/TableV2.js +1 -0
- package/lib/db/schema/TableV2.js.map +1 -0
- package/lib/index.js +1 -0
- package/lib/index.js.map +1 -0
- package/lib/types/types.js +1 -0
- package/lib/types/types.js.map +1 -0
- package/lib/utils/AbortOperation.js +1 -0
- package/lib/utils/AbortOperation.js.map +1 -0
- package/lib/utils/BaseObserver.js +1 -0
- package/lib/utils/BaseObserver.js.map +1 -0
- package/lib/utils/ControlledExecutor.js +1 -0
- package/lib/utils/ControlledExecutor.js.map +1 -0
- package/lib/utils/DataStream.js +1 -0
- package/lib/utils/DataStream.js.map +1 -0
- package/lib/utils/Logger.js +1 -0
- package/lib/utils/Logger.js.map +1 -0
- package/lib/utils/MetaBaseObserver.js +1 -0
- package/lib/utils/MetaBaseObserver.js.map +1 -0
- package/lib/utils/async.js +1 -0
- package/lib/utils/async.js.map +1 -0
- package/lib/utils/mutex.js +1 -0
- package/lib/utils/mutex.js.map +1 -0
- package/lib/utils/parseQuery.js +1 -0
- package/lib/utils/parseQuery.js.map +1 -0
- package/package.json +23 -15
- package/src/client/AbstractPowerSyncDatabase.ts +1343 -0
- package/src/client/AbstractPowerSyncOpenFactory.ts +39 -0
- package/src/client/ConnectionManager.ts +402 -0
- package/src/client/CustomQuery.ts +56 -0
- package/src/client/Query.ts +106 -0
- package/src/client/SQLOpenFactory.ts +55 -0
- package/src/client/compilableQueryWatch.ts +55 -0
- package/src/client/connection/PowerSyncBackendConnector.ts +25 -0
- package/src/client/connection/PowerSyncCredentials.ts +5 -0
- package/src/client/constants.ts +1 -0
- package/src/client/runOnSchemaChange.ts +31 -0
- package/src/client/sync/bucket/BucketStorageAdapter.ts +118 -0
- package/src/client/sync/bucket/CrudBatch.ts +21 -0
- package/src/client/sync/bucket/CrudEntry.ts +172 -0
- package/src/client/sync/bucket/CrudTransaction.ts +21 -0
- package/src/client/sync/bucket/OpType.ts +23 -0
- package/src/client/sync/bucket/OplogEntry.ts +50 -0
- package/src/client/sync/bucket/SqliteBucketStorage.ts +395 -0
- package/src/client/sync/bucket/SyncDataBatch.ts +11 -0
- package/src/client/sync/bucket/SyncDataBucket.ts +49 -0
- package/src/client/sync/stream/AbstractRemote.ts +626 -0
- package/src/client/sync/stream/AbstractStreamingSyncImplementation.ts +1258 -0
- package/src/client/sync/stream/WebsocketClientTransport.ts +80 -0
- package/src/client/sync/stream/core-instruction.ts +99 -0
- package/src/client/sync/stream/streaming-sync-types.ts +205 -0
- package/src/client/sync/sync-streams.ts +107 -0
- package/src/client/triggers/TriggerManager.ts +384 -0
- package/src/client/triggers/TriggerManagerImpl.ts +314 -0
- package/src/client/triggers/sanitizeSQL.ts +66 -0
- package/src/client/watched/GetAllQuery.ts +46 -0
- package/src/client/watched/WatchedQuery.ts +121 -0
- package/src/client/watched/processors/AbstractQueryProcessor.ts +226 -0
- package/src/client/watched/processors/DifferentialQueryProcessor.ts +305 -0
- package/src/client/watched/processors/OnChangeQueryProcessor.ts +122 -0
- package/src/client/watched/processors/comparators.ts +57 -0
- package/src/db/DBAdapter.ts +134 -0
- package/src/db/crud/SyncProgress.ts +100 -0
- package/src/db/crud/SyncStatus.ts +308 -0
- package/src/db/crud/UploadQueueStatus.ts +20 -0
- package/src/db/schema/Column.ts +60 -0
- package/src/db/schema/Index.ts +39 -0
- package/src/db/schema/IndexedColumn.ts +42 -0
- package/src/db/schema/RawTable.ts +67 -0
- package/src/db/schema/Schema.ts +76 -0
- package/src/db/schema/Table.ts +359 -0
- package/src/db/schema/TableV2.ts +9 -0
- package/src/index.ts +52 -0
- package/src/types/types.ts +9 -0
- package/src/utils/AbortOperation.ts +17 -0
- package/src/utils/BaseObserver.ts +41 -0
- package/src/utils/ControlledExecutor.ts +72 -0
- package/src/utils/DataStream.ts +211 -0
- package/src/utils/Logger.ts +47 -0
- package/src/utils/MetaBaseObserver.ts +81 -0
- package/src/utils/async.ts +61 -0
- package/src/utils/mutex.ts +34 -0
- package/src/utils/parseQuery.ts +25 -0
|
@@ -0,0 +1,1258 @@
|
|
|
1
|
+
import Logger, { ILogger } from 'js-logger';
|
|
2
|
+
|
|
3
|
+
import { InternalProgressInformation } from '../../../db/crud/SyncProgress.js';
|
|
4
|
+
import { SyncStatus, SyncStatusOptions } from '../../../db/crud/SyncStatus.js';
|
|
5
|
+
import { AbortOperation } from '../../../utils/AbortOperation.js';
|
|
6
|
+
import { BaseListener, BaseObserver, BaseObserverInterface, Disposable } from '../../../utils/BaseObserver.js';
|
|
7
|
+
import { DataStream } from '../../../utils/DataStream.js';
|
|
8
|
+
import { throttleLeadingTrailing } from '../../../utils/async.js';
|
|
9
|
+
import {
|
|
10
|
+
BucketChecksum,
|
|
11
|
+
BucketDescription,
|
|
12
|
+
BucketStorageAdapter,
|
|
13
|
+
Checkpoint,
|
|
14
|
+
PowerSyncControlCommand
|
|
15
|
+
} from '../bucket/BucketStorageAdapter.js';
|
|
16
|
+
import { CrudEntry } from '../bucket/CrudEntry.js';
|
|
17
|
+
import { SyncDataBucket } from '../bucket/SyncDataBucket.js';
|
|
18
|
+
import { AbstractRemote, FetchStrategy, SyncStreamOptions } from './AbstractRemote.js';
|
|
19
|
+
import { coreStatusToJs, EstablishSyncStream, Instruction, SyncPriorityStatus } from './core-instruction.js';
|
|
20
|
+
import {
|
|
21
|
+
BucketRequest,
|
|
22
|
+
CrudUploadNotification,
|
|
23
|
+
StreamingSyncLine,
|
|
24
|
+
StreamingSyncLineOrCrudUploadComplete,
|
|
25
|
+
StreamingSyncRequestParameterType,
|
|
26
|
+
isStreamingKeepalive,
|
|
27
|
+
isStreamingSyncCheckpoint,
|
|
28
|
+
isStreamingSyncCheckpointComplete,
|
|
29
|
+
isStreamingSyncCheckpointDiff,
|
|
30
|
+
isStreamingSyncCheckpointPartiallyComplete,
|
|
31
|
+
isStreamingSyncData
|
|
32
|
+
} from './streaming-sync-types.js';
|
|
33
|
+
|
|
34
|
+
/**
 * Identifies the two exclusive locks used by the streaming sync implementation:
 * one guarding CRUD uploads and one guarding the sync/download stream.
 */
export enum LockType {
  CRUD = 'crud',
  SYNC = 'sync'
}
|
|
38
|
+
|
|
39
|
+
/**
 * Transport used to stream sync lines from the PowerSync backend instance.
 */
export enum SyncStreamConnectionMethod {
  HTTP = 'http',
  WEB_SOCKET = 'web-socket'
}
|
|
43
|
+
|
|
44
|
+
/**
 * Selects which component decodes and applies sync lines received from the sync service.
 */
export enum SyncClientImplementation {
  /**
   * Decodes and handles sync lines received from the sync service in JavaScript.
   *
   * This is the default option.
   *
   * @deprecated Don't use {@link SyncClientImplementation.JAVASCRIPT} directly. Instead, use
   * {@link DEFAULT_SYNC_CLIENT_IMPLEMENTATION} or omit the option. The explicit choice to use
   * the JavaScript-based sync implementation will be removed from a future version of the SDK.
   */
  JAVASCRIPT = 'js',
  /**
   * This implementation offloads the sync line decoding and handling into the PowerSync
   * core extension.
   *
   * @experimental
   * While this implementation is more performant than {@link SyncClientImplementation.JAVASCRIPT},
   * it has seen less real-world testing and is marked as __experimental__ at the moment.
   *
   * ## Compatibility warning
   *
   * The Rust sync client stores sync data in a format that is slightly different than the one used
   * by the old {@link JAVASCRIPT} implementation. When adopting the {@link RUST} client on existing
   * databases, the PowerSync SDK will migrate the format automatically.
   * Further, the {@link JAVASCRIPT} client in recent versions of the PowerSync JS SDK (starting from
   * the version introducing {@link RUST} as an option) also supports the new format, so you can switch
   * back to {@link JAVASCRIPT} later.
   *
   * __However__: Upgrading the SDK version, then adopting {@link RUST} as a sync client and later
   * downgrading the SDK to an older version (necessarily using the JavaScript-based implementation then)
   * can lead to sync issues.
   */
  RUST = 'rust'
}
|
|
78
|
+
|
|
79
|
+
/**
 * The default {@link SyncClientImplementation} to use.
 *
 * Please use this field instead of {@link SyncClientImplementation.JAVASCRIPT} directly. A future version
 * of the PowerSync SDK will enable {@link SyncClientImplementation.RUST} by default and remove the JavaScript
 * option.
 */
export const DEFAULT_SYNC_CLIENT_IMPLEMENTATION = SyncClientImplementation.JAVASCRIPT;
|
|
87
|
+
|
|
88
|
+
/**
 * Abstract Lock to be implemented by various JS environments.
 */
export interface LockOptions<T> {
  // Work to run while the lock is held; its resolved value is returned to the caller.
  callback: () => Promise<T>;
  // Which lock to acquire (CRUD upload lock or sync stream lock).
  type: LockType;
  // Optional signal to abort acquisition of / waiting on the lock.
  signal?: AbortSignal;
}
|
|
96
|
+
|
|
97
|
+
/**
 * Construction options for {@link AbstractStreamingSyncImplementation}.
 */
export interface AbstractStreamingSyncImplementationOptions extends RequiredAdditionalConnectionOptions {
  // Local bucket storage the sync stream writes into.
  adapter: BucketStorageAdapter;
  // Initial set of sync-stream subscriptions.
  subscriptions: SubscribedStream[];
  // Callback invoked to upload pending local CRUD entries to the backend.
  uploadCrud: () => Promise<void>;
  /**
   * An identifier for which PowerSync DB this sync implementation is
   * linked to. Most commonly DB name, but not restricted to DB name.
   */
  identifier?: string;
  // Optional logger; a default 'PowerSyncStream' logger is used when omitted.
  logger?: ILogger;
  // Remote used for HTTP/WebSocket communication with the sync service.
  remote: AbstractRemote;
}
|
|
109
|
+
|
|
110
|
+
/**
 * Listener events emitted by a streaming sync implementation.
 */
export interface StreamingSyncImplementationListener extends BaseListener {
  /**
   * Triggered whenever a status update has been attempted to be made or
   * refreshed.
   */
  statusUpdated?: ((statusUpdate: SyncStatusOptions) => void) | undefined;
  /**
   * Triggers whenever the status' members have changed in value.
   */
  statusChanged?: ((status: SyncStatus) => void) | undefined;
}
|
|
121
|
+
|
|
122
|
+
/**
 * Configurable options to be used when connecting to the PowerSync
 * backend instance. Excludes the internal-only `serializedSchema` field.
 */
export type PowerSyncConnectionOptions = Omit<InternalConnectionOptions, 'serializedSchema'>;
|
|
127
|
+
|
|
128
|
+
// Full connection options, including internal-only fields (see PowerSyncConnectionOptions for the public subset).
export interface InternalConnectionOptions extends BaseConnectionOptions, AdditionalConnectionOptions {}
|
|
129
|
+
|
|
130
|
+
/** @internal */
export interface BaseConnectionOptions {
  /**
   * Whether to use a JavaScript implementation to handle received sync lines from the sync
   * service, or whether this work should be offloaded to the PowerSync core extension.
   *
   * This defaults to the JavaScript implementation ({@link SyncClientImplementation.JAVASCRIPT})
   * since the ({@link SyncClientImplementation.RUST}) implementation is experimental at the moment.
   */
  clientImplementation?: SyncClientImplementation;

  /**
   * The connection method to use when streaming updates from
   * the PowerSync backend instance.
   * Defaults to a HTTP streaming connection.
   *
   * NOTE(review): {@link DEFAULT_STREAM_CONNECTION_OPTIONS} in this file sets
   * {@link SyncStreamConnectionMethod.WEB_SOCKET} as the default — the "HTTP" claim
   * above appears stale; confirm which default actually applies.
   */
  connectionMethod?: SyncStreamConnectionMethod;

  /**
   * The fetch strategy to use when streaming updates from the PowerSync backend instance.
   */
  fetchStrategy?: FetchStrategy;

  /**
   * These parameters are passed to the sync rules, and will be available under the `user_parameters` object.
   */
  params?: Record<string, StreamingSyncRequestParameterType>;

  /**
   * Whether to include streams that have `auto_subscribe: true` in their definition.
   *
   * This defaults to `true`.
   */
  includeDefaultStreams?: boolean;

  /**
   * The serialized schema - mainly used to forward information about raw tables to the sync client.
   */
  serializedSchema?: any;
}
|
|
170
|
+
|
|
171
|
+
/** @internal */
export interface AdditionalConnectionOptions {
  /**
   * Delay for retrying sync streaming operations
   * from the PowerSync backend after an error occurs.
   */
  retryDelayMs?: number;
  /**
   * Backend Connector CRUD operations are throttled
   * to occur at most every `crudUploadThrottleMs`
   * milliseconds.
   */
  crudUploadThrottleMs?: number;
}
|
|
185
|
+
|
|
186
|
+
/** @internal */
// Same as AdditionalConnectionOptions but with all retry/throttle fields required,
// plus the stream subscriptions the implementation should start with.
export interface RequiredAdditionalConnectionOptions extends Required<AdditionalConnectionOptions> {
  subscriptions: SubscribedStream[];
}
|
|
190
|
+
|
|
191
|
+
/**
 * Public contract of a streaming sync implementation: connection lifecycle,
 * upload triggering, and sync-status observation.
 */
export interface StreamingSyncImplementation
  extends BaseObserverInterface<StreamingSyncImplementationListener>,
    Disposable {
  /**
   * Connects to the sync service.
   */
  connect(options?: InternalConnectionOptions): Promise<void>;
  /**
   * Disconnects from the sync services.
   * @throws if not connected or if abort is not controlled internally
   */
  disconnect(): Promise<void>;
  // Requests a write checkpoint from the sync service for this client.
  getWriteCheckpoint: () => Promise<string>;
  // Whether a full sync has completed at least once.
  hasCompletedSync: () => Promise<boolean>;
  // Current connection state (mirrors syncStatus.connected).
  isConnected: boolean;
  // Time of the last completed sync, if any.
  lastSyncedAt?: Date;
  // Latest composite sync status.
  syncStatus: SyncStatus;
  // Schedules an upload of pending CRUD entries (throttled).
  triggerCrudUpload: () => void;
  // Resolves once the implementation is ready for use.
  waitForReady(): Promise<void>;
  // Resolves once the status matches the given partial status options.
  waitForStatus(status: SyncStatusOptions): Promise<void>;
  // Resolves once the given predicate returns true for the current status.
  waitUntilStatusMatches(predicate: (status: SyncStatus) => boolean): Promise<void>;
  // Replaces the set of active sync-stream subscriptions.
  updateSubscriptions(subscriptions: SubscribedStream[]): void;
}
|
|
214
|
+
|
|
215
|
+
// Default minimum interval (ms) between CRUD upload attempts.
export const DEFAULT_CRUD_UPLOAD_THROTTLE_MS = 1000;
// Default delay (ms) before retrying sync operations after an error.
export const DEFAULT_RETRY_DELAY_MS = 5000;
|
|
217
|
+
|
|
218
|
+
// Default retry/throttle settings applied when not overridden by the caller.
export const DEFAULT_STREAMING_SYNC_OPTIONS = {
  retryDelayMs: DEFAULT_RETRY_DELAY_MS,
  crudUploadThrottleMs: DEFAULT_CRUD_UPLOAD_THROTTLE_MS
};
|
|
222
|
+
|
|
223
|
+
// BaseConnectionOptions with every field required; shape of DEFAULT_STREAM_CONNECTION_OPTIONS.
export type RequiredPowerSyncConnectionOptions = Required<BaseConnectionOptions>;
|
|
224
|
+
|
|
225
|
+
// Defaults used to fill in omitted connection options.
// Note: the default connectionMethod here is WEB_SOCKET.
export const DEFAULT_STREAM_CONNECTION_OPTIONS: RequiredPowerSyncConnectionOptions = {
  connectionMethod: SyncStreamConnectionMethod.WEB_SOCKET,
  clientImplementation: DEFAULT_SYNC_CLIENT_IMPLEMENTATION,
  fetchStrategy: FetchStrategy.Buffered,
  params: {},
  serializedSchema: undefined,
  includeDefaultStreams: true
};
|
|
233
|
+
|
|
234
|
+
// A single sync-stream subscription: the stream name plus optional subscription parameters.
export type SubscribedStream = {
  name: string;
  params: Record<string, any> | null;
};
|
|
238
|
+
|
|
239
|
+
// The priority we assume when we receive checkpoint lines where no priority is set.
// This is the default priority used by the sync service, but can be set to an arbitrary
// value since sync services without priorities also won't send partial sync completion
// messages.
const FALLBACK_PRIORITY = 3;
|
|
244
|
+
|
|
245
|
+
export abstract class AbstractStreamingSyncImplementation
|
|
246
|
+
extends BaseObserver<StreamingSyncImplementationListener>
|
|
247
|
+
implements StreamingSyncImplementation
|
|
248
|
+
{
|
|
249
|
+
protected _lastSyncedAt: Date | null;
|
|
250
|
+
protected options: AbstractStreamingSyncImplementationOptions;
|
|
251
|
+
protected abortController: AbortController | null;
|
|
252
|
+
// In rare cases, mostly for tests, uploads can be triggered without being properly connected.
|
|
253
|
+
// This allows ensuring that all upload processes can be aborted.
|
|
254
|
+
protected uploadAbortController: AbortController | null;
|
|
255
|
+
protected crudUpdateListener?: () => void;
|
|
256
|
+
protected streamingSyncPromise?: Promise<void>;
|
|
257
|
+
protected logger: ILogger;
|
|
258
|
+
private activeStreams: SubscribedStream[];
|
|
259
|
+
|
|
260
|
+
private isUploadingCrud: boolean = false;
|
|
261
|
+
private notifyCompletedUploads?: () => void;
|
|
262
|
+
private handleActiveStreamsChange?: () => void;
|
|
263
|
+
|
|
264
|
+
syncStatus: SyncStatus;
|
|
265
|
+
triggerCrudUpload: () => void;
|
|
266
|
+
|
|
267
|
+
/**
 * Initializes status tracking and the throttled CRUD upload trigger.
 * No connection is made here; call `connect()` to start syncing.
 */
constructor(options: AbstractStreamingSyncImplementationOptions) {
  super();
  this.options = options;
  this.activeStreams = options.subscriptions;
  this.logger = options.logger ?? Logger.get('PowerSyncStream');

  // Start fully disconnected and idle; status is updated as sync progresses.
  this.syncStatus = new SyncStatus({
    connected: false,
    connecting: false,
    lastSyncedAt: undefined,
    dataFlow: {
      uploading: false,
      downloading: false
    }
  });
  this.abortController = null;

  // Throttle uploads so at most one runs per crudUploadThrottleMs window,
  // and skip triggering while disconnected or while an upload is in flight.
  this.triggerCrudUpload = throttleLeadingTrailing(() => {
    if (!this.syncStatus.connected || this.isUploadingCrud) {
      return;
    }

    this.isUploadingCrud = true;
    this._uploadAllCrud().finally(() => {
      // Notify any waiter (e.g. the sync loop) that the upload pass finished,
      // regardless of success or failure.
      this.notifyCompletedUploads?.();
      this.isUploadingCrud = false;
    });
  }, this.options.crudUploadThrottleMs!);
}
|
|
296
|
+
|
|
297
|
+
// No-op by default; subclasses with async initialization may override.
async waitForReady() {}
|
|
298
|
+
|
|
299
|
+
/**
 * Resolves once the current sync status matches all fields present in the
 * given partial status. Fields omitted from `status` are ignored.
 */
waitForStatus(status: SyncStatusOptions): Promise<void> {
  return this.waitUntilStatusMatches((currentStatus) => {
    /**
     * Match only the partial status options provided in the
     * matching status.
     */
    const matchPartialObject = (compA: object, compB: object) => {
      return Object.entries(compA).every(([key, value]) => {
        const comparisonBValue = compB[key];
        // NOTE(review): typeof null == 'object', so a null value on either side
        // would recurse into Object.entries(null) — assumes status fields are
        // never null here; confirm.
        if (typeof value == 'object' && typeof comparisonBValue == 'object') {
          return matchPartialObject(value, comparisonBValue);
        }
        // Loose equality is used for leaf comparisons.
        return value == comparisonBValue;
      });
    };

    return matchPartialObject(status, currentStatus);
  });
}
|
|
318
|
+
|
|
319
|
+
/**
 * Resolves once `predicate` returns true for the sync status — immediately if
 * it already matches, otherwise on the first matching `statusChanged` event.
 * The listener is disposed after the predicate matches.
 */
waitUntilStatusMatches(predicate: (status: SyncStatus) => boolean): Promise<void> {
  return new Promise((resolve) => {
    // Fast path: the current status may already satisfy the predicate.
    if (predicate(this.syncStatus)) {
      resolve();
      return;
    }

    const l = this.registerListener({
      statusChanged: (updatedStatus) => {
        if (predicate(updatedStatus)) {
          resolve();
          // Dispose the listener so it does not keep firing.
          l?.();
        }
      }
    });
  });
}
|
|
336
|
+
|
|
337
|
+
// Time of the last completed sync as a new Date instance, or undefined if
// a full sync has not completed yet.
get lastSyncedAt() {
  const lastSynced = this.syncStatus.lastSyncedAt;
  return lastSynced && new Date(lastSynced);
}
|
|
341
|
+
|
|
342
|
+
// Mirrors the `connected` flag of the current sync status.
get isConnected() {
  return this.syncStatus.connected;
}
|
|
345
|
+
|
|
346
|
+
/**
 * Releases listeners and aborts any in-flight CRUD upload.
 * Note: does not disconnect the sync stream; call `disconnect()` for that.
 */
async dispose() {
  // NOTE(review): super.dispose() is not awaited — presumably synchronous
  // in BaseObserver; confirm.
  super.dispose();
  this.crudUpdateListener?.();
  this.crudUpdateListener = undefined;
  this.uploadAbortController?.abort();
}
|
|
352
|
+
|
|
353
|
+
// Platform-specific exclusive lock; runs the callback while holding the lock of the given type.
abstract obtainLock<T>(lockOptions: LockOptions<T>): Promise<T>;
|
|
354
|
+
|
|
355
|
+
// Delegates to the bucket storage adapter: whether a full sync has completed at least once.
async hasCompletedSync() {
  return this.options.adapter.hasCompletedSync();
}
|
|
358
|
+
|
|
359
|
+
/**
 * Requests a write checkpoint for this client from the sync service via the
 * `/write-checkpoint2.json` endpoint.
 * @returns the checkpoint identifier reported by the service.
 */
async getWriteCheckpoint(): Promise<string> {
  const clientId = await this.options.adapter.getClientId();
  let path = `/write-checkpoint2.json?client_id=${clientId}`;
  const response = await this.options.remote.get(path);
  const checkpoint = response['data']['write_checkpoint'] as string;
  this.logger.debug(`Created write checkpoint: ${checkpoint}`);
  return checkpoint;
}
|
|
367
|
+
|
|
368
|
+
/**
 * Drains the local CRUD upload queue while holding the CRUD lock.
 * Repeatedly invokes the connector's `uploadCrud` until the queue is empty,
 * then requests/validates a write checkpoint. On errors, waits `retryDelayMs`
 * before retrying; detects stuck queues by comparing the head item between
 * iterations. Upload status and errors are reflected in the sync status.
 */
protected async _uploadAllCrud(): Promise<void> {
  return this.obtainLock({
    type: LockType.CRUD,
    callback: async () => {
      /**
       * Keep track of the first item in the CRUD queue for the last `uploadCrud` iteration.
       */
      let checkedCrudItem: CrudEntry | undefined;

      // Local controller so uploads can be aborted both by dispose() and by
      // the main sync abort controller.
      const controller = new AbortController();
      this.uploadAbortController = controller;
      this.abortController?.signal.addEventListener(
        'abort',
        () => {
          controller.abort();
        },
        { once: true }
      );

      while (!controller.signal.aborted) {
        try {
          /**
           * This is the first item in the FIFO CRUD queue.
           */
          const nextCrudItem = await this.options.adapter.nextCrudItem();
          if (nextCrudItem) {
            this.updateSyncStatus({
              dataFlow: {
                uploading: true
              }
            });

            // If the queue head is unchanged since the previous iteration, the
            // connector's uploadCrud did not complete the item — warn and back off.
            if (nextCrudItem.clientId == checkedCrudItem?.clientId) {
              // This will force a higher log level than exceptions which are caught here.
              this.logger.warn(`Potentially previously uploaded CRUD entries are still present in the upload queue.
Make sure to handle uploads and complete CRUD transactions or batches by calling and awaiting their [.complete()] method.
The next upload iteration will be delayed.`);
              throw new Error('Delaying due to previously encountered CRUD item.');
            }

            checkedCrudItem = nextCrudItem;
            await this.options.uploadCrud();
            // Clear any previous upload error after a successful upload pass.
            this.updateSyncStatus({
              dataFlow: {
                uploadError: undefined
              }
            });
          } else {
            // Uploading is completed
            const neededUpdate = await this.options.adapter.updateLocalTarget(() => this.getWriteCheckpoint());
            if (neededUpdate == false && checkedCrudItem != null) {
              // Only log this if there was something to upload
              this.logger.debug('Upload complete, no write checkpoint needed.');
            }
            break;
          }
        } catch (ex) {
          // Reset the stuck-queue tracker so the next attempt re-reads the head.
          checkedCrudItem = undefined;
          this.updateSyncStatus({
            dataFlow: {
              uploading: false,
              uploadError: ex
            }
          });
          // Wait before retrying; the signal can cut the delay short on abort.
          await this.delayRetry(controller.signal);
          if (!this.isConnected) {
            // Exit the upload loop if the sync stream is no longer connected
            break;
          }
          this.logger.debug(
            `Caught exception when uploading. Upload will retry after a delay. Exception: ${ex.message}`
          );
        } finally {
          this.updateSyncStatus({
            dataFlow: {
              uploading: false
            }
          });
        }
      }
      this.uploadAbortController = null;
    }
  });
}
|
|
452
|
+
|
|
453
|
+
/**
 * Starts the streaming sync loop, disconnecting any existing connection first.
 * The returned promise resolves once the first connection attempt has settled:
 * either connected, or no longer in the `connecting` state, or a download
 * error was reported (it resolves — not rejects — in the error case).
 */
async connect(options?: PowerSyncConnectionOptions) {
  if (this.abortController) {
    await this.disconnect();
  }

  const controller = new AbortController();
  this.abortController = controller;
  this.streamingSyncPromise = this.streamingSync(this.abortController.signal, options);

  // Return a promise that resolves when the connection status is updated to indicate that we're connected.
  return new Promise<void>((resolve) => {
    const disposer = this.registerListener({
      statusChanged: (status) => {
        if (status.dataFlowStatus.downloadError != null) {
          // Initial attempt failed; resolve anyway — the sync loop keeps retrying.
          this.logger.warn('Initial connect attempt did not successfully connect to server');
        } else if (status.connecting) {
          // Still connecting.
          return;
        }

        disposer();
        resolve();
      }
    });
  });
}
|
|
479
|
+
|
|
480
|
+
/**
 * Stops the streaming sync loop. Safe to call multiple times and when not
 * connected (no-op in that case). Awaits in-flight sync work before updating
 * the status to disconnected; errors from that work are logged, not rethrown.
 */
async disconnect(): Promise<void> {
  if (!this.abortController) {
    return;
  }

  // This might be called multiple times
  if (!this.abortController.signal.aborted) {
    this.abortController.abort(new AbortOperation('Disconnect has been requested'));
  }

  // Await any pending operations before completing the disconnect operation
  try {
    await this.streamingSyncPromise;
  } catch (ex) {
    // The operation might have failed, all we care about is if it has completed
    this.logger.warn(ex);
  }
  this.streamingSyncPromise = undefined;

  this.abortController = null;
  this.updateSyncStatus({ connected: false, connecting: false });
}
|
|
502
|
+
|
|
503
|
+
/**
|
|
504
|
+
* @deprecated use [connect instead]
|
|
505
|
+
*/
|
|
506
|
+
async streamingSync(signal?: AbortSignal, options?: PowerSyncConnectionOptions): Promise<void> {
|
|
507
|
+
if (!signal) {
|
|
508
|
+
this.abortController = new AbortController();
|
|
509
|
+
signal = this.abortController.signal;
|
|
510
|
+
}
|
|
511
|
+
|
|
512
|
+
/**
|
|
513
|
+
* Listen for CRUD updates and trigger upstream uploads
|
|
514
|
+
*/
|
|
515
|
+
this.crudUpdateListener = this.options.adapter.registerListener({
|
|
516
|
+
crudUpdate: () => this.triggerCrudUpload()
|
|
517
|
+
});
|
|
518
|
+
|
|
519
|
+
/**
|
|
520
|
+
* Create a new abort controller which aborts items downstream.
|
|
521
|
+
* This is needed to close any previous connections on exception.
|
|
522
|
+
*/
|
|
523
|
+
let nestedAbortController = new AbortController();
|
|
524
|
+
|
|
525
|
+
signal.addEventListener('abort', () => {
|
|
526
|
+
/**
|
|
527
|
+
* A request for disconnect was received upstream. Relay the request
|
|
528
|
+
* to the nested abort controller.
|
|
529
|
+
*/
|
|
530
|
+
nestedAbortController.abort(signal?.reason ?? new AbortOperation('Received command to disconnect from upstream'));
|
|
531
|
+
this.crudUpdateListener?.();
|
|
532
|
+
this.crudUpdateListener = undefined;
|
|
533
|
+
this.updateSyncStatus({
|
|
534
|
+
connected: false,
|
|
535
|
+
connecting: false,
|
|
536
|
+
dataFlow: {
|
|
537
|
+
downloading: false,
|
|
538
|
+
downloadProgress: null
|
|
539
|
+
}
|
|
540
|
+
});
|
|
541
|
+
});
|
|
542
|
+
|
|
543
|
+
/**
|
|
544
|
+
* This loops runs until [retry] is false or the abort signal is set to aborted.
|
|
545
|
+
* Aborting the nestedAbortController will:
|
|
546
|
+
* - Abort any pending fetch requests
|
|
547
|
+
* - Close any sync stream ReadableStreams (which will also close any established network requests)
|
|
548
|
+
*/
|
|
549
|
+
while (true) {
|
|
550
|
+
this.updateSyncStatus({ connecting: true });
|
|
551
|
+
let shouldDelayRetry = true;
|
|
552
|
+
let result: RustIterationResult | null = null;
|
|
553
|
+
|
|
554
|
+
try {
|
|
555
|
+
if (signal?.aborted) {
|
|
556
|
+
break;
|
|
557
|
+
}
|
|
558
|
+
result = await this.streamingSyncIteration(nestedAbortController.signal, options);
|
|
559
|
+
// Continue immediately, streamingSyncIteration will wait before completing if necessary.
|
|
560
|
+
} catch (ex) {
|
|
561
|
+
/**
|
|
562
|
+
* Either:
|
|
563
|
+
* - A network request failed with a failed connection or not OKAY response code.
|
|
564
|
+
* - There was a sync processing error.
|
|
565
|
+
* - The connection was aborted.
|
|
566
|
+
* This loop will retry after a delay if the connection was not aborted.
|
|
567
|
+
* The nested abort controller will cleanup any open network requests and streams.
|
|
568
|
+
* The WebRemote should only abort pending fetch requests or close active Readable streams.
|
|
569
|
+
*/
|
|
570
|
+
|
|
571
|
+
if (ex instanceof AbortOperation) {
|
|
572
|
+
this.logger.warn(ex);
|
|
573
|
+
shouldDelayRetry = false;
|
|
574
|
+
// A disconnect was requested, we should not delay since there is no explicit retry
|
|
575
|
+
} else {
|
|
576
|
+
this.logger.error(ex);
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
this.updateSyncStatus({
|
|
580
|
+
dataFlow: {
|
|
581
|
+
downloadError: ex
|
|
582
|
+
}
|
|
583
|
+
});
|
|
584
|
+
} finally {
|
|
585
|
+
this.notifyCompletedUploads = undefined;
|
|
586
|
+
|
|
587
|
+
if (!signal.aborted) {
|
|
588
|
+
nestedAbortController.abort(new AbortOperation('Closing sync stream network requests before retry.'));
|
|
589
|
+
nestedAbortController = new AbortController();
|
|
590
|
+
}
|
|
591
|
+
|
|
592
|
+
if (result?.immediateRestart != true) {
|
|
593
|
+
this.updateSyncStatus({
|
|
594
|
+
connected: false,
|
|
595
|
+
connecting: true // May be unnecessary
|
|
596
|
+
});
|
|
597
|
+
|
|
598
|
+
// On error, wait a little before retrying
|
|
599
|
+
if (shouldDelayRetry) {
|
|
600
|
+
await this.delayRetry(nestedAbortController.signal);
|
|
601
|
+
}
|
|
602
|
+
}
|
|
603
|
+
}
|
|
604
|
+
}
|
|
605
|
+
|
|
606
|
+
// Mark as disconnected if here
|
|
607
|
+
this.updateSyncStatus({ connected: false, connecting: false });
|
|
608
|
+
}
|
|
609
|
+
|
|
610
|
+
private async collectLocalBucketState(): Promise<[BucketRequest[], Map<string, BucketDescription | null>]> {
|
|
611
|
+
const bucketEntries = await this.options.adapter.getBucketStates();
|
|
612
|
+
const req: BucketRequest[] = bucketEntries.map((entry) => ({
|
|
613
|
+
name: entry.bucket,
|
|
614
|
+
after: entry.op_id
|
|
615
|
+
}));
|
|
616
|
+
const localDescriptions = new Map<string, BucketDescription | null>();
|
|
617
|
+
for (const entry of bucketEntries) {
|
|
618
|
+
localDescriptions.set(entry.bucket, null);
|
|
619
|
+
}
|
|
620
|
+
|
|
621
|
+
return [req, localDescriptions];
|
|
622
|
+
}
|
|
623
|
+
|
|
624
|
+
/**
|
|
625
|
+
* Older versions of the JS SDK used to encode subkeys as JSON in {@link OplogEntry.toJSON}.
|
|
626
|
+
* Because subkeys are always strings, this leads to quotes being added around them in `ps_oplog`.
|
|
627
|
+
* While this is not a problem as long as it's done consistently, it causes issues when a database
|
|
628
|
+
* created by the JS SDK is used with other SDKs, or (more likely) when the new Rust sync client
|
|
629
|
+
* is enabled.
|
|
630
|
+
*
|
|
631
|
+
* So, we add a migration from the old key format (with quotes) to the new one (no quotes). The
|
|
632
|
+
* migration is only triggered when necessary (for now). The function returns whether the new format
|
|
633
|
+
* should be used, so that the JS SDK is able to write to updated databases.
|
|
634
|
+
*
|
|
635
|
+
* @param requireFixedKeyFormat Whether we require the new format or also support the old one.
|
|
636
|
+
* The Rust client requires the new subkey format.
|
|
637
|
+
* @returns Whether the database is now using the new, fixed subkey format.
|
|
638
|
+
*/
|
|
639
|
+
private async requireKeyFormat(requireFixedKeyFormat: boolean): Promise<boolean> {
|
|
640
|
+
const hasMigrated = await this.options.adapter.hasMigratedSubkeys();
|
|
641
|
+
if (requireFixedKeyFormat && !hasMigrated) {
|
|
642
|
+
await this.options.adapter.migrateToFixedSubkeys();
|
|
643
|
+
return true;
|
|
644
|
+
} else {
|
|
645
|
+
return hasMigrated;
|
|
646
|
+
}
|
|
647
|
+
}
|
|
648
|
+
|
|
649
|
+
protected streamingSyncIteration(
|
|
650
|
+
signal: AbortSignal,
|
|
651
|
+
options?: PowerSyncConnectionOptions
|
|
652
|
+
): Promise<RustIterationResult | null> {
|
|
653
|
+
return this.obtainLock({
|
|
654
|
+
type: LockType.SYNC,
|
|
655
|
+
signal,
|
|
656
|
+
callback: async () => {
|
|
657
|
+
const resolvedOptions: RequiredPowerSyncConnectionOptions = {
|
|
658
|
+
...DEFAULT_STREAM_CONNECTION_OPTIONS,
|
|
659
|
+
...(options ?? {})
|
|
660
|
+
};
|
|
661
|
+
const clientImplementation = resolvedOptions.clientImplementation;
|
|
662
|
+
this.updateSyncStatus({ clientImplementation });
|
|
663
|
+
|
|
664
|
+
if (clientImplementation == SyncClientImplementation.JAVASCRIPT) {
|
|
665
|
+
await this.legacyStreamingSyncIteration(signal, resolvedOptions);
|
|
666
|
+
return null;
|
|
667
|
+
} else {
|
|
668
|
+
await this.requireKeyFormat(true);
|
|
669
|
+
return await this.rustSyncIteration(signal, resolvedOptions);
|
|
670
|
+
}
|
|
671
|
+
}
|
|
672
|
+
});
|
|
673
|
+
}
|
|
674
|
+
|
|
675
|
+
/**
 * One iteration of the legacy (JavaScript) sync client: opens a sync stream
 * (HTTP or websocket), then processes sync lines until the stream closes, the
 * token nears expiry, or a checksum failure forces a reconnect.
 *
 * Returning from this method ends the iteration; the caller's loop decides
 * whether to retry.
 */
private async legacyStreamingSyncIteration(signal: AbortSignal, resolvedOptions: RequiredPowerSyncConnectionOptions) {
  const rawTables = resolvedOptions.serializedSchema?.raw_tables;
  if (rawTables != null && rawTables.length) {
    this.logger.warn('Raw tables require the Rust-based sync client. The JS client will ignore them.');
  }

  this.logger.debug('Streaming sync iteration started');
  this.options.adapter.startSession();
  let [req, bucketMap] = await this.collectLocalBucketState();

  // The checkpoint most recently announced by the service (may not be applied yet).
  let targetCheckpoint: Checkpoint | null = null;
  // A checkpoint that has been validated but not applied (e.g. due to pending local writes)
  let pendingValidatedCheckpoint: Checkpoint | null = null;

  const clientId = await this.options.adapter.getClientId();
  // false = don't force a migration; just report which subkey format is in use.
  const usingFixedKeyFormat = await this.requireKeyFormat(false);

  this.logger.debug('Requesting stream from server');

  const syncOptions: SyncStreamOptions = {
    path: '/sync/stream',
    abortSignal: signal,
    data: {
      buckets: req,
      include_checksum: true,
      raw_data: true,
      parameters: resolvedOptions.params,
      client_id: clientId
    }
  };

  let stream: DataStream<StreamingSyncLineOrCrudUploadComplete>;
  if (resolvedOptions?.connectionMethod == SyncStreamConnectionMethod.HTTP) {
    // HTTP transport: each line arrives as a JSON string.
    stream = await this.options.remote.postStreamRaw(syncOptions, (line: string | CrudUploadNotification) => {
      if (typeof line == 'string') {
        return JSON.parse(line) as StreamingSyncLine;
      } else {
        // Directly enqueued by us
        return line;
      }
    });
  } else {
    // Websocket transport: each line arrives BSON-encoded.
    const bson = await this.options.remote.getBSON();
    stream = await this.options.remote.socketStreamRaw(
      {
        ...syncOptions,
        ...{ fetchStrategy: resolvedOptions.fetchStrategy }
      },
      (payload: Uint8Array | CrudUploadNotification) => {
        if (payload instanceof Uint8Array) {
          return bson.deserialize(payload) as StreamingSyncLine;
        } else {
          // Directly enqueued by us
          return payload;
        }
      },
      bson
    );
  }

  this.logger.debug('Stream established. Processing events');

  // Completed CRUD uploads are surfaced as a synthetic line on the same stream,
  // so the main loop below handles them in order with service lines.
  this.notifyCompletedUploads = () => {
    if (!stream.closed) {
      stream.enqueueData({ crud_upload_completed: null });
    }
  };

  while (!stream.closed) {
    const line = await stream.read();
    if (!line) {
      // The stream has closed while waiting
      return;
    }

    if ('crud_upload_completed' in line) {
      // An upload finished; retry a checkpoint that was previously blocked by local data.
      if (pendingValidatedCheckpoint != null) {
        const { applied, endIteration } = await this.applyCheckpoint(pendingValidatedCheckpoint);
        if (applied) {
          pendingValidatedCheckpoint = null;
        } else if (endIteration) {
          break;
        }
      }

      continue;
    }

    // A connection is active and messages are being received
    if (!this.syncStatus.connected) {
      // There is a connection now
      Promise.resolve().then(() => this.triggerCrudUpload());
      this.updateSyncStatus({
        connected: true
      });
    }

    if (isStreamingSyncCheckpoint(line)) {
      targetCheckpoint = line.checkpoint;
      // New checkpoint - existing validated checkpoint is no longer valid
      pendingValidatedCheckpoint = null;
      // Buckets not present in the new checkpoint must be removed locally.
      const bucketsToDelete = new Set<string>(bucketMap.keys());
      const newBuckets = new Map<string, BucketDescription>();
      for (const checksum of line.checkpoint.buckets) {
        newBuckets.set(checksum.bucket, {
          name: checksum.bucket,
          priority: checksum.priority ?? FALLBACK_PRIORITY
        });
        bucketsToDelete.delete(checksum.bucket);
      }
      if (bucketsToDelete.size > 0) {
        this.logger.debug('Removing buckets', [...bucketsToDelete]);
      }
      bucketMap = newBuckets;
      await this.options.adapter.removeBuckets([...bucketsToDelete]);
      await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
      await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);
    } else if (isStreamingSyncCheckpointComplete(line)) {
      const result = await this.applyCheckpoint(targetCheckpoint!);
      if (result.endIteration) {
        return;
      } else if (!result.applied) {
        // "Could not apply checkpoint due to local data". We need to retry after
        // finishing uploads.
        pendingValidatedCheckpoint = targetCheckpoint;
      } else {
        // Nothing to retry later. This would likely already be null from the last
        // checksum or checksum_diff operation, but we make sure.
        pendingValidatedCheckpoint = null;
      }
    } else if (isStreamingSyncCheckpointPartiallyComplete(line)) {
      const priority = line.partial_checkpoint_complete.priority;
      this.logger.debug('Partial checkpoint complete', priority);
      const result = await this.options.adapter.syncLocalDatabase(targetCheckpoint!, priority);
      if (!result.checkpointValid) {
        // This means checksums failed. Start again with a new checkpoint.
        // TODO: better back-off
        await new Promise((resolve) => setTimeout(resolve, 50));
        return;
      } else if (!result.ready) {
        // If we have pending uploads, we can't complete new checkpoints outside of priority 0.
        // We'll resolve this for a complete checkpoint.
      } else {
        // We'll keep on downloading, but can report that this priority is synced now.
        this.logger.debug('partial checkpoint validation succeeded');

        // All states with a higher priority can be deleted since this partial sync includes them.
        const priorityStates = this.syncStatus.priorityStatusEntries.filter((s) => s.priority <= priority);
        priorityStates.push({
          priority,
          lastSyncedAt: new Date(),
          hasSynced: true
        });

        this.updateSyncStatus({
          connected: true,
          priorityStatusEntries: priorityStates
        });
      }
    } else if (isStreamingSyncCheckpointDiff(line)) {
      // TODO: It may be faster to just keep track of the diff, instead of the entire checkpoint
      if (targetCheckpoint == null) {
        throw new Error('Checkpoint diff without previous checkpoint');
      }
      // New checkpoint - existing validated checkpoint is no longer valid
      pendingValidatedCheckpoint = null;
      const diff = line.checkpoint_diff;
      // Merge the diff into the previous checkpoint's bucket set.
      const newBuckets = new Map<string, BucketChecksum>();
      for (const checksum of targetCheckpoint.buckets) {
        newBuckets.set(checksum.bucket, checksum);
      }
      for (const checksum of diff.updated_buckets) {
        newBuckets.set(checksum.bucket, checksum);
      }
      for (const bucket of diff.removed_buckets) {
        newBuckets.delete(bucket);
      }

      const newCheckpoint: Checkpoint = {
        last_op_id: diff.last_op_id,
        buckets: [...newBuckets.values()],
        write_checkpoint: diff.write_checkpoint
      };
      targetCheckpoint = newCheckpoint;
      await this.updateSyncStatusForStartingCheckpoint(targetCheckpoint);

      bucketMap = new Map();
      newBuckets.forEach((checksum, name) =>
        bucketMap.set(name, {
          name: checksum.bucket,
          priority: checksum.priority ?? FALLBACK_PRIORITY
        })
      );

      const bucketsToDelete = diff.removed_buckets;
      if (bucketsToDelete.length > 0) {
        this.logger.debug('Remove buckets', bucketsToDelete);
      }
      await this.options.adapter.removeBuckets(bucketsToDelete);
      await this.options.adapter.setTargetCheckpoint(targetCheckpoint);
    } else if (isStreamingSyncData(line)) {
      const { data } = line;
      // Advance the per-bucket download progress counter before persisting the data.
      const previousProgress = this.syncStatus.dataFlowStatus.downloadProgress;
      let updatedProgress: InternalProgressInformation | null = null;
      if (previousProgress) {
        updatedProgress = { ...previousProgress };
        const progressForBucket = updatedProgress[data.bucket];
        if (progressForBucket) {
          updatedProgress[data.bucket] = {
            ...progressForBucket,
            since_last: progressForBucket.since_last + data.data.length
          };
        }
      }

      this.updateSyncStatus({
        dataFlow: {
          downloading: true,
          downloadProgress: updatedProgress
        }
      });
      await this.options.adapter.saveSyncData({ buckets: [SyncDataBucket.fromRow(data)] }, usingFixedKeyFormat);
    } else if (isStreamingKeepalive(line)) {
      const remaining_seconds = line.token_expires_in;
      if (remaining_seconds == 0) {
        // Connection would be closed automatically right after this
        this.logger.debug('Token expiring; reconnect');
        /**
         * For a rare case where the backend connector does not update the token
         * (uses the same one), this should have some delay.
         */
        await this.delayRetry();
        return;
      } else if (remaining_seconds < 30) {
        this.logger.debug('Token will expire soon; reconnect');
        // Pre-emptively refresh the token
        this.options.remote.invalidateCredentials();
        return;
      }
      this.triggerCrudUpload();
    } else {
      this.logger.debug('Received unknown sync line', line);
    }
  }
  this.logger.debug('Stream input empty');
  // Connection closed. Likely due to auth issue.
  return;
}
|
|
923
|
+
|
|
924
|
+
/**
 * One iteration of the Rust-based sync client. The Rust core (driven through
 * `adapter.control(...)`) decides what to do; this method feeds it sync lines
 * and local events, and executes the instructions it returns (logging, status
 * updates, establishing/closing the network stream, credential refreshes).
 *
 * @returns Whether the caller should restart immediately without reporting a
 * disconnect (as requested by a CloseSyncStream instruction).
 */
private async rustSyncIteration(
  signal: AbortSignal,
  resolvedOptions: RequiredPowerSyncConnectionOptions
): Promise<RustIterationResult> {
  // Captured so the nested function declarations below can reach instance state.
  const syncImplementation = this;
  const adapter = this.options.adapter;
  const remote = this.options.remote;
  // Resolves when the sync-line receive loop (started by an EstablishSyncStream
  // instruction) finishes; null until then.
  let receivingLines: Promise<void> | null = null;
  let hadSyncLine = false;
  let hideDisconnectOnRestart = false;

  if (signal.aborted) {
    throw new AbortOperation('Connection request has been aborted');
  }
  const abortController = new AbortController();
  signal.addEventListener('abort', () => abortController.abort());

  // Pending sync lines received from the service, as well as local events that trigger a powersync_control
  // invocation (local events include refreshed tokens and completed uploads).
  // This is a single data stream so that we can handle all control calls from a single place.
  let controlInvocations: DataStream<EnqueuedCommand, Uint8Array | EnqueuedCommand> | null = null;

  // Opens the network stream requested by the Rust core and pumps every line
  // through powersync_control until the stream closes.
  async function connect(instr: EstablishSyncStream) {
    const syncOptions: SyncStreamOptions = {
      path: '/sync/stream',
      abortSignal: abortController.signal,
      data: instr.request
    };

    if (resolvedOptions.connectionMethod == SyncStreamConnectionMethod.HTTP) {
      // HTTP transport: text lines are wrapped as PROCESS_TEXT_LINE commands.
      controlInvocations = await remote.postStreamRaw(syncOptions, (line: string | EnqueuedCommand) => {
        if (typeof line == 'string') {
          return {
            command: PowerSyncControlCommand.PROCESS_TEXT_LINE,
            payload: line
          };
        } else {
          // Directly enqueued by us
          return line;
        }
      });
    } else {
      // Websocket transport: BSON payloads are wrapped as PROCESS_BSON_LINE commands.
      controlInvocations = await remote.socketStreamRaw(
        {
          ...syncOptions,
          fetchStrategy: resolvedOptions.fetchStrategy
        },
        (payload: Uint8Array | EnqueuedCommand) => {
          if (payload instanceof Uint8Array) {
            return {
              command: PowerSyncControlCommand.PROCESS_BSON_LINE,
              payload: payload
            };
          } else {
            // Directly enqueued by us
            return payload;
          }
        }
      );
    }

    // The rust client will set connected: true after the first sync line because that's when it gets invoked, but
    // we're already connected here and can report that.
    syncImplementation.updateSyncStatus({ connected: true });

    try {
      while (!controlInvocations.closed) {
        const line = await controlInvocations.read();
        if (line == null) {
          return;
        }

        await control(line.command, line.payload);

        // Kick off a CRUD upload once after the first processed line.
        if (!hadSyncLine) {
          syncImplementation.triggerCrudUpload();
          hadSyncLine = true;
        }
      }
    } finally {
      const activeInstructions = controlInvocations;
      // We concurrently add events to the active data stream when e.g. a CRUD upload is completed or a token is
      // refreshed. That would throw after closing (and we can't handle those events either way), so set this back
      // to null.
      controlInvocations = null;
      await activeInstructions.close();
    }
  }

  // Tells the Rust core to stop the iteration cleanly.
  async function stop() {
    await control(PowerSyncControlCommand.STOP);
  }

  // Invokes powersync_control on the adapter and executes every instruction it returns.
  async function control(op: PowerSyncControlCommand, payload?: Uint8Array | string) {
    const rawResponse = await adapter.control(op, payload ?? null);
    const logger = syncImplementation.logger;
    logger.trace(
      'powersync_control',
      op,
      payload == null || typeof payload == 'string' ? payload : '<bytes>',
      rawResponse
    );

    await handleInstructions(JSON.parse(rawResponse));
  }

  async function handleInstruction(instruction: Instruction) {
    if ('LogLine' in instruction) {
      // Forward core log lines to the SDK logger at the matching severity.
      switch (instruction.LogLine.severity) {
        case 'DEBUG':
          syncImplementation.logger.debug(instruction.LogLine.line);
          break;
        case 'INFO':
          syncImplementation.logger.info(instruction.LogLine.line);
          break;
        case 'WARNING':
          syncImplementation.logger.warn(instruction.LogLine.line);
          break;
      }
    } else if ('UpdateSyncStatus' in instruction) {
      syncImplementation.updateSyncStatus(coreStatusToJs(instruction.UpdateSyncStatus.status));
    } else if ('EstablishSyncStream' in instruction) {
      if (receivingLines != null) {
        // Already connected, this shouldn't happen during a single iteration.
        // NOTE(review): throwing a string loses the stack trace — consider `new Error(...)`.
        throw 'Unexpected request to establish sync stream, already connected';
      }

      // Intentionally not awaited: the receive loop runs concurrently and is
      // awaited at the end of the iteration.
      receivingLines = connect(instruction.EstablishSyncStream);
    } else if ('FetchCredentials' in instruction) {
      if (instruction.FetchCredentials.did_expire) {
        remote.invalidateCredentials();
      } else {
        remote.invalidateCredentials();

        // Restart iteration after the credentials have been refreshed.
        remote.fetchCredentials().then(
          (_) => {
            controlInvocations?.enqueueData({ command: PowerSyncControlCommand.NOTIFY_TOKEN_REFRESHED });
          },
          (err) => {
            syncImplementation.logger.warn('Could not prefetch credentials', err);
          }
        );
      }
    } else if ('CloseSyncStream' in instruction) {
      abortController.abort();
      hideDisconnectOnRestart = instruction.CloseSyncStream.hide_disconnect;
    } else if ('FlushFileSystem' in instruction) {
      // Not necessary on JS platforms.
    } else if ('DidCompleteSync' in instruction) {
      // A full sync completed: clear any previously reported download error.
      syncImplementation.updateSyncStatus({
        dataFlow: {
          downloadError: undefined
        }
      });
    }
  }

  async function handleInstructions(instructions: Instruction[]) {
    for (const instr of instructions) {
      await handleInstruction(instr);
    }
  }

  try {
    const options: any = {
      parameters: resolvedOptions.params,
      active_streams: this.activeStreams,
      include_defaults: resolvedOptions.includeDefaultStreams
    };
    if (resolvedOptions.serializedSchema) {
      options.schema = resolvedOptions.serializedSchema;
    }

    await control(PowerSyncControlCommand.START, JSON.stringify(options));

    // Relay local events (completed uploads, subscription changes) into the
    // control stream so they're processed in order with service lines.
    this.notifyCompletedUploads = () => {
      if (controlInvocations && !controlInvocations?.closed) {
        controlInvocations.enqueueData({ command: PowerSyncControlCommand.NOTIFY_CRUD_UPLOAD_COMPLETED });
      }
    };
    this.handleActiveStreamsChange = () => {
      if (controlInvocations && !controlInvocations?.closed) {
        controlInvocations.enqueueData({
          command: PowerSyncControlCommand.UPDATE_SUBSCRIPTIONS,
          payload: JSON.stringify(this.activeStreams)
        });
      }
    };
    await receivingLines;
  } finally {
    this.notifyCompletedUploads = this.handleActiveStreamsChange = undefined;
    await stop();
  }

  return { immediateRestart: hideDisconnectOnRestart };
}
|
|
1121
|
+
|
|
1122
|
+
/**
 * Seeds the download-progress status for a newly received checkpoint from the
 * bucket operation counts already stored locally. If any bucket's local op
 * count exceeds its target (e.g. after a defrag, compaction, or sync-rule
 * redeploy shrank the bucket), all progress counters are reset to zero so the
 * reported progress can never exceed 100%.
 */
private async updateSyncStatusForStartingCheckpoint(checkpoint: Checkpoint) {
  const localProgress = await this.options.adapter.getBucketOperationProgress();
  const progress: InternalProgressInformation = {};
  let invalidated = false;

  for (const bucket of checkpoint.buckets) {
    const saved = localProgress[bucket.bucket];
    const atLast = saved?.atLast ?? 0;
    const sinceLast = saved?.sinceLast ?? 0;

    progress[bucket.bucket] = {
      // The fallback priority doesn't matter here, but 3 is the one newer versions of the sync service
      // will use by default.
      priority: bucket.priority ?? 3,
      at_last: atLast,
      since_last: sinceLast,
      target_count: bucket.count ?? 0
    };

    // Local ops exceeding the bucket's target means progress would overshoot 100%.
    if (bucket.count != null && bucket.count < atLast + sinceLast) {
      invalidated = true;
    }
  }

  if (invalidated) {
    for (const key of Object.keys(progress)) {
      const entry = progress[key];
      entry.at_last = 0;
      entry.since_last = 0;
    }
  }

  this.updateSyncStatus({
    dataFlow: {
      downloading: true,
      downloadProgress: progress
    }
  });
}
|
|
1164
|
+
|
|
1165
|
+
/**
 * Attempts to apply a validated checkpoint to the local database.
 *
 * @returns `{ applied, endIteration }`:
 * - checksum failure → `{ applied: false, endIteration: true }` (reconnect needed),
 * - blocked by pending local writes → `{ applied: false, endIteration: false }` (retry after upload),
 * - success → `{ applied: true, endIteration: false }`.
 */
private async applyCheckpoint(checkpoint: Checkpoint) {
  const result = await this.options.adapter.syncLocalDatabase(checkpoint);

  if (!result.checkpointValid) {
    this.logger.debug(`Checksum mismatch in checkpoint ${checkpoint.last_op_id}, will reconnect`);
    // This means checksums failed. Start again with a new checkpoint.
    // TODO: better back-off
    await new Promise((resolve) => setTimeout(resolve, 50));
    return { applied: false, endIteration: true };
  }

  if (!result.ready) {
    this.logger.debug(
      `Could not apply checkpoint ${checkpoint.last_op_id} due to local data. We will retry applying the checkpoint after that upload is completed.`
    );

    return { applied: false, endIteration: false };
  }

  this.logger.debug(`Applied checkpoint ${checkpoint.last_op_id}`, checkpoint);
  // The checkpoint landed: the download is complete and any prior error is cleared.
  this.updateSyncStatus({
    connected: true,
    lastSyncedAt: new Date(),
    dataFlow: {
      downloading: false,
      downloadProgress: null,
      downloadError: undefined
    }
  });

  return { applied: true, endIteration: false };
}
|
|
1195
|
+
|
|
1196
|
+
/**
 * Merges a partial status update into the current sync status.
 * `statusChanged` listeners fire only when the merged status actually differs;
 * `statusUpdated` listeners fire on every call with the raw partial update.
 */
protected updateSyncStatus(options: SyncStatusOptions) {
  const previous = this.syncStatus;
  const merged = new SyncStatus({
    connected: options.connected ?? previous.connected,
    // A connected status always clears the connecting flag.
    connecting: !options.connected && (options.connecting ?? previous.connecting),
    lastSyncedAt: options.lastSyncedAt ?? previous.lastSyncedAt,
    dataFlow: {
      ...previous.dataFlowStatus,
      ...options.dataFlow
    },
    priorityStatusEntries: options.priorityStatusEntries ?? previous.priorityStatusEntries,
    clientImplementation: options.clientImplementation ?? previous.clientImplementation
  });

  if (!previous.isEqual(merged)) {
    this.syncStatus = merged;
    // Only notify statusChanged when there was an actual change.
    this.iterateListeners((cb) => cb.statusChanged?.(merged));
  }

  // statusUpdated is notified for every update, changed or not.
  this.iterateListeners((cb) => cb.statusUpdated?.(options));
}
|
|
1218
|
+
|
|
1219
|
+
private async delayRetry(signal?: AbortSignal): Promise<void> {
|
|
1220
|
+
return new Promise((resolve) => {
|
|
1221
|
+
if (signal?.aborted) {
|
|
1222
|
+
// If the signal is already aborted, resolve immediately
|
|
1223
|
+
resolve();
|
|
1224
|
+
return;
|
|
1225
|
+
}
|
|
1226
|
+
|
|
1227
|
+
const { retryDelayMs } = this.options;
|
|
1228
|
+
|
|
1229
|
+
let timeoutId: ReturnType<typeof setTimeout> | undefined;
|
|
1230
|
+
|
|
1231
|
+
const endDelay = () => {
|
|
1232
|
+
resolve();
|
|
1233
|
+
if (timeoutId) {
|
|
1234
|
+
clearTimeout(timeoutId);
|
|
1235
|
+
timeoutId = undefined;
|
|
1236
|
+
}
|
|
1237
|
+
signal?.removeEventListener('abort', endDelay);
|
|
1238
|
+
};
|
|
1239
|
+
|
|
1240
|
+
signal?.addEventListener('abort', endDelay, { once: true });
|
|
1241
|
+
timeoutId = setTimeout(endDelay, retryDelayMs);
|
|
1242
|
+
});
|
|
1243
|
+
}
|
|
1244
|
+
|
|
1245
|
+
/**
 * Replaces the set of actively subscribed sync streams and, when a sync
 * iteration has registered a change handler, notifies it so the updated
 * subscriptions are forwarded to the sync service.
 */
updateSubscriptions(subscriptions: SubscribedStream[]): void {
  this.activeStreams = subscriptions;
  this.handleActiveStreamsChange?.();
}
|
|
1249
|
+
}
|
|
1250
|
+
|
|
1251
|
+
/**
 * A `powersync_control` invocation queued on the control stream — either a
 * wrapped sync line from the service or a locally-generated event (e.g. a
 * completed CRUD upload or a refreshed token).
 */
interface EnqueuedCommand {
  // The control operation to invoke.
  command: PowerSyncControlCommand;
  // Optional argument: BSON bytes for binary lines, a string for text/JSON payloads.
  payload?: Uint8Array | string;
}
|
|
1255
|
+
|
|
1256
|
+
/**
 * Outcome of a Rust-client sync iteration.
 */
interface RustIterationResult {
  // When true, the outer loop restarts immediately without reporting a
  // disconnect or delaying the retry.
  immediateRestart: boolean;
}
|