@powersync/common 1.34.0 → 1.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. package/dist/bundle.cjs +5 -5
  2. package/dist/bundle.mjs +3 -3
  3. package/dist/index.d.cts +2934 -0
  4. package/lib/client/AbstractPowerSyncDatabase.d.ts +61 -5
  5. package/lib/client/AbstractPowerSyncDatabase.js +103 -29
  6. package/lib/client/CustomQuery.d.ts +22 -0
  7. package/lib/client/CustomQuery.js +42 -0
  8. package/lib/client/Query.d.ts +97 -0
  9. package/lib/client/Query.js +1 -0
  10. package/lib/client/sync/bucket/BucketStorageAdapter.d.ts +2 -2
  11. package/lib/client/sync/stream/AbstractRemote.js +31 -19
  12. package/lib/client/sync/stream/AbstractStreamingSyncImplementation.d.ts +3 -2
  13. package/lib/client/sync/stream/AbstractStreamingSyncImplementation.js +19 -5
  14. package/lib/client/watched/GetAllQuery.d.ts +32 -0
  15. package/lib/client/watched/GetAllQuery.js +24 -0
  16. package/lib/client/watched/WatchedQuery.d.ts +98 -0
  17. package/lib/client/watched/WatchedQuery.js +12 -0
  18. package/lib/client/watched/processors/AbstractQueryProcessor.d.ts +67 -0
  19. package/lib/client/watched/processors/AbstractQueryProcessor.js +135 -0
  20. package/lib/client/watched/processors/DifferentialQueryProcessor.d.ts +121 -0
  21. package/lib/client/watched/processors/DifferentialQueryProcessor.js +166 -0
  22. package/lib/client/watched/processors/OnChangeQueryProcessor.d.ts +33 -0
  23. package/lib/client/watched/processors/OnChangeQueryProcessor.js +76 -0
  24. package/lib/client/watched/processors/comparators.d.ts +30 -0
  25. package/lib/client/watched/processors/comparators.js +34 -0
  26. package/lib/index.d.ts +8 -0
  27. package/lib/index.js +8 -0
  28. package/lib/utils/BaseObserver.d.ts +3 -4
  29. package/lib/utils/BaseObserver.js +3 -0
  30. package/lib/utils/MetaBaseObserver.d.ts +29 -0
  31. package/lib/utils/MetaBaseObserver.js +50 -0
  32. package/package.json +12 -7
@@ -0,0 +1,2934 @@
1
+ import { Mutex } from 'async-mutex';
2
+ import Logger, { ILogger, ILogLevel } from 'js-logger';
3
+ export { GlobalLogger, ILogHandler, ILogLevel, ILogger, ILoggerOpts } from 'js-logger';
4
+ import { BSON } from 'bson';
5
+ import { fetch } from 'cross-fetch';
6
+
7
+ interface Disposable {
8
+ dispose: () => Promise<void> | void;
9
+ }
10
+ type BaseListener = Record<string, ((...event: any) => any) | undefined>;
11
+ interface BaseObserverInterface<T extends BaseListener> {
12
+ registerListener(listener: Partial<T>): () => void;
13
+ }
14
+ declare class BaseObserver<T extends BaseListener = BaseListener> implements BaseObserverInterface<T> {
15
+ protected listeners: Set<Partial<T>>;
16
+ constructor();
17
+ dispose(): void;
18
+ /**
19
+ * Register a listener for updates to the PowerSync client.
20
+ */
21
+ registerListener(listener: Partial<T>): () => void;
22
+ iterateListeners(cb: (listener: Partial<T>) => any): void;
23
+ iterateAsyncListeners(cb: (listener: Partial<T>) => Promise<any>): Promise<void>;
24
+ }
25
+
26
+ /**
27
+ * Set of generic interfaces to allow PowerSync compatibility with
28
+ * different SQLite DB implementations.
29
+ */
30
+
31
+ /**
32
+ * TODO most of these types could be exported to a common `types` package
33
+ * which is used by the DB adapter libraries as well.
34
+ */
35
+ /**
36
+ * Object returned by SQL Query executions.
37
+ */
38
+ type QueryResult = {
39
+ /** Represents the auto-generated row id if applicable. */
40
+ insertId?: number;
41
+ /** Number of affected rows if result of an update query. */
42
+ rowsAffected: number;
43
+ /** if status is undefined or 0 this object will contain the query results */
44
+ rows?: {
45
+ /** Raw array with all dataset */
46
+ _array: any[];
47
+ /** The length of the dataset */
48
+ length: number;
49
+ /** A convenience function to access the row object based on its index
50
+ * @param idx the row index
51
+ * @returns the row structure identified by column names
52
+ */
53
+ item: (idx: number) => any;
54
+ };
55
+ };
56
+ interface DBGetUtils {
57
+ /** Execute a read-only query and return results. */
58
+ getAll<T>(sql: string, parameters?: any[]): Promise<T[]>;
59
+ /** Execute a read-only query and return the first result, or null if the ResultSet is empty. */
60
+ getOptional<T>(sql: string, parameters?: any[]): Promise<T | null>;
61
+ /** Execute a read-only query and return the first result, error if the ResultSet is empty. */
62
+ get<T>(sql: string, parameters?: any[]): Promise<T>;
63
+ }
64
+ interface LockContext extends DBGetUtils {
65
+ /** Execute a single write statement. */
66
+ execute: (query: string, params?: any[] | undefined) => Promise<QueryResult>;
67
+ /**
68
+ * Execute a single write statement and return raw results.
69
+ * Unlike `execute`, which returns an object with structured key-value pairs,
70
+ * `executeRaw` returns a nested array of raw values, where each row is
71
+ * represented as an array of column values without field names.
72
+ *
73
+ * Example result:
74
+ *
75
+ * ```[ [ '1', 'list 1', '33', 'Post content', '1' ] ]```
76
+ *
77
+ * Whereas `execute`'s `rows._array` would have been:
78
+ *
79
+ * ```[ { id: '33', name: 'list 1', content: 'Post content', list_id: '1' } ]```
80
+ */
81
+ executeRaw: (query: string, params?: any[] | undefined) => Promise<any[][]>;
82
+ }
83
+ interface Transaction extends LockContext {
84
+ /** Commit multiple changes to the local DB using the Transaction context. */
85
+ commit: () => Promise<QueryResult>;
86
+ /** Roll back multiple attempted changes using the Transaction context. */
87
+ rollback: () => Promise<QueryResult>;
88
+ }
89
+ /**
90
+ * Update table operation numbers from SQLite
91
+ */
92
+ declare enum RowUpdateType {
93
+ SQLITE_INSERT = 18,
94
+ SQLITE_DELETE = 9,
95
+ SQLITE_UPDATE = 23
96
+ }
97
+ interface TableUpdateOperation {
98
+ opType: RowUpdateType;
99
+ rowId: number;
100
+ }
101
+ /**
102
+ * Notification of an update to one or more tables, for the purpose of realtime change notifications.
103
+ */
104
+ interface UpdateNotification extends TableUpdateOperation {
105
+ table: string;
106
+ }
107
+ interface BatchedUpdateNotification {
108
+ rawUpdates: UpdateNotification[];
109
+ tables: string[];
110
+ groupedUpdates: Record<string, TableUpdateOperation[]>;
111
+ }
112
+ interface DBAdapterListener extends BaseListener {
113
+ /**
114
+ * Listener for table updates.
115
+ * Allows for single table updates in order to maintain API compatibility
116
+ * without the need for a major version bump
117
+ * The DB adapter can also batch update notifications if supported.
118
+ */
119
+ tablesUpdated: (updateNotification: BatchedUpdateNotification | UpdateNotification) => void;
120
+ }
121
+ interface DBLockOptions {
122
+ timeoutMs?: number;
123
+ }
124
+ interface DBAdapter extends BaseObserverInterface<DBAdapterListener>, DBGetUtils {
125
+ close: () => void | Promise<void>;
126
+ execute: (query: string, params?: any[]) => Promise<QueryResult>;
127
+ executeRaw: (query: string, params?: any[]) => Promise<any[][]>;
128
+ executeBatch: (query: string, params?: any[][]) => Promise<QueryResult>;
129
+ name: string;
130
+ readLock: <T>(fn: (tx: LockContext) => Promise<T>, options?: DBLockOptions) => Promise<T>;
131
+ readTransaction: <T>(fn: (tx: Transaction) => Promise<T>, options?: DBLockOptions) => Promise<T>;
132
+ writeLock: <T>(fn: (tx: LockContext) => Promise<T>, options?: DBLockOptions) => Promise<T>;
133
+ writeTransaction: <T>(fn: (tx: Transaction) => Promise<T>, options?: DBLockOptions) => Promise<T>;
134
+ /**
135
+ * This method refreshes the schema information across all connections. This is for advanced use cases, and should generally not be needed.
136
+ */
137
+ refreshSchema: () => Promise<void>;
138
+ }
139
+ declare function isBatchedUpdateNotification(update: BatchedUpdateNotification | UpdateNotification): update is BatchedUpdateNotification;
140
+ declare function extractTableUpdates(update: BatchedUpdateNotification | UpdateNotification): string[];
141
+
142
+ /**
143
+ * 64-bit unsigned integer stored as a string in base-10.
144
+ *
145
+ * Not sortable as a string.
146
+ */
147
+ type OpId = string;
148
+ /**
149
+ * Type of local change.
150
+ */
151
+ declare enum UpdateType {
152
+ /** Insert or replace existing row. All non-null columns are included in the data. Generated by INSERT statements. */
153
+ PUT = "PUT",
154
+ /** Update existing row. Contains the id, and value of each changed column. Generated by UPDATE statements. */
155
+ PATCH = "PATCH",
156
+ /** Delete existing row. Contains the id. Generated by DELETE statements. */
157
+ DELETE = "DELETE"
158
+ }
159
+ type CrudEntryJSON = {
160
+ id: string;
161
+ data: string;
162
+ tx_id?: number;
163
+ };
164
+ /**
165
+ * The output JSON seems to be a third type of JSON, not the same as the input JSON.
166
+ */
167
+ type CrudEntryOutputJSON = {
168
+ op_id: number;
169
+ op: UpdateType;
170
+ type: string;
171
+ id: string;
172
+ tx_id?: number;
173
+ data?: Record<string, any>;
174
+ old?: Record<string, any>;
175
+ metadata?: string;
176
+ };
177
+ /**
178
+ * A single client-side change.
179
+ */
180
+ declare class CrudEntry {
181
+ /**
182
+ * Auto-incrementing client-side id.
183
+ */
184
+ clientId: number;
185
+ /**
186
+ * ID of the changed row.
187
+ */
188
+ id: string;
189
+ /**
190
+ * Type of change.
191
+ */
192
+ op: UpdateType;
193
+ /**
194
+ * Data associated with the change.
195
+ */
196
+ opData?: Record<string, any>;
197
+ /**
198
+ * For tables where the `trackPreviousValues` option has been enabled, this tracks previous values for
199
+ * `UPDATE` and `DELETE` statements.
200
+ */
201
+ previousValues?: Record<string, any>;
202
+ /**
203
+ * Table that contained the change.
204
+ */
205
+ table: string;
206
+ /**
207
+ * Auto-incrementing transaction id. This is the same for all operations within the same transaction.
208
+ */
209
+ transactionId?: number;
210
+ /**
211
+ * Client-side metadata attached with this write.
212
+ *
213
+ * This field is only available when the `trackMetadata` option was set to `true` when creating a table
214
+ * and the insert or update statement set the `_metadata` column.
215
+ */
216
+ metadata?: string;
217
+ static fromRow(dbRow: CrudEntryJSON): CrudEntry;
218
+ constructor(clientId: number, op: UpdateType, table: string, id: string, transactionId?: number, opData?: Record<string, any>, previousValues?: Record<string, any>, metadata?: string);
219
+ /**
220
+ * Converts the change to JSON format.
221
+ */
222
+ toJSON(): CrudEntryOutputJSON;
223
+ equals(entry: CrudEntry): boolean;
224
+ /**
225
+ * The hash code for this object.
226
+ * @deprecated This should not be necessary in the JS SDK.
227
+ * Use the @see CrudEntry#equals method instead.
228
+ * TODO remove in the next major release.
229
+ */
230
+ hashCode(): string;
231
+ /**
232
+ * Generates an array for use in deep comparison operations
233
+ */
234
+ toComparisonArray(): (string | number | Record<string, any> | undefined)[];
235
+ }
236
+
237
+ /**
238
+ * A batch of client-side changes.
239
+ */
240
+ declare class CrudBatch {
241
+ /**
242
+ * List of client-side changes.
243
+ */
244
+ crud: CrudEntry[];
245
+ /**
246
+ * true if there are more changes in the local queue.
247
+ */
248
+ haveMore: boolean;
249
+ /**
250
+ * Call to remove the changes from the local queue, once successfully uploaded.
251
+ */
252
+ complete: (writeCheckpoint?: string) => Promise<void>;
253
+ constructor(
254
+ /**
255
+ * List of client-side changes.
256
+ */
257
+ crud: CrudEntry[],
258
+ /**
259
+ * true if there are more changes in the local queue.
260
+ */
261
+ haveMore: boolean,
262
+ /**
263
+ * Call to remove the changes from the local queue, once successfully uploaded.
264
+ */
265
+ complete: (writeCheckpoint?: string) => Promise<void>);
266
+ }
267
+
268
+ declare enum OpTypeEnum {
269
+ CLEAR = 1,
270
+ MOVE = 2,
271
+ PUT = 3,
272
+ REMOVE = 4
273
+ }
274
+ type OpTypeJSON = string;
275
+ /**
276
+ * Used internally for sync buckets.
277
+ */
278
+ declare class OpType {
279
+ value: OpTypeEnum;
280
+ static fromJSON(jsonValue: OpTypeJSON): OpType;
281
+ constructor(value: OpTypeEnum);
282
+ toJSON(): string;
283
+ }
284
+
285
+ interface OplogEntryJSON {
286
+ checksum: number;
287
+ data?: string;
288
+ object_id?: string;
289
+ object_type?: string;
290
+ op_id: string;
291
+ op: OpTypeJSON;
292
+ subkey?: string;
293
+ }
294
+ declare class OplogEntry {
295
+ op_id: OpId;
296
+ op: OpType;
297
+ checksum: number;
298
+ subkey?: string | undefined;
299
+ object_type?: string | undefined;
300
+ object_id?: string | undefined;
301
+ data?: string | undefined;
302
+ static fromRow(row: OplogEntryJSON): OplogEntry;
303
+ constructor(op_id: OpId, op: OpType, checksum: number, subkey?: string | undefined, object_type?: string | undefined, object_id?: string | undefined, data?: string | undefined);
304
+ toJSON(fixedKeyEncoding?: boolean): OplogEntryJSON;
305
+ }
306
+
307
+ type SyncDataBucketJSON = {
308
+ bucket: string;
309
+ has_more?: boolean;
310
+ after?: string;
311
+ next_after?: string;
312
+ data: OplogEntryJSON[];
313
+ };
314
+ declare class SyncDataBucket {
315
+ bucket: string;
316
+ data: OplogEntry[];
317
+ /**
318
+ * True if the response does not contain all the data for this bucket, and another request must be made.
319
+ */
320
+ has_more: boolean;
321
+ /**
322
+ * The `after` specified in the request.
323
+ */
324
+ after?: OpId | undefined;
325
+ /**
326
+ * Use this for the next request.
327
+ */
328
+ next_after?: OpId | undefined;
329
+ static fromRow(row: SyncDataBucketJSON): SyncDataBucket;
330
+ constructor(bucket: string, data: OplogEntry[],
331
+ /**
332
+ * True if the response does not contain all the data for this bucket, and another request must be made.
333
+ */
334
+ has_more: boolean,
335
+ /**
336
+ * The `after` specified in the request.
337
+ */
338
+ after?: OpId | undefined,
339
+ /**
340
+ * Use this for the next request.
341
+ */
342
+ next_after?: OpId | undefined);
343
+ toJSON(fixedKeyEncoding?: boolean): SyncDataBucketJSON;
344
+ }
345
+
346
+ declare class SyncDataBatch {
347
+ buckets: SyncDataBucket[];
348
+ static fromJSON(json: any): SyncDataBatch;
349
+ constructor(buckets: SyncDataBucket[]);
350
+ }
351
+
352
+ interface BucketDescription {
353
+ name: string;
354
+ priority: number;
355
+ }
356
+ interface Checkpoint {
357
+ last_op_id: OpId;
358
+ buckets: BucketChecksum[];
359
+ write_checkpoint?: string;
360
+ }
361
+ interface BucketState {
362
+ bucket: string;
363
+ op_id: string;
364
+ }
365
+ interface ChecksumCache {
366
+ checksums: Map<string, {
367
+ checksum: BucketChecksum;
368
+ last_op_id: OpId;
369
+ }>;
370
+ lastOpId: OpId;
371
+ }
372
+ interface SyncLocalDatabaseResult {
373
+ ready: boolean;
374
+ checkpointValid: boolean;
375
+ checkpointFailures?: string[];
376
+ }
377
+ type SavedProgress = {
378
+ atLast: number;
379
+ sinceLast: number;
380
+ };
381
+ type BucketOperationProgress = Record<string, SavedProgress>;
382
+ interface BucketChecksum {
383
+ bucket: string;
384
+ priority?: number;
385
+ /**
386
+ * 32-bit unsigned hash.
387
+ */
388
+ checksum: number;
389
+ /**
390
+ * Count of operations - informational only.
391
+ */
392
+ count?: number;
393
+ }
394
+ declare enum PSInternalTable {
395
+ DATA = "ps_data",
396
+ CRUD = "ps_crud",
397
+ BUCKETS = "ps_buckets",
398
+ OPLOG = "ps_oplog",
399
+ UNTYPED = "ps_untyped"
400
+ }
401
+ declare enum PowerSyncControlCommand {
402
+ PROCESS_TEXT_LINE = "line_text",
403
+ PROCESS_BSON_LINE = "line_binary",
404
+ STOP = "stop",
405
+ START = "start",
406
+ NOTIFY_TOKEN_REFRESHED = "refreshed_token",
407
+ NOTIFY_CRUD_UPLOAD_COMPLETED = "completed_upload"
408
+ }
409
+ interface BucketStorageListener extends BaseListener {
410
+ crudUpdate: () => void;
411
+ }
412
+ interface BucketStorageAdapter extends BaseObserverInterface<BucketStorageListener>, Disposable {
413
+ init(): Promise<void>;
414
+ saveSyncData(batch: SyncDataBatch, fixedKeyFormat?: boolean): Promise<void>;
415
+ removeBuckets(buckets: string[]): Promise<void>;
416
+ setTargetCheckpoint(checkpoint: Checkpoint): Promise<void>;
417
+ startSession(): void;
418
+ getBucketStates(): Promise<BucketState[]>;
419
+ getBucketOperationProgress(): Promise<BucketOperationProgress>;
420
+ hasMigratedSubkeys(): Promise<boolean>;
421
+ migrateToFixedSubkeys(): Promise<void>;
422
+ syncLocalDatabase(checkpoint: Checkpoint, priority?: number): Promise<{
423
+ checkpointValid: boolean;
424
+ ready: boolean;
425
+ failures?: any[];
426
+ }>;
427
+ nextCrudItem(): Promise<CrudEntry | undefined>;
428
+ hasCrud(): Promise<boolean>;
429
+ getCrudBatch(limit?: number): Promise<CrudBatch | null>;
430
+ hasCompletedSync(): Promise<boolean>;
431
+ updateLocalTarget(cb: () => Promise<string>): Promise<boolean>;
432
+ getMaxOpId(): string;
433
+ /**
434
+ * Get a unique client id.
435
+ */
436
+ getClientId(): Promise<string>;
437
+ /**
438
+ * Invokes the `powersync_control` function for the sync client.
439
+ */
440
+ control(op: PowerSyncControlCommand, payload: string | Uint8Array | null): Promise<string>;
441
+ }
442
+
443
+ /**
444
+ * For sync2.json
445
+ */
446
+ interface ContinueCheckpointRequest {
447
+ /**
448
+ * Existing bucket states. Only these buckets are synchronized.
449
+ */
450
+ buckets: BucketRequest[];
451
+ checkpoint_token: string;
452
+ limit?: number;
453
+ }
454
+ interface SyncNewCheckpointRequest {
455
+ /**
456
+ * Existing bucket states. Used if include_data is specified.
457
+ */
458
+ buckets?: BucketRequest[];
459
+ request_checkpoint: {
460
+ /**
461
+ * Whether or not to include an initial data request.
462
+ */
463
+ include_data: boolean;
464
+ /**
465
+ * Whether or not to compute a checksum.
466
+ */
467
+ include_checksum: boolean;
468
+ };
469
+ limit?: number;
470
+ }
471
+ type SyncRequest = ContinueCheckpointRequest | SyncNewCheckpointRequest;
472
+ interface SyncResponse {
473
+ /**
474
+ * Data for the buckets returned. May not have an entry for each bucket in the request.
475
+ */
476
+ data?: SyncDataBucketJSON[];
477
+ /**
478
+ * True if the response limit has been reached, and another request must be made.
479
+ */
480
+ has_more: boolean;
481
+ checkpoint_token?: string;
482
+ checkpoint?: Checkpoint;
483
+ }
484
+ type JSONValue = string | number | boolean | null | undefined | JSONObject | JSONArray;
485
+ interface JSONObject {
486
+ [key: string]: JSONValue;
487
+ }
488
+ type JSONArray = JSONValue[];
489
+ type StreamingSyncRequestParameterType = JSONValue;
490
+ interface StreamingSyncRequest {
491
+ /**
492
+ * Existing bucket states.
493
+ */
494
+ buckets?: BucketRequest[];
495
+ /**
496
+ * If specified, limit the response to only include these buckets.
497
+ */
498
+ only?: string[];
499
+ /**
500
+ * Whether or not to compute a checksum for each checkpoint
501
+ */
502
+ include_checksum: boolean;
503
+ /**
504
+ * Changes the response to stringified data in each OplogEntry
505
+ */
506
+ raw_data: boolean;
507
+ /**
508
+ * Client parameters to be passed to the sync rules.
509
+ */
510
+ parameters?: Record<string, StreamingSyncRequestParameterType>;
511
+ client_id?: string;
512
+ }
513
+ interface StreamingSyncCheckpoint {
514
+ checkpoint: Checkpoint;
515
+ }
516
+ interface StreamingSyncCheckpointDiff {
517
+ checkpoint_diff: {
518
+ last_op_id: OpId;
519
+ updated_buckets: BucketChecksum[];
520
+ removed_buckets: string[];
521
+ write_checkpoint?: string;
522
+ };
523
+ }
524
+ interface StreamingSyncDataJSON {
525
+ data: SyncDataBucketJSON;
526
+ }
527
+ interface StreamingSyncCheckpointComplete {
528
+ checkpoint_complete: {
529
+ last_op_id: OpId;
530
+ };
531
+ }
532
+ interface StreamingSyncCheckpointPartiallyComplete {
533
+ partial_checkpoint_complete: {
534
+ priority: number;
535
+ last_op_id: OpId;
536
+ };
537
+ }
538
+ interface StreamingSyncKeepalive {
539
+ /** If specified, token expires in this many seconds. */
540
+ token_expires_in: number;
541
+ }
542
+ type StreamingSyncLine = StreamingSyncDataJSON | StreamingSyncCheckpoint | StreamingSyncCheckpointDiff | StreamingSyncCheckpointComplete | StreamingSyncCheckpointPartiallyComplete | StreamingSyncKeepalive;
543
+ type CrudUploadNotification = {
544
+ crud_upload_completed: null;
545
+ };
546
+ type StreamingSyncLineOrCrudUploadComplete = StreamingSyncLine | CrudUploadNotification;
547
+ interface BucketRequest {
548
+ name: string;
549
+ /**
550
+ * Base-10 number. Sync all data from this bucket with op_id > after.
551
+ */
552
+ after: OpId;
553
+ }
554
+ declare function isStreamingSyncData(line: StreamingSyncLine): line is StreamingSyncDataJSON;
555
+ declare function isStreamingKeepalive(line: StreamingSyncLine): line is StreamingSyncKeepalive;
556
+ declare function isStreamingSyncCheckpoint(line: StreamingSyncLine): line is StreamingSyncCheckpoint;
557
+ declare function isStreamingSyncCheckpointComplete(line: StreamingSyncLine): line is StreamingSyncCheckpointComplete;
558
+ declare function isStreamingSyncCheckpointPartiallyComplete(line: StreamingSyncLine): line is StreamingSyncCheckpointPartiallyComplete;
559
+ declare function isStreamingSyncCheckpointDiff(line: StreamingSyncLine): line is StreamingSyncCheckpointDiff;
560
+ declare function isContinueCheckpointRequest(request: SyncRequest): request is ContinueCheckpointRequest;
561
+ declare function isSyncNewCheckpointRequest(request: SyncRequest): request is SyncNewCheckpointRequest;
562
+ /**
563
+ * For crud.json
564
+ */
565
+ interface CrudRequest {
566
+ data: CrudEntry[];
567
+ }
568
+ interface CrudResponse {
569
+ /**
570
+ * A sync response with a checkpoint >= this checkpoint would contain all the changes in this request.
571
+ *
572
+ * Any earlier checkpoint may or may not contain these changes.
573
+ *
574
+ * May be empty when the request contains no ops.
575
+ */
576
+ checkpoint?: OpId;
577
+ }
578
+
579
+ interface BucketProgress {
580
+ priority: number;
581
+ at_last: number;
582
+ since_last: number;
583
+ target_count: number;
584
+ }
585
+
586
+ /** @internal */
587
+ type InternalProgressInformation = Record<string, BucketProgress>;
588
+ /**
589
+ * Information about a progressing download made by the PowerSync SDK.
590
+ *
591
+ * To obtain these values, use {@link SyncProgress}, available through
592
+ * {@link SyncStatus#downloadProgress}.
593
+ */
594
+ interface ProgressWithOperations {
595
+ /**
596
+ * The total amount of operations to download for the current sync iteration
597
+ * to complete.
598
+ */
599
+ totalOperations: number;
600
+ /**
601
+ * The amount of operations that have already been downloaded.
602
+ */
603
+ downloadedOperations: number;
604
+ /**
605
+ * Relative progress, as {@link downloadedOperations} of {@link totalOperations}.
606
+ *
607
+ * This will be a number between `0.0` and `1.0` (inclusive).
608
+ *
609
+ * When this number reaches `1.0`, all changes have been received from the sync service.
610
+ * Actually applying these changes happens before the `downloadProgress` field is cleared from
611
+ * {@link SyncStatus}, so progress can stay at `1.0` for a short while before completing.
612
+ */
613
+ downloadedFraction: number;
614
+ }
615
+ /**
616
+ * Provides realtime progress on how PowerSync is downloading rows.
617
+ *
618
+ * The progress until the next complete sync is available through the fields on {@link ProgressWithOperations},
619
+ * which this class implements.
620
+ * Additionally, the {@link SyncProgress.untilPriority} method can be used to obtain progress towards
621
+ * a specific priority (instead of the progress for the entire download).
622
+ *
623
+ * The reported progress always reflects the status towards the end of a sync iteration (after
624
+ * which a consistent snapshot of all buckets is available locally).
625
+ *
626
+ * In rare cases (in particular, when a [compacting](https://docs.powersync.com/usage/lifecycle-maintenance/compacting-buckets)
627
+ * operation takes place between syncs), it's possible for the returned numbers to be slightly
628
+ * inaccurate. For this reason, {@link SyncProgress} should be seen as an approximation of progress.
629
+ * The information returned is good enough to build progress bars, but not exact enough to track
630
+ * individual download counts.
631
+ *
632
+ * Also note that data is downloaded in bulk, which means that individual counters are unlikely
633
+ * to be updated one-by-one.
634
+ */
635
+ declare class SyncProgress implements ProgressWithOperations {
636
+ protected internal: InternalProgressInformation;
637
+ totalOperations: number;
638
+ downloadedOperations: number;
639
+ downloadedFraction: number;
640
+ constructor(internal: InternalProgressInformation);
641
+ /**
642
+ * Returns download progress towards all data up until the specified priority being received.
643
+ *
644
+ * The returned {@link ProgressWithOperations} tracks the target amount of operations that need
645
+ * to be downloaded in total and how many of them have already been received.
646
+ */
647
+ untilPriority(priority: number): ProgressWithOperations;
648
+ }
649
+
650
+ type SyncDataFlowStatus = Partial<{
651
+ downloading: boolean;
652
+ uploading: boolean;
653
+ /**
654
+ * Error during downloading (including connecting).
655
+ *
656
+ * Cleared on the next successful data download.
657
+ */
658
+ downloadError?: Error;
659
+ /**
660
+ * Error during uploading.
661
+ * Cleared on the next successful upload.
662
+ */
663
+ uploadError?: Error;
664
+ /**
665
+ * Internal information about how far we are downloading operations in buckets.
666
+ *
667
+ * Please use the {@link SyncStatus#downloadProgress} property to track sync progress.
668
+ */
669
+ downloadProgress: InternalProgressInformation | null;
670
+ }>;
671
+ interface SyncPriorityStatus {
672
+ priority: number;
673
+ lastSyncedAt?: Date;
674
+ hasSynced?: boolean;
675
+ }
676
+ type SyncStatusOptions = {
677
+ connected?: boolean;
678
+ connecting?: boolean;
679
+ dataFlow?: SyncDataFlowStatus;
680
+ lastSyncedAt?: Date;
681
+ hasSynced?: boolean;
682
+ priorityStatusEntries?: SyncPriorityStatus[];
683
+ };
684
+ declare class SyncStatus {
685
+ protected options: SyncStatusOptions;
686
+ constructor(options: SyncStatusOptions);
687
+ /**
688
+ * Indicates if the client is currently connected to the PowerSync service.
689
+ *
690
+ * @returns {boolean} True if connected, false otherwise. Defaults to false if not specified.
691
+ */
692
+ get connected(): boolean;
693
+ /**
694
+ * Indicates if the client is in the process of establishing a connection to the PowerSync service.
695
+ *
696
+ * @returns {boolean} True if connecting, false otherwise. Defaults to false if not specified.
697
+ */
698
+ get connecting(): boolean;
699
+ /**
700
+ * Time that a last sync has fully completed, if any.
701
+ * This timestamp is reset to null after a restart of the PowerSync service.
702
+ *
703
+ * @returns {Date | undefined} The timestamp of the last successful sync, or undefined if no sync has completed.
704
+ */
705
+ get lastSyncedAt(): Date | undefined;
706
+ /**
707
+ * Indicates whether there has been at least one full sync completed since initialization.
708
+ *
709
+ * @returns {boolean | undefined} True if at least one sync has completed, false if no sync has completed,
710
+ * or undefined when the state is still being loaded from the database.
711
+ */
712
+ get hasSynced(): boolean | undefined;
713
+ /**
714
+ * Provides the current data flow status regarding uploads and downloads.
715
+ *
716
+ * @returns {SyncDataFlowStatus} An object containing:
717
+ * - downloading: True if actively downloading changes (only when connected is also true)
718
+ * - uploading: True if actively uploading changes
719
+ * Defaults to {downloading: false, uploading: false} if not specified.
720
+ */
721
+ get dataFlowStatus(): Partial<{
722
+ downloading: boolean;
723
+ uploading: boolean;
724
+ /**
725
+ * Error during downloading (including connecting).
726
+ *
727
+ * Cleared on the next successful data download.
728
+ */
729
+ downloadError?: Error;
730
+ /**
731
+ * Error during uploading.
732
+ * Cleared on the next successful upload.
733
+ */
734
+ uploadError?: Error;
735
+ /**
736
+ * Internal information about how far we are downloading operations in buckets.
737
+ *
738
+ * Please use the {@link SyncStatus#downloadProgress} property to track sync progress.
739
+ */
740
+ downloadProgress: InternalProgressInformation | null;
741
+ }>;
742
+ /**
743
+ * Provides sync status information for all bucket priorities, sorted by priority (highest first).
744
+ *
745
+ * @returns {SyncPriorityStatus[]} An array of status entries for different sync priority levels,
746
+ * sorted with highest priorities (lower numbers) first.
747
+ */
748
+ get priorityStatusEntries(): SyncPriorityStatus[];
749
+ /**
750
+ * A realtime progress report on how many operations have been downloaded and
751
+ * how many are necessary in total to complete the next sync iteration.
752
+ *
753
+ * This field is only set when {@link SyncDataFlowStatus#downloading} is also true.
754
+ */
755
+ get downloadProgress(): SyncProgress | null;
756
+ /**
757
+ * Reports the sync status (a pair of {@link SyncStatus#hasSynced} and {@link SyncStatus#lastSyncedAt} fields)
758
+ * for a specific bucket priority level.
759
+ *
760
+ * When buckets with different priorities are declared, PowerSync may choose to synchronize higher-priority
761
+ * buckets first. When a consistent view over all buckets for all priorities up until the given priority is
762
+ * reached, PowerSync makes data from those buckets available before lower-priority buckets have finished
763
+ * syncing.
764
+ *
765
+ * This method returns the status for the requested priority or the next higher priority level that has
766
+ * status information available. This is because when PowerSync makes data for a given priority available,
767
+ * all buckets in higher-priorities are guaranteed to be consistent with that checkpoint.
768
+ *
769
+ * For example, if PowerSync just finished synchronizing buckets in priority level 3, calling this method
770
+ * with a priority of 1 may return information for priority level 3.
771
+ *
772
+ * @param {number} priority The bucket priority for which the status should be reported
773
+ * @returns {SyncPriorityStatus} Status information for the requested priority level or the next higher level with available status
774
+ */
775
+ statusForPriority(priority: number): SyncPriorityStatus;
776
+ /**
777
+ * Compares this SyncStatus instance with another to determine if they are equal.
778
+ * Equality is determined by comparing the serialized JSON representation of both instances.
779
+ *
780
+ * @param {SyncStatus} status The SyncStatus instance to compare against
781
+ * @returns {boolean} True if the instances are considered equal, false otherwise
782
+ */
783
+ isEqual(status: SyncStatus): boolean;
784
+ /**
785
+ * Creates a human-readable string representation of the current sync status.
786
+ * Includes information about connection state, sync completion, and data flow.
787
+ *
788
+ * @returns {string} A string representation of the sync status
789
+ */
790
+ getMessage(): string;
791
+ /**
792
+ * Serializes the SyncStatus instance to a plain object.
793
+ *
794
+ * @returns {SyncStatusOptions} A plain object representation of the sync status
795
+ */
796
+ toJSON(): SyncStatusOptions;
797
+ private static comparePriorities;
798
+ }
799
+
800
+ declare class UploadQueueStats {
801
+ /**
802
+ * Number of records in the upload queue.
803
+ */
804
+ count: number;
805
+ /**
806
+ * Size of the upload queue in bytes.
807
+ */
808
+ size: number | null;
809
+ constructor(
810
+ /**
811
+ * Number of records in the upload queue.
812
+ */
813
+ count: number,
814
+ /**
815
+ * Size of the upload queue in bytes.
816
+ */
817
+ size?: number | null);
818
+ toString(): string;
819
+ }
820
+
821
+ declare enum ColumnType {
822
+ TEXT = "TEXT",
823
+ INTEGER = "INTEGER",
824
+ REAL = "REAL"
825
+ }
826
+ interface ColumnOptions {
827
+ name: string;
828
+ type?: ColumnType;
829
+ }
830
+ type BaseColumnType<T extends number | string | null> = {
831
+ type: ColumnType;
832
+ };
833
+ type ColumnsType = Record<string, BaseColumnType<any>>;
834
+ type ExtractColumnValueType<T extends BaseColumnType<any>> = T extends BaseColumnType<infer R> ? R : unknown;
835
+ declare const MAX_AMOUNT_OF_COLUMNS = 1999;
836
+ declare const column: {
837
+ text: BaseColumnType<string | null>;
838
+ integer: BaseColumnType<number | null>;
839
+ real: BaseColumnType<number | null>;
840
+ };
841
+ declare class Column {
842
+ protected options: ColumnOptions;
843
+ constructor(options: ColumnOptions);
844
+ get name(): string;
845
+ get type(): ColumnType | undefined;
846
+ toJSON(): {
847
+ name: string;
848
+ type: ColumnType | undefined;
849
+ };
850
+ }
851
+
852
+ /**
853
+ * A pending variant of a {@link RawTable} that doesn't have a name (because it would be inferred when creating the
854
+ * schema).
855
+ */
856
+ type RawTableType = {
857
+ /**
858
+ * The statement to run when PowerSync detects that a row needs to be inserted or updated.
859
+ */
860
+ put: PendingStatement;
861
+ /**
862
+ * The statement to run when PowerSync detects that a row needs to be deleted.
863
+ */
864
+ delete: PendingStatement;
865
+ };
866
+ /**
867
+ * A parameter to use as part of {@link PendingStatement}.
868
+ *
869
+ * For delete statements, only the `"Id"` value is supported - the sync client will replace it with the id of the row to
870
+ * be synced.
871
+ *
872
+ * For insert and replace operations, the values of columns in the table are available as parameters through
873
+ * `{Column: 'name'}`.
874
+ */
875
+ type PendingStatementParameter = 'Id' | {
876
+ Column: string;
877
+ };
878
+ /**
879
+ * A statement that the PowerSync client should use to insert or delete data into a table managed by the user.
880
+ */
881
+ type PendingStatement = {
882
+ sql: string;
883
+ params: PendingStatementParameter[];
884
+ };
885
+ /**
886
+ * Instructs PowerSync to sync data into a "raw" table.
887
+ *
888
+ * Since raw tables are not backed by JSON, running complex queries on them may be more efficient. Further, they allow
889
+ * using client-side table and column constraints.
890
+ *
891
+ * To collect local writes to raw tables with PowerSync, custom triggers are required. See
892
+ * {@link https://docs.powersync.com/usage/use-case-examples/raw-tables the documentation} for details and an example on
893
+ * using raw tables.
894
+ *
895
+ * Note that raw tables are only supported when using the new `SyncClientImplementation.rust` sync client.
896
+ *
897
+ * @experimental Please note that this feature is experimental at the moment, and not covered by PowerSync semver or
898
+ * stability guarantees.
899
+ */
900
+ declare class RawTable implements RawTableType {
901
+ /**
902
+ * The name of the table.
903
+ *
904
+ * This does not have to match the actual table name in the schema - {@link put} and {@link delete} are free to use
905
+ * another table. Instead, this name is used by the sync client to recognize that operations on this table (as it
906
+ * appears in the source / backend database) are to be handled specially.
907
+ */
908
+ name: string;
909
+ put: PendingStatement;
910
+ delete: PendingStatement;
911
+ constructor(name: string, type: RawTableType);
912
+ }
913
+
914
+ interface IndexColumnOptions {
915
+ name: string;
916
+ ascending?: boolean;
917
+ }
918
+ declare const DEFAULT_INDEX_COLUMN_OPTIONS: Partial<IndexColumnOptions>;
919
+ declare class IndexedColumn {
920
+ protected options: IndexColumnOptions;
921
+ static createAscending(column: string): IndexedColumn;
922
+ constructor(options: IndexColumnOptions);
923
+ get name(): string;
924
+ get ascending(): boolean | undefined;
925
+ toJSON(table: Table): {
926
+ name: string;
927
+ ascending: boolean | undefined;
928
+ type: ColumnType;
929
+ };
930
+ }
931
+
932
+ interface IndexOptions {
933
+ name: string;
934
+ columns?: IndexedColumn[];
935
+ }
936
+ declare const DEFAULT_INDEX_OPTIONS: Partial<IndexOptions>;
937
+ declare class Index {
938
+ protected options: IndexOptions;
939
+ static createAscending(options: IndexOptions, columnNames: string[]): Index;
940
+ constructor(options: IndexOptions);
941
+ get name(): string;
942
+ get columns(): IndexedColumn[];
943
+ toJSON(table: Table): {
944
+ name: string;
945
+ columns: {
946
+ name: string;
947
+ ascending: boolean | undefined;
948
+ type: ColumnType;
949
+ }[];
950
+ };
951
+ }
952
+
953
+ /**
954
+ Generate a new table from the columns and indexes
955
+ @deprecated You should use {@link Table} instead as it now allows TableV2 syntax.
956
+ This will be removed in the next major release.
957
+ */
958
+ declare class TableV2<Columns extends ColumnsType = ColumnsType> extends Table<Columns> {
959
+ }
960
+
961
+ interface SharedTableOptions {
962
+ localOnly?: boolean;
963
+ insertOnly?: boolean;
964
+ viewName?: string;
965
+ trackPrevious?: boolean | TrackPreviousOptions;
966
+ trackMetadata?: boolean;
967
+ ignoreEmptyUpdates?: boolean;
968
+ }
969
+ /** Whether to include previous column values when PowerSync tracks local changes.
970
+ *
971
+ * Including old values may be helpful for some backend connector implementations, which is
972
+ * why it can be enabled on a per-table or per-column basis.
973
+ */
974
+ interface TrackPreviousOptions {
975
+ /** When defined, a list of column names for which old values should be tracked. */
976
+ columns?: string[];
977
+ /** When enabled, only include values that have actually been changed by an update. */
978
+ onlyWhenChanged?: boolean;
979
+ }
980
+ interface TableOptions extends SharedTableOptions {
981
+ /**
982
+ * The synced table name, matching sync rules
983
+ */
984
+ name: string;
985
+ columns: Column[];
986
+ indexes?: Index[];
987
+ }
988
+ type RowType<T extends TableV2<any>> = {
989
+ [K in keyof T['columnMap']]: ExtractColumnValueType<T['columnMap'][K]>;
990
+ } & {
991
+ id: string;
992
+ };
993
+ type IndexShorthand = Record<string, string[]>;
994
+ interface TableV2Options extends SharedTableOptions {
995
+ indexes?: IndexShorthand;
996
+ }
997
+ declare const DEFAULT_TABLE_OPTIONS: {
998
+ indexes: never[];
999
+ insertOnly: boolean;
1000
+ localOnly: boolean;
1001
+ trackPrevious: boolean;
1002
+ trackMetadata: boolean;
1003
+ ignoreEmptyUpdates: boolean;
1004
+ };
1005
+ declare const InvalidSQLCharacters: RegExp;
1006
+ declare class Table<Columns extends ColumnsType = ColumnsType> {
1007
+ protected options: TableOptions;
1008
+ protected _mappedColumns: Columns;
1009
+ static createLocalOnly(options: TableOptions): Table<ColumnsType>;
1010
+ static createInsertOnly(options: TableOptions): Table<ColumnsType>;
1011
+ /**
1012
+ * Create a table.
1013
+ * @deprecated This was only included for TableV2 and is no longer necessary.
1014
+ * Prefer to use new Table() directly.
1015
+ *
1016
+ * TODO remove in the next major release.
1017
+ */
1018
+ static createTable(name: string, table: Table): Table<ColumnsType>;
1019
+ /**
1020
+ * Creates a new Table instance.
1021
+ *
1022
+ * This constructor supports two different versions:
1023
+ * 1. New constructor: Using a Columns object and an optional TableV2Options object
1024
+ * 2. Deprecated constructor: Using a TableOptions object (will be removed in the next major release)
1025
+ *
1026
+ * @constructor
1027
+ * @param {Columns | TableOptions} optionsOrColumns - Either a Columns object (for V2 syntax) or a TableOptions object (for V1 syntax)
1028
+ * @param {TableV2Options} [v2Options] - Optional configuration options for V2 syntax
1029
+ *
1030
+ * @example
1031
+ * ```javascript
1032
+ * // New Constructor
1033
+ * const table = new Table(
1034
+ * {
1035
+ * name: column.text,
1036
+ * age: column.integer
1037
+ * },
1038
+ * { indexes: { nameIndex: ['name'] } }
1039
+ * );
1040
+ *```
1041
+ *
1042
+ *
1043
+ * @example
1044
+ * ```javascript
1045
+ * // Deprecated Constructor
1046
+ * const table = new Table({
1047
+ * name: 'users',
1048
+ * columns: [
1049
+ * new Column({ name: 'name', type: ColumnType.TEXT }),
1050
+ * new Column({ name: 'age', type: ColumnType.INTEGER })
1051
+ * ]
1052
+ * });
1053
+ *```
1054
+ */
1055
+ constructor(columns: Columns, options?: TableV2Options);
1056
+ /**
1057
+ * @deprecated This constructor will be removed in the next major release.
1058
+ * Use the new constructor shown below instead as this does not show types.
1059
+ * @example
1060
+ * <caption>Use this instead</caption>
1061
+ * ```javascript
1062
+ * const table = new Table(
1063
+ * {
1064
+ * name: column.text,
1065
+ * age: column.integer
1066
+ * },
1067
+ * { indexes: { nameIndex: ['name'] } }
1068
+ * );
1069
+ *```
1070
+ */
1071
+ constructor(options: TableOptions);
1072
+ copyWithName(name: string): Table;
1073
+ private isTableV1;
1074
+ private initTableV1;
1075
+ private initTableV2;
1076
+ private applyDefaultOptions;
1077
+ get name(): string;
1078
+ get viewNameOverride(): string | undefined;
1079
+ get viewName(): string;
1080
+ get columns(): Column[];
1081
+ get columnMap(): Columns;
1082
+ get indexes(): Index[];
1083
+ get localOnly(): boolean;
1084
+ get insertOnly(): boolean;
1085
+ get trackPrevious(): boolean | TrackPreviousOptions;
1086
+ get trackMetadata(): boolean;
1087
+ get ignoreEmptyUpdates(): boolean;
1088
+ get internalName(): string;
1089
+ get validName(): boolean;
1090
+ validate(): void;
1091
+ toJSON(): {
1092
+ name: string;
1093
+ view_name: string;
1094
+ local_only: boolean;
1095
+ insert_only: boolean;
1096
+ include_old: any;
1097
+ include_old_only_when_changed: boolean;
1098
+ include_metadata: boolean;
1099
+ ignore_empty_update: boolean;
1100
+ columns: {
1101
+ name: string;
1102
+ type: ColumnType | undefined;
1103
+ }[];
1104
+ indexes: {
1105
+ name: string;
1106
+ columns: {
1107
+ name: string;
1108
+ ascending: boolean | undefined;
1109
+ type: ColumnType;
1110
+ }[];
1111
+ }[];
1112
+ };
1113
+ }
1114
+
1115
+ type SchemaType = Record<string, Table<any>>;
1116
+ type SchemaTableType<S extends SchemaType> = {
1117
+ [K in keyof S]: RowType<S[K]>;
1118
+ };
1119
+ /**
1120
+ * A schema is a collection of tables. It is used to define the structure of a database.
1121
+ */
1122
+ declare class Schema<S extends SchemaType = SchemaType> {
1123
+ readonly types: SchemaTableType<S>;
1124
+ readonly props: S;
1125
+ readonly tables: Table[];
1126
+ readonly rawTables: RawTable[];
1127
+ constructor(tables: Table[] | S);
1128
+ /**
1129
+ * Adds raw tables to this schema. Raw tables are identified by their name, but entirely managed by the application
1130
+ * developer instead of automatically by PowerSync.
1131
+ * Since raw tables are not backed by JSON, running complex queries on them may be more efficient. Further, they allow
1132
+ * using client-side table and column constraints.
1133
+ * Note that raw tables are only supported when using the new `SyncClientImplementation.rust` sync client.
1134
+ *
1135
+ * @param tables An object of (table name, raw table definition) entries.
1136
+ * @experimental Note that the raw tables API is still experimental and may change in the future.
1137
+ */
1138
+ withRawTables(tables: Record<string, RawTableType>): void;
1139
+ validate(): void;
1140
+ toJSON(): {
1141
+ tables: {
1142
+ name: string;
1143
+ view_name: string;
1144
+ local_only: boolean;
1145
+ insert_only: boolean;
1146
+ include_old: any;
1147
+ include_old_only_when_changed: boolean;
1148
+ include_metadata: boolean;
1149
+ ignore_empty_update: boolean;
1150
+ columns: {
1151
+ name: string;
1152
+ type: ColumnType | undefined;
1153
+ }[];
1154
+ indexes: {
1155
+ name: string;
1156
+ columns: {
1157
+ name: string;
1158
+ ascending: boolean | undefined;
1159
+ type: ColumnType;
1160
+ }[];
1161
+ }[];
1162
+ }[];
1163
+ raw_tables: RawTable[];
1164
+ };
1165
+ private convertToClassicTables;
1166
+ }
1167
+
1168
+ interface PowerSyncCredentials {
1169
+ endpoint: string;
1170
+ token: string;
1171
+ expiresAt?: Date;
1172
+ }
1173
+
1174
+ interface PowerSyncBackendConnector {
1175
+ /** Allows the PowerSync client to retrieve an authentication token from your backend
1176
+ * which is used to authenticate against the PowerSync service.
1177
+ *
1178
+ * This should always fetch a fresh set of credentials - don't use cached
1179
+ * values.
1180
+ *
1181
+ * Return null if the user is not signed in. Throw an error if credentials
1182
+ * cannot be fetched due to a network error or other temporary error.
1183
+ *
1184
+ * This token is kept for the duration of a sync connection.
1185
+ */
1186
+ fetchCredentials: () => Promise<PowerSyncCredentials | null>;
1187
+ /** Upload local changes to the app backend.
1188
+ *
1189
+ * Use {@link AbstractPowerSyncDatabase.getCrudBatch} to get a batch of changes to upload.
1190
+ *
1191
+ * Any thrown errors will result in a retry after the configured wait period (default: 5 seconds).
1192
+ */
1193
+ uploadData: (database: AbstractPowerSyncDatabase) => Promise<void>;
1194
+ }
1195
+
1196
+ type DataStreamOptions<ParsedData, SourceData> = {
1197
+ mapLine?: (line: SourceData) => ParsedData;
1198
+ /**
1199
+ * Close the stream if any consumer throws an error
1200
+ */
1201
+ closeOnError?: boolean;
1202
+ pressure?: {
1203
+ highWaterMark?: number;
1204
+ lowWaterMark?: number;
1205
+ };
1206
+ logger?: ILogger;
1207
+ };
1208
+ type DataStreamCallback<Data extends any = any> = (data: Data) => Promise<void>;
1209
+ interface DataStreamListener<Data extends any = any> extends BaseListener {
1210
+ data: (data: Data) => Promise<void>;
1211
+ closed: () => void;
1212
+ error: (error: Error) => void;
1213
+ highWater: () => Promise<void>;
1214
+ lowWater: () => Promise<void>;
1215
+ }
1216
+ declare const DEFAULT_PRESSURE_LIMITS: {
1217
+ highWater: number;
1218
+ lowWater: number;
1219
+ };
1220
+ /**
1221
+ * A very basic implementation of a data stream with backpressure support which does not use
1222
+ * native JS streams or async iterators.
1223
+ * This is handy for environments such as React Native which need polyfills for the above.
1224
+ */
1225
+ declare class DataStream<ParsedData, SourceData = any> extends BaseObserver<DataStreamListener<ParsedData>> {
1226
+ protected options?: DataStreamOptions<ParsedData, SourceData> | undefined;
1227
+ dataQueue: SourceData[];
1228
+ protected isClosed: boolean;
1229
+ protected processingPromise: Promise<void> | null;
1230
+ protected notifyDataAdded: (() => void) | null;
1231
+ protected logger: ILogger;
1232
+ protected mapLine: (line: SourceData) => ParsedData;
1233
+ constructor(options?: DataStreamOptions<ParsedData, SourceData> | undefined);
1234
+ get highWatermark(): number;
1235
+ get lowWatermark(): number;
1236
+ get closed(): boolean;
1237
+ close(): Promise<void>;
1238
+ /**
1239
+ * Enqueues data for the consumers to read
1240
+ */
1241
+ enqueueData(data: SourceData): void;
1242
+ /**
1243
+ * Reads data once from the data stream
1244
+ * @returns a Data payload or Null if the stream closed.
1245
+ */
1246
+ read(): Promise<ParsedData | null>;
1247
+ /**
1248
+ * Executes a callback for each data item in the stream
1249
+ */
1250
+ forEach(callback: DataStreamCallback<ParsedData>): () => void;
1251
+ protected processQueue(): Promise<void> | undefined;
1252
+ protected hasDataReader(): boolean;
1253
+ protected _processQueue(): Promise<void>;
1254
+ protected iterateAsyncErrored(cb: (l: Partial<DataStreamListener<ParsedData>>) => Promise<void>): Promise<void>;
1255
+ }
1256
+
1257
+ type BSONImplementation = typeof BSON;
1258
+ type RemoteConnector = {
1259
+ fetchCredentials: () => Promise<PowerSyncCredentials | null>;
1260
+ invalidateCredentials?: () => void;
1261
+ };
1262
+ declare const DEFAULT_REMOTE_LOGGER: Logger.ILogger;
1263
+ type SyncStreamOptions = {
1264
+ path: string;
1265
+ data: StreamingSyncRequest;
1266
+ headers?: Record<string, string>;
1267
+ abortSignal?: AbortSignal;
1268
+ fetchOptions?: Request;
1269
+ };
1270
+ declare enum FetchStrategy {
1271
+ /**
1272
+ * Queues multiple sync events before processing, reducing round-trips.
1273
+ * This comes at the cost of more processing overhead, which may cause ACK timeouts on older/weaker devices for big enough datasets.
1274
+ */
1275
+ Buffered = "buffered",
1276
+ /**
1277
+ * Processes each sync event immediately before requesting the next.
1278
+ * This reduces processing overhead and improves real-time responsiveness.
1279
+ */
1280
+ Sequential = "sequential"
1281
+ }
1282
+ type SocketSyncStreamOptions = SyncStreamOptions & {
1283
+ fetchStrategy: FetchStrategy;
1284
+ };
1285
+ type FetchImplementation = typeof fetch;
1286
+ /**
1287
+ * Class wrapper for providing a fetch implementation.
1288
+ * The class wrapper is used to distinguish the fetchImplementation
1289
+ * option in [AbstractRemoteOptions] from the general fetch method
1290
+ * which is typeof "function"
1291
+ */
1292
+ declare class FetchImplementationProvider {
1293
+ getFetch(): FetchImplementation;
1294
+ }
1295
+ type AbstractRemoteOptions = {
1296
+ /**
1297
+ * Transforms the PowerSync base URL which might contain
1298
+ * `http(s)://` to the corresponding WebSocket variant
1299
+ * e.g. `ws(s)://`
1300
+ */
1301
+ socketUrlTransformer: (url: string) => string;
1302
+ /**
1303
+ * Optionally provide the fetch implementation to use.
1304
+ * Note that this usually needs to be bound to the global scope.
1305
+ * Binding should be done before passing here.
1306
+ */
1307
+ fetchImplementation: FetchImplementation | FetchImplementationProvider;
1308
+ /**
1309
+ * Optional options to pass directly to all `fetch` calls.
1310
+ *
1311
+ * This can include fields such as `dispatcher` (e.g. for proxy support),
1312
+ * `cache`, or any other fetch-compatible options.
1313
+ */
1314
+ fetchOptions?: {};
1315
+ };
1316
+ declare const DEFAULT_REMOTE_OPTIONS: AbstractRemoteOptions;
1317
+ declare abstract class AbstractRemote {
1318
+ protected connector: RemoteConnector;
1319
+ protected logger: ILogger;
1320
+ protected credentials: PowerSyncCredentials | null;
1321
+ protected options: AbstractRemoteOptions;
1322
+ constructor(connector: RemoteConnector, logger?: ILogger, options?: Partial<AbstractRemoteOptions>);
1323
+ /**
1324
+ * @returns a fetch implementation (function)
1325
+ * which can be called to perform fetch requests
1326
+ */
1327
+ get fetch(): FetchImplementation;
1328
+ /**
1329
+ * Get credentials currently cached, or fetch new credentials if none are
1330
+ * available.
1331
+ *
1332
+ * These credentials may have expired already.
1333
+ */
1334
+ getCredentials(): Promise<PowerSyncCredentials | null>;
1335
+ /**
1336
+ * Fetch a new set of credentials and cache it.
1337
+ *
1338
+ * Until this call succeeds, `getCredentials` will still return the
1339
+ * old credentials.
1340
+ *
1341
+ * This may be called before the current credentials have expired.
1342
+ */
1343
+ prefetchCredentials(): Promise<PowerSyncCredentials | null>;
1344
+ /**
1345
+ * Get credentials for PowerSync.
1346
+ *
1347
+ * This should always fetch a fresh set of credentials - don't use cached
1348
+ * values.
1349
+ */
1350
+ fetchCredentials(): Promise<PowerSyncCredentials | null>;
1351
+ /***
1352
+ * Immediately invalidate credentials.
1353
+ *
1354
+ * This may be called when the current credentials have expired.
1355
+ */
1356
+ invalidateCredentials(): void;
1357
+ getUserAgent(): string;
1358
+ protected buildRequest(path: string): Promise<{
1359
+ url: string;
1360
+ headers: {
1361
+ 'content-type': string;
1362
+ Authorization: string;
1363
+ 'x-user-agent': string;
1364
+ };
1365
+ }>;
1366
+ post(path: string, data: any, headers?: Record<string, string>): Promise<any>;
1367
+ get(path: string, headers?: Record<string, string>): Promise<any>;
1368
+ /**
1369
+ * Provides a BSON implementation. The import nature of this varies depending on the platform
1370
+ */
1371
+ abstract getBSON(): Promise<BSONImplementation>;
1372
+ protected createSocket(url: string): WebSocket;
1373
+ /**
1374
+ * Returns a data stream of sync line data.
1375
+ *
1376
+ * @param map Maps received payload frames to the typed event value.
1377
+ * @param bson A BSON encoder and decoder. When set, the data stream will be requested with a BSON payload
1378
+ * (required for compatibility with older sync services).
1379
+ */
1380
+ socketStreamRaw<T>(options: SocketSyncStreamOptions, map: (buffer: Uint8Array) => T, bson?: typeof BSON): Promise<DataStream<T>>;
1381
+ /**
1382
+ * Connects to the sync/stream http endpoint, mapping and emitting each received string line.
1383
+ */
1384
+ postStreamRaw<T>(options: SyncStreamOptions, mapLine: (line: string) => T): Promise<DataStream<T>>;
1385
+ }
1386
+
1387
+ declare enum LockType {
1388
+ CRUD = "crud",
1389
+ SYNC = "sync"
1390
+ }
1391
+ declare enum SyncStreamConnectionMethod {
1392
+ HTTP = "http",
1393
+ WEB_SOCKET = "web-socket"
1394
+ }
1395
+ declare enum SyncClientImplementation {
1396
+ /**
1397
+ * Decodes and handles sync lines received from the sync service in JavaScript.
1398
+ *
1399
+ * This is the default option.
1400
+ *
1401
+ * @deprecated Don't use {@link SyncClientImplementation.JAVASCRIPT} directly. Instead, use
1402
+ * {@link DEFAULT_SYNC_CLIENT_IMPLEMENTATION} or omit the option. The explicit choice to use
1403
+ * the JavaScript-based sync implementation will be removed from a future version of the SDK.
1404
+ */
1405
+ JAVASCRIPT = "js",
1406
+ /**
1407
+ * This implementation offloads the sync line decoding and handling into the PowerSync
1408
+ * core extension.
1409
+ *
1410
+ * @experimental
1411
+ * While this implementation is more performant than {@link SyncClientImplementation.JAVASCRIPT},
1412
+ * it has seen less real-world testing and is marked as __experimental__ at the moment.
1413
+ *
1414
+ * ## Compatibility warning
1415
+ *
1416
+ * The Rust sync client stores sync data in a format that is slightly different than the one used
1417
+ * by the old {@link JAVASCRIPT} implementation. When adopting the {@link RUST} client on existing
1418
+ * databases, the PowerSync SDK will migrate the format automatically.
1419
+ * Further, the {@link JAVASCRIPT} client in recent versions of the PowerSync JS SDK (starting from
1420
+ * the version introducing {@link RUST} as an option) also supports the new format, so you can switch
1421
+ * back to {@link JAVASCRIPT} later.
1422
+ *
1423
+ * __However__: Upgrading the SDK version, then adopting {@link RUST} as a sync client and later
1424
+ * downgrading the SDK to an older version (necessarily using the JavaScript-based implementation then)
1425
+ * can lead to sync issues.
1426
+ */
1427
+ RUST = "rust"
1428
+ }
1429
+ /**
1430
+ * The default {@link SyncClientImplementation} to use.
1431
+ *
1432
+ * Please use this field instead of {@link SyncClientImplementation.JAVASCRIPT} directly. A future version
1433
+ * of the PowerSync SDK will enable {@link SyncClientImplementation.RUST} by default and remove the JavaScript
1434
+ * option.
1435
+ */
1436
+ declare const DEFAULT_SYNC_CLIENT_IMPLEMENTATION = SyncClientImplementation.JAVASCRIPT;
1437
+ /**
1438
+ * Abstract Lock to be implemented by various JS environments
1439
+ */
1440
+ interface LockOptions<T> {
1441
+ callback: () => Promise<T>;
1442
+ type: LockType;
1443
+ signal?: AbortSignal;
1444
+ }
1445
+ interface AbstractStreamingSyncImplementationOptions extends AdditionalConnectionOptions {
1446
+ adapter: BucketStorageAdapter;
1447
+ uploadCrud: () => Promise<void>;
1448
+ /**
1449
+ * An identifier for which PowerSync DB this sync implementation is
1450
+ * linked to. Most commonly DB name, but not restricted to DB name.
1451
+ */
1452
+ identifier?: string;
1453
+ logger?: ILogger;
1454
+ remote: AbstractRemote;
1455
+ }
1456
+ interface StreamingSyncImplementationListener extends BaseListener {
1457
+ /**
1458
+ * Triggered whenever a status update has been attempted to be made or
1459
+ * refreshed.
1460
+ */
1461
+ statusUpdated?: ((statusUpdate: SyncStatusOptions) => void) | undefined;
1462
+ /**
1463
+ * Triggers whenever the status' members have changed in value
1464
+ */
1465
+ statusChanged?: ((status: SyncStatus) => void) | undefined;
1466
+ }
1467
+ /**
1468
+ * Configurable options to be used when connecting to the PowerSync
1469
+ * backend instance.
1470
+ */
1471
+ type PowerSyncConnectionOptions = Omit<InternalConnectionOptions, 'serializedSchema'>;
1472
+ interface InternalConnectionOptions extends BaseConnectionOptions, AdditionalConnectionOptions {
1473
+ }
1474
+ /** @internal */
1475
+ interface BaseConnectionOptions {
1476
+ /**
1477
+ * Whether to use a JavaScript implementation to handle received sync lines from the sync
1478
+ * service, or whether this work should be offloaded to the PowerSync core extension.
1479
+ *
1480
+ * This defaults to the JavaScript implementation ({@link SyncClientImplementation.JAVASCRIPT})
1481
+ * since the ({@link SyncClientImplementation.RUST}) implementation is experimental at the moment.
1482
+ */
1483
+ clientImplementation?: SyncClientImplementation;
1484
+ /**
1485
+ * The connection method to use when streaming updates from
1486
+ * the PowerSync backend instance.
1487
+ * Defaults to an HTTP streaming connection.
1488
+ */
1489
+ connectionMethod?: SyncStreamConnectionMethod;
1490
+ /**
1491
+ * The fetch strategy to use when streaming updates from the PowerSync backend instance.
1492
+ */
1493
+ fetchStrategy?: FetchStrategy;
1494
+ /**
1495
+ * These parameters are passed to the sync rules, and will be available under the `user_parameters` object.
1496
+ */
1497
+ params?: Record<string, StreamingSyncRequestParameterType>;
1498
+ /**
1499
+ * The serialized schema - mainly used to forward information about raw tables to the sync client.
1500
+ */
1501
+ serializedSchema?: any;
1502
+ }
1503
+ /** @internal */
1504
+ interface AdditionalConnectionOptions {
1505
+ /**
1506
+ * Delay for retrying sync streaming operations
1507
+ * from the PowerSync backend after an error occurs.
1508
+ */
1509
+ retryDelayMs?: number;
1510
+ /**
1511
+ * Backend Connector CRUD operations are throttled
1512
+ * to occur at most every `crudUploadThrottleMs`
1513
+ * milliseconds.
1514
+ */
1515
+ crudUploadThrottleMs?: number;
1516
+ }
1517
+ /** @internal */
1518
+ type RequiredAdditionalConnectionOptions = Required<AdditionalConnectionOptions>;
1519
+ interface StreamingSyncImplementation extends BaseObserverInterface<StreamingSyncImplementationListener>, Disposable {
1520
+ /**
1521
+ * Connects to the sync service
1522
+ */
1523
+ connect(options?: InternalConnectionOptions): Promise<void>;
1524
+ /**
1525
+ * Disconnects from the sync services.
1526
+ * @throws if not connected or if abort is not controlled internally
1527
+ */
1528
+ disconnect(): Promise<void>;
1529
+ getWriteCheckpoint: () => Promise<string>;
1530
+ hasCompletedSync: () => Promise<boolean>;
1531
+ isConnected: boolean;
1532
+ lastSyncedAt?: Date;
1533
+ syncStatus: SyncStatus;
1534
+ triggerCrudUpload: () => void;
1535
+ waitForReady(): Promise<void>;
1536
+ waitForStatus(status: SyncStatusOptions): Promise<void>;
1537
+ waitUntilStatusMatches(predicate: (status: SyncStatus) => boolean): Promise<void>;
1538
+ }
1539
+ declare const DEFAULT_CRUD_UPLOAD_THROTTLE_MS = 1000;
1540
+ declare const DEFAULT_RETRY_DELAY_MS = 5000;
1541
+ declare const DEFAULT_STREAMING_SYNC_OPTIONS: {
1542
+ retryDelayMs: number;
1543
+ crudUploadThrottleMs: number;
1544
+ };
1545
+ type RequiredPowerSyncConnectionOptions = Required<BaseConnectionOptions>;
1546
+ declare const DEFAULT_STREAM_CONNECTION_OPTIONS: RequiredPowerSyncConnectionOptions;
1547
+ declare abstract class AbstractStreamingSyncImplementation extends BaseObserver<StreamingSyncImplementationListener> implements StreamingSyncImplementation {
1548
+ protected _lastSyncedAt: Date | null;
1549
+ protected options: AbstractStreamingSyncImplementationOptions;
1550
+ protected abortController: AbortController | null;
1551
+ protected uploadAbortController: AbortController | null;
1552
+ protected crudUpdateListener?: () => void;
1553
+ protected streamingSyncPromise?: Promise<void>;
1554
+ protected logger: ILogger;
1555
+ private isUploadingCrud;
1556
+ private notifyCompletedUploads?;
1557
+ syncStatus: SyncStatus;
1558
+ triggerCrudUpload: () => void;
1559
+ constructor(options: AbstractStreamingSyncImplementationOptions);
1560
+ waitForReady(): Promise<void>;
1561
+ waitForStatus(status: SyncStatusOptions): Promise<void>;
1562
+ waitUntilStatusMatches(predicate: (status: SyncStatus) => boolean): Promise<void>;
1563
+ get lastSyncedAt(): Date | undefined;
1564
+ get isConnected(): boolean;
1565
+ dispose(): Promise<void>;
1566
+ abstract obtainLock<T>(lockOptions: LockOptions<T>): Promise<T>;
1567
+ hasCompletedSync(): Promise<boolean>;
1568
+ getWriteCheckpoint(): Promise<string>;
1569
+ protected _uploadAllCrud(): Promise<void>;
1570
+ connect(options?: PowerSyncConnectionOptions): Promise<void>;
1571
+ disconnect(): Promise<void>;
1572
+ /**
1573
+ * @deprecated use [connect instead]
1574
+ */
1575
+ streamingSync(signal?: AbortSignal, options?: PowerSyncConnectionOptions): Promise<void>;
1576
+ private collectLocalBucketState;
1577
+ /**
1578
+ * Older versions of the JS SDK used to encode subkeys as JSON in {@link OplogEntry.toJSON}.
1579
+ * Because subkeys are always strings, this leads to quotes being added around them in `ps_oplog`.
1580
+ * While this is not a problem as long as it's done consistently, it causes issues when a database
1581
+ * created by the JS SDK is used with other SDKs, or (more likely) when the new Rust sync client
1582
+ * is enabled.
1583
+ *
1584
+ * So, we add a migration from the old key format (with quotes) to the new one (no quotes). The
1585
+ * migration is only triggered when necessary (for now). The function returns whether the new format
1586
+ * should be used, so that the JS SDK is able to write to updated databases.
1587
+ *
1588
+ * @param requireFixedKeyFormat Whether we require the new format or also support the old one.
1589
+ * The Rust client requires the new subkey format.
1590
+ * @returns Whether the database is now using the new, fixed subkey format.
1591
+ */
1592
+ private requireKeyFormat;
1593
+ protected streamingSyncIteration(signal: AbortSignal, options?: PowerSyncConnectionOptions): Promise<void>;
1594
+ private legacyStreamingSyncIteration;
1595
+ private rustSyncIteration;
1596
+ private updateSyncStatusForStartingCheckpoint;
1597
+ private applyCheckpoint;
1598
+ protected updateSyncStatus(options: SyncStatusOptions): void;
1599
+ private delayRetry;
1600
+ }
1601
+
1602
+ /**
1603
+ * @internal
1604
+ */
1605
+ interface ConnectionManagerSyncImplementationResult {
1606
+ sync: StreamingSyncImplementation;
1607
+ /**
1608
+ * Additional cleanup function which is called after the sync stream implementation
1609
+ * is disposed.
1610
+ */
1611
+ onDispose: () => Promise<void> | void;
1612
+ }
1613
+ /**
1614
+ * @internal
1615
+ */
1616
+ interface ConnectionManagerOptions {
1617
+ createSyncImplementation(connector: PowerSyncBackendConnector, options: InternalConnectionOptions): Promise<ConnectionManagerSyncImplementationResult>;
1618
+ logger: ILogger;
1619
+ }
1620
+ type StoredConnectionOptions = {
1621
+ connector: PowerSyncBackendConnector;
1622
+ options: InternalConnectionOptions;
1623
+ };
1624
+ /**
1625
+ * @internal
1626
+ */
1627
+ interface ConnectionManagerListener extends BaseListener {
1628
+ syncStreamCreated: (sync: StreamingSyncImplementation) => void;
1629
+ }
1630
+ /**
1631
+ * @internal
1632
+ */
1633
+ declare class ConnectionManager extends BaseObserver<ConnectionManagerListener> {
1634
+ protected options: ConnectionManagerOptions;
1635
+ /**
1636
+ * Tracks active connection attempts
1637
+ */
1638
+ protected connectingPromise: Promise<void> | null;
1639
+ /**
1640
+ * Tracks actively instantiating a streaming sync implementation.
1641
+ */
1642
+ protected syncStreamInitPromise: Promise<void> | null;
1643
+ /**
1644
+ * Active disconnect operation. Calling disconnect multiple times
1645
+ * will resolve to the same operation.
1646
+ */
1647
+ protected disconnectingPromise: Promise<void> | null;
1648
+ /**
1649
+ * Tracks the last parameters supplied to `connect` calls.
1650
+ * Calling `connect` multiple times in succession will result in:
1651
+ * - 1 pending connection operation which will be aborted.
1652
+ * - updating the last set of parameters while waiting for the pending
1653
+ * attempt to be aborted
1654
+ * - internally connecting with the last set of parameters
1655
+ */
1656
+ protected pendingConnectionOptions: StoredConnectionOptions | null;
1657
+ syncStreamImplementation: StreamingSyncImplementation | null;
1658
+ /**
1659
+ * Additional cleanup function which is called after the sync stream implementation
1660
+ * is disposed.
1661
+ */
1662
+ protected syncDisposer: (() => Promise<void> | void) | null;
1663
+ constructor(options: ConnectionManagerOptions);
1664
+ get logger(): ILogger;
1665
+ close(): Promise<void>;
1666
+ connect(connector: PowerSyncBackendConnector, options: InternalConnectionOptions): Promise<void>;
1667
+ protected connectInternal(): Promise<void>;
1668
+ /**
1669
+ * Close the sync connection.
1670
+ *
1671
+ * Use {@link connect} to connect again.
1672
+ */
1673
+ disconnect(): Promise<void>;
1674
+ protected disconnectInternal(): Promise<void>;
1675
+ protected performDisconnect(): Promise<void>;
1676
+ }
1677
+
1678
+ /**
1679
+ * A basic comparator for incrementally watched queries. This performs a single comparison which
1680
+ * determines if the result set has changed. The {@link WatchedQuery} will only emit the new result
1681
+ * if a change has been detected.
1682
+ */
1683
+ interface WatchedQueryComparator<Data> {
1684
+ checkEquality: (current: Data, previous: Data) => boolean;
1685
+ }
1686
+ /**
1687
+ * Options for {@link ArrayComparator}
1688
+ */
1689
+ type ArrayComparatorOptions<ItemType> = {
1690
+ /**
1691
+ * Returns a string to uniquely identify an item in the array.
1692
+ */
1693
+ compareBy: (item: ItemType) => string;
1694
+ };
1695
+ /**
1696
+ * An efficient comparator for {@link WatchedQuery} created with {@link Query#watch}. This has the ability to determine if a query
1697
+ * result has changes without necessarily processing all items in the result.
1698
+ */
1699
+ declare class ArrayComparator<ItemType> implements WatchedQueryComparator<ItemType[]> {
1700
+ protected options: ArrayComparatorOptions<ItemType>;
1701
+ constructor(options: ArrayComparatorOptions<ItemType>);
1702
+ checkEquality(current: ItemType[], previous: ItemType[]): boolean;
1703
+ }
1704
+ /**
1705
+ * Watched query comparator that always reports changed result sets.
1706
+ */
1707
+ declare const FalsyComparator: WatchedQueryComparator<unknown>;
1708
+
1709
+ interface CompilableQuery<T> {
1710
+ execute(): Promise<T[]>;
1711
+ compile(): CompiledQuery;
1712
+ }
1713
+ interface CompiledQuery {
1714
+ readonly sql: string;
1715
+ readonly parameters: ReadonlyArray<unknown>;
1716
+ }
1717
+
1718
+ /**
1719
+ * Represents the counts of listeners for each event type in a BaseListener.
1720
+ */
1721
+ type ListenerCounts<Listener extends BaseListener> = Partial<Record<keyof Listener, number>> & {
1722
+ total: number;
1723
+ };
1724
+ /**
1725
+ * Meta listener which reports the counts of listeners for each event type.
1726
+ */
1727
+ interface MetaListener<ParentListener extends BaseListener> extends BaseListener {
1728
+ listenersChanged?: (counts: ListenerCounts<ParentListener>) => void;
1729
+ }
1730
+ interface ListenerMetaManager<Listener extends BaseListener> extends BaseObserverInterface<MetaListener<Listener>> {
1731
+ counts: ListenerCounts<Listener>;
1732
+ }
1733
+ interface MetaBaseObserverInterface<Listener extends BaseListener> extends BaseObserverInterface<Listener> {
1734
+ listenerMeta: ListenerMetaManager<Listener>;
1735
+ }
1736
+ /**
1737
+ * A BaseObserver that tracks the counts of listeners for each event type.
1738
+ */
1739
+ declare class MetaBaseObserver<Listener extends BaseListener> extends BaseObserver<Listener> implements MetaBaseObserverInterface<Listener> {
1740
+ protected get listenerCounts(): ListenerCounts<Listener>;
1741
+ get listenerMeta(): ListenerMetaManager<Listener>;
1742
+ protected metaListener: BaseObserver<MetaListener<Listener>>;
1743
+ constructor();
1744
+ registerListener(listener: Partial<Listener>): () => void;
1745
+ }
1746
+
1747
+ /**
1748
+ * State for {@link WatchedQuery} instances.
1749
+ */
1750
+ interface WatchedQueryState<Data> {
1751
+ /**
1752
+ * Indicates the initial loading state (hard loading).
1753
+ * Loading becomes false once the first set of results from the watched query is available or an error occurs.
1754
+ */
1755
+ readonly isLoading: boolean;
1756
+ /**
1757
+ * Indicates whether the query is currently fetching data, is true during the initial load
1758
+ * and any time when the query is re-evaluating (useful for large queries).
1759
+ */
1760
+ readonly isFetching: boolean;
1761
+ /**
1762
+ * The last error that occurred while executing the query.
1763
+ */
1764
+ readonly error: Error | null;
1765
+ /**
1766
+ * The last time the query was updated.
1767
+ */
1768
+ readonly lastUpdated: Date | null;
1769
+ /**
1770
+ * The last data returned by the query.
1771
+ */
1772
+ readonly data: Data;
1773
+ }
1774
+ /**
1775
+ * Options provided to the `execute` method of a {@link WatchCompatibleQuery}.
1776
+ */
1777
+ interface WatchExecuteOptions {
1778
+ sql: string;
1779
+ parameters: any[];
1780
+ db: AbstractPowerSyncDatabase;
1781
+ }
1782
+ /**
1783
+ * Similar to {@link CompilableQuery}, except the `execute` method
1784
+ * does not enforce an Array result type.
1785
+ */
1786
+ interface WatchCompatibleQuery<ResultType> {
1787
+ execute(options: WatchExecuteOptions): Promise<ResultType>;
1788
+ compile(): CompiledQuery;
1789
+ }
1790
+ interface WatchedQueryOptions {
1791
+ /** The minimum interval between queries. */
1792
+ throttleMs?: number;
1793
+ /**
1794
+ * If true (default) the watched query will update its state to report
1795
+ * on the fetching state of the query.
1796
+ * Setting to false reduces the number of state changes if the fetch status
1797
+ * is not relevant to the consumer.
1798
+ */
1799
+ reportFetching?: boolean;
1800
+ /**
1801
+ * By default, watched queries requery the database on any change to any dependent table of the query.
1802
+ * Supplying an override here can be used to limit the tables which trigger querying the database.
1803
+ */
1804
+ triggerOnTables?: string[];
1805
+ }
1806
+ declare enum WatchedQueryListenerEvent {
1807
+ ON_DATA = "onData",
1808
+ ON_ERROR = "onError",
1809
+ ON_STATE_CHANGE = "onStateChange",
1810
+ CLOSED = "closed"
1811
+ }
1812
+ interface WatchedQueryListener<Data> extends BaseListener {
1813
+ [WatchedQueryListenerEvent.ON_DATA]?: (data: Data) => void | Promise<void>;
1814
+ [WatchedQueryListenerEvent.ON_ERROR]?: (error: Error) => void | Promise<void>;
1815
+ [WatchedQueryListenerEvent.ON_STATE_CHANGE]?: (state: WatchedQueryState<Data>) => void | Promise<void>;
1816
+ [WatchedQueryListenerEvent.CLOSED]?: () => void | Promise<void>;
1817
+ }
1818
+ declare const DEFAULT_WATCH_THROTTLE_MS = 30;
1819
+ declare const DEFAULT_WATCH_QUERY_OPTIONS: WatchedQueryOptions;
1820
+ interface WatchedQuery<Data = unknown, Settings extends WatchedQueryOptions = WatchedQueryOptions, Listener extends WatchedQueryListener<Data> = WatchedQueryListener<Data>> extends MetaBaseObserverInterface<Listener> {
1821
+ /**
1822
+ * Current state of the watched query.
1823
+ */
1824
+ readonly state: WatchedQueryState<Data>;
1825
+ readonly closed: boolean;
1826
+ /**
1827
+ * Subscribe to watched query events.
1828
+ * @returns A function to unsubscribe from the events.
1829
+ */
1830
+ registerListener(listener: Listener): () => void;
1831
+ /**
1832
+ * Updates the underlying query options.
1833
+ * This will trigger a re-evaluation of the query and update the state.
1834
+ */
1835
+ updateSettings(options: Settings): Promise<void>;
1836
+ /**
1837
+ * Close the watched query and end all subscriptions.
1838
+ */
1839
+ close(): Promise<void>;
1840
+ }
1841
+
1842
+ /**
1843
+ * @internal
1844
+ */
1845
+ interface AbstractQueryProcessorOptions<Data, Settings extends WatchedQueryOptions = WatchedQueryOptions> {
1846
+ db: AbstractPowerSyncDatabase;
1847
+ watchOptions: Settings;
1848
+ placeholderData: Data;
1849
+ }
1850
+ /**
1851
+ * @internal
1852
+ */
1853
+ interface LinkQueryOptions<Data, Settings extends WatchedQueryOptions = WatchedQueryOptions> {
1854
+ abortSignal: AbortSignal;
1855
+ settings: Settings;
1856
+ }
1857
+ type MutableDeep<T> = T extends ReadonlyArray<infer U> ? U[] : T;
1858
+ /**
1859
+ * @internal Mutable version of {@link WatchedQueryState}.
1860
+ * This is used internally to allow updates to the state.
1861
+ */
1862
+ type MutableWatchedQueryState<Data> = {
1863
+ -readonly [P in keyof WatchedQueryState<Data>]: MutableDeep<WatchedQueryState<Data>[P]>;
1864
+ };
1865
+ type WatchedQueryProcessorListener<Data> = WatchedQueryListener<Data>;
1866
+ /**
1867
+ * Performs underlying watching and yields a stream of results.
1868
+ * @internal
1869
+ */
1870
+ declare abstract class AbstractQueryProcessor<Data = unknown[], Settings extends WatchedQueryOptions = WatchedQueryOptions> extends MetaBaseObserver<WatchedQueryProcessorListener<Data>> implements WatchedQuery<Data, Settings> {
1871
+ protected options: AbstractQueryProcessorOptions<Data, Settings>;
1872
+ readonly state: WatchedQueryState<Data>;
1873
+ protected abortController: AbortController;
1874
+ protected initialized: Promise<void>;
1875
+ protected _closed: boolean;
1876
+ protected disposeListeners: (() => void) | null;
1877
+ get closed(): boolean;
1878
+ constructor(options: AbstractQueryProcessorOptions<Data, Settings>);
1879
+ protected constructInitialState(): WatchedQueryState<Data>;
1880
+ protected get reportFetching(): boolean;
1881
+ /**
1882
+ * Updates the underlying query.
1883
+ */
1884
+ updateSettings(settings: Settings): Promise<void>;
1885
+ /**
1886
+ * This method is used to link a query to the subscribers of this listener class.
1887
+ * This method should perform actual query watching and report results via {@link updateState} method.
1888
+ */
1889
+ protected abstract linkQuery(options: LinkQueryOptions<Data>): Promise<void>;
1890
+ protected updateState(update: Partial<MutableWatchedQueryState<Data>>): Promise<void>;
1891
+ /**
1892
+ * Configures base DB listeners and links the query to listeners.
1893
+ */
1894
+ protected init(): Promise<void>;
1895
+ close(): Promise<void>;
1896
+ /**
1897
+ * Runs a callback and reports errors to the error listeners.
1898
+ */
1899
+ protected runWithReporting<T>(callback: () => Promise<T>): Promise<void>;
1900
+ /**
1901
+ * Iterate listeners and reports errors to onError handlers.
1902
+ */
1903
+ protected iterateAsyncListenersWithError(callback: (listener: Partial<WatchedQueryProcessorListener<Data>>) => Promise<void> | void): Promise<void>;
1904
+ }
1905
+
1906
+ /**
1907
+ * Represents an updated row in a differential watched query.
1908
+ * It contains both the current and previous state of the row.
1909
+ */
1910
+ interface WatchedQueryRowDifferential<RowType> {
1911
+ readonly current: RowType;
1912
+ readonly previous: RowType;
1913
+ }
1914
+ /**
1915
+ * Represents the result of a watched query that has been diffed.
1916
+ * {@link DifferentialWatchedQueryState#diff} is of the {@link WatchedQueryDifferential} form.
1917
+ */
1918
+ interface WatchedQueryDifferential<RowType> {
1919
+ readonly added: ReadonlyArray<Readonly<RowType>>;
1920
+ /**
1921
+ * The entire current result set.
1922
+ * Array item object references are preserved between updates if the item is unchanged.
1923
+ *
1924
+ * e.g. In the query
1925
+ * ```sql
1926
+ * SELECT name, make FROM assets ORDER BY make ASC;
1927
+ * ```
1928
+ *
1929
+ * If a previous result set contains an item (A) `{name: 'pc', make: 'Cool PC'}` and
1930
+ * an update has been made which adds another item (B) to the result set (the item A is unchanged) - then
1931
+ * the updated result set will contain the same object reference, to item A, as the previous result set.
1932
+ * This is regardless of the item A's position in the updated result set.
1933
+ */
1934
+ readonly all: ReadonlyArray<Readonly<RowType>>;
1935
+ readonly removed: ReadonlyArray<Readonly<RowType>>;
1936
+ readonly updated: ReadonlyArray<WatchedQueryRowDifferential<Readonly<RowType>>>;
1937
+ readonly unchanged: ReadonlyArray<Readonly<RowType>>;
1938
+ }
1939
+ /**
1940
+ * Row comparator for differentially watched queries which keys and compares items in the result set.
1941
+ */
1942
+ interface DifferentialWatchedQueryComparator<RowType> {
1943
+ /**
1944
+ * Generates a unique key for the item.
1945
+ */
1946
+ keyBy: (item: RowType) => string;
1947
+ /**
1948
+ * Generates a token for comparing items with matching keys.
1949
+ */
1950
+ compareBy: (item: RowType) => string;
1951
+ }
1952
+ /**
1953
+ * Options for building a differential watched query with the {@link Query} builder.
1954
+ */
1955
+ interface DifferentialWatchedQueryOptions<RowType> extends WatchedQueryOptions {
1956
+ /**
1957
+ * Initial result data which is presented while the initial loading is executing.
1958
+ */
1959
+ placeholderData?: RowType[];
1960
+ /**
1961
+ * Row comparator used to identify and compare rows in the result set.
1962
+ * If not provided, the default comparator will be used which keys items by their `id` property if available,
1963
+ * otherwise it uses JSON stringification of the entire item for keying and comparison.
1964
+ * @defaultValue {@link DEFAULT_ROW_COMPARATOR}
1965
+ */
1966
+ rowComparator?: DifferentialWatchedQueryComparator<RowType>;
1967
+ }
1968
+ /**
1969
+ * Settings for differential incrementally watched queries.
1970
+ */
1971
+ interface DifferentialWatchedQuerySettings<RowType> extends DifferentialWatchedQueryOptions<RowType> {
1972
+ /**
1973
+ * The query here must return an array of items that can be differentiated.
1974
+ */
1975
+ query: WatchCompatibleQuery<RowType[]>;
1976
+ }
1977
+ interface DifferentialWatchedQueryListener<RowType> extends WatchedQueryListener<ReadonlyArray<Readonly<RowType>>> {
1978
+ onDiff?: (diff: WatchedQueryDifferential<RowType>) => void | Promise<void>;
1979
+ }
1980
+ type DifferentialWatchedQuery<RowType> = WatchedQuery<ReadonlyArray<Readonly<RowType>>, DifferentialWatchedQuerySettings<RowType>, DifferentialWatchedQueryListener<RowType>>;
1981
+ /**
1982
+ * @internal
1983
+ */
1984
+ interface DifferentialQueryProcessorOptions<RowType> extends AbstractQueryProcessorOptions<RowType[], DifferentialWatchedQuerySettings<RowType>> {
1985
+ rowComparator?: DifferentialWatchedQueryComparator<RowType>;
1986
+ }
1987
+ type DataHashMap<RowType> = Map<string, {
1988
+ hash: string;
1989
+ item: RowType;
1990
+ }>;
1991
+ /**
1992
+ * An empty differential result set.
1993
+ * This is used as the initial state for differential incrementally watched queries.
1994
+ */
1995
+ declare const EMPTY_DIFFERENTIAL: {
1996
+ added: never[];
1997
+ all: never[];
1998
+ removed: never[];
1999
+ updated: never[];
2000
+ unchanged: never[];
2001
+ };
2002
+ /**
2003
+ * Default implementation of the {@link DifferentialWatchedQueryComparator} for watched queries.
2004
+ * It keys items by their `id` property if available, alternatively it uses JSON stringification
2005
+ * of the entire item for the key and comparison.
2006
+ */
2007
+ declare const DEFAULT_ROW_COMPARATOR: DifferentialWatchedQueryComparator<any>;
2008
+ /**
2009
+ * Uses the PowerSync onChange event to trigger watched queries.
2010
+ * Results are only emitted when the differential comparison detects a change in the result set.
2011
+ * @internal
2012
+ */
2013
+ declare class DifferentialQueryProcessor<RowType> extends AbstractQueryProcessor<ReadonlyArray<Readonly<RowType>>, DifferentialWatchedQuerySettings<RowType>> implements DifferentialWatchedQuery<RowType> {
2014
+ protected options: DifferentialQueryProcessorOptions<RowType>;
2015
+ protected comparator: DifferentialWatchedQueryComparator<RowType>;
2016
+ constructor(options: DifferentialQueryProcessorOptions<RowType>);
2017
+ protected differentiate(current: RowType[], previousMap: DataHashMap<RowType>): {
2018
+ diff: WatchedQueryDifferential<RowType>;
2019
+ map: DataHashMap<RowType>;
2020
+ hasChanged: boolean;
2021
+ };
2022
+ protected linkQuery(options: LinkQueryOptions<WatchedQueryDifferential<RowType>>): Promise<void>;
2023
+ }
2024
+
2025
+ /**
2026
+ * Settings for {@link WatchedQuery} instances created via {@link Query#watch}.
2027
+ */
2028
+ interface WatchedQuerySettings<DataType> extends WatchedQueryOptions {
2029
+ query: WatchCompatibleQuery<DataType>;
2030
+ }
2031
+ /**
2032
+ * {@link WatchedQuery} returned from {@link Query#watch}.
2033
+ */
2034
+ type StandardWatchedQuery<DataType> = WatchedQuery<DataType, WatchedQuerySettings<DataType>>;
2035
+ /**
2036
+ * @internal
2037
+ */
2038
+ interface OnChangeQueryProcessorOptions<Data> extends AbstractQueryProcessorOptions<Data, WatchedQuerySettings<Data>> {
2039
+ comparator?: WatchedQueryComparator<Data>;
2040
+ }
2041
+ /**
2042
+ * Uses the PowerSync onChange event to trigger watched queries.
2043
+ * Results are emitted on every change of the relevant tables.
2044
+ * @internal
2045
+ */
2046
+ declare class OnChangeQueryProcessor<Data> extends AbstractQueryProcessor<Data, WatchedQuerySettings<Data>> {
2047
+ protected options: OnChangeQueryProcessorOptions<Data>;
2048
+ constructor(options: OnChangeQueryProcessorOptions<Data>);
2049
+ /**
2050
+ * @returns If the sets are equal
2051
+ */
2052
+ protected checkEquality(current: Data, previous: Data): boolean;
2053
+ protected linkQuery(options: LinkQueryOptions<Data>): Promise<void>;
2054
+ }
2055
+
2056
+ /**
2057
+ * Query parameters for {@link ArrayQueryDefinition#parameters}
2058
+ */
2059
+ type QueryParam = string | number | boolean | null | undefined | bigint | Uint8Array;
2060
+ /**
2061
+ * Options for building a query with {@link AbstractPowerSyncDatabase#query}.
2062
+ * This query will be executed with {@link AbstractPowerSyncDatabase#getAll}.
2063
+ */
2064
+ interface ArrayQueryDefinition<RowType = unknown> {
2065
+ sql: string;
2066
+ parameters?: ReadonlyArray<Readonly<QueryParam>>;
2067
+ /**
2068
+ * Maps the raw SQLite row to a custom typed object.
2069
+ * @example
2070
+ * ```javascript
2071
+ * mapper: (row) => ({
2072
+ * ...row,
2073
+ * created_at: new Date(row.created_at as string),
2074
+ * })
2075
+ * ```
2076
+ */
2077
+ mapper?: (row: Record<string, unknown>) => RowType;
2078
+ }
2079
+ /**
2080
+ * Options for {@link Query#watch}.
2081
+ */
2082
+ interface StandardWatchedQueryOptions<RowType> extends WatchedQueryOptions {
2083
+ /**
2084
+ * The underlying watched query implementation (re)evaluates the query on any SQLite table change.
2085
+ *
2086
+ * Providing this optional comparator can be used to filter duplicate result set emissions when the result set is unchanged.
2087
+ * The comparator compares the previous and current result set.
2088
+ *
2089
+ * For an efficient comparator see {@link ArrayComparator}.
2090
+ *
2091
+ * @example
2092
+ * ```javascript
2093
+ * comparator: new ArrayComparator({
2094
+ * compareBy: (item) => JSON.stringify(item)
2095
+ * })
2096
+ * ```
2097
+ */
2098
+ comparator?: WatchedQueryComparator<RowType[]>;
2099
+ /**
2100
+ * The initial data state reported while the query is loading for the first time.
2101
+ * @default []
2102
+ */
2103
+ placeholderData?: RowType[];
2104
+ }
2105
+ interface Query<RowType> {
2106
+ /**
2107
+ * Creates a {@link WatchedQuery} which watches and emits results of the linked query.
2108
+ *
2109
+ * By default the returned watched query will emit changes whenever a change to the underlying SQLite tables is made.
2110
+ * These changes might not be relevant to the query, but the query will emit a new result set.
2111
+ *
2112
+ * A {@link StandardWatchedQueryOptions#comparator} can be provided to limit the data emissions. The watched query will still
2113
+ * query the underlying DB on underlying table changes, but the result will only be emitted if the comparator detects a change in the results.
2114
+ *
2115
+ * The comparator in this method is optimized and returns early as soon as it detects a change. Each data emission will correlate to a change in the result set,
2116
+ * but note that the result set will not maintain internal object references to the previous result set. If internal object references are needed,
2117
+ * consider using {@link Query#differentialWatch} instead.
2118
+ */
2119
+ watch(options?: StandardWatchedQueryOptions<RowType>): StandardWatchedQuery<ReadonlyArray<Readonly<RowType>>>;
2120
+ /**
2121
+ * Creates a {@link WatchedQuery} which watches and emits results of the linked query.
2122
+ *
2123
+ * This query method watches for changes in the underlying SQLite tables and runs the query on each table change.
2124
+ * The difference between the current and previous result set is computed.
2125
+ * The watched query will not emit changes if the result set is identical to the previous result set.
2126
+ *
2127
+ * If the result set is different, the watched query will emit the new result set and emit a detailed diff of the changes via the `onData` and `onDiff` listeners.
2128
+ *
2129
+ * The deep differentiation allows maintaining result set object references between result emissions.
2130
+ * The {@link DifferentialWatchedQuery#state} `data` array will contain the previous row references for unchanged rows.
2131
+ *
2132
+ * @example
2133
+ * ```javascript
2134
+ * const watchedLists = powerSync.query({sql: 'SELECT * FROM lists'})
2135
+ * .differentialWatch();
2136
+ *
2137
+ * const disposeListener = watchedLists.registerListener({
2138
+ * onData: (lists) => {
2139
+ * console.log('The latest result set for the query is', lists);
2140
+ * },
2141
+ * onDiff: (diff) => {
2142
+ * console.log('The lists result set has changed since the last emission', diff.added, diff.removed, diff.updated, diff.all)
2143
+ * }
2144
+ * })
2145
+ * ```
2146
+ */
2147
+ differentialWatch(options?: DifferentialWatchedQueryOptions<RowType>): DifferentialWatchedQuery<RowType>;
2148
+ }
2149
+
2150
+ interface SQLOpenOptions {
2151
+ /**
2152
+ * Filename for the database.
2153
+ */
2154
+ dbFilename: string;
2155
+ /**
2156
+ * Directory where the database file is located.
2157
+ *
2158
+ * When set, the directory must exist when the database is opened, it will
2159
+ * not be created automatically.
2160
+ */
2161
+ dbLocation?: string;
2162
+ /**
2163
+ * Enable debugMode to log queries to the performance timeline.
2164
+ *
2165
+ * Defaults to false.
2166
+ *
2167
+ * To enable in development builds, use:
2168
+ *
2169
+ * debugMode: process.env.NODE_ENV !== 'production'
2170
+ */
2171
+ debugMode?: boolean;
2172
+ }
2173
+ interface SQLOpenFactory {
2174
+ /**
2175
+ * Opens a connection adapter to a SQLite DB
2176
+ */
2177
+ openDB(): DBAdapter;
2178
+ }
2179
+ /**
2180
+ * Tests if the input is a {@link SQLOpenOptions}
2181
+ */
2182
+ declare const isSQLOpenOptions: (test: any) => test is SQLOpenOptions;
2183
+ /**
2184
+ * Tests if input is a {@link SQLOpenFactory}
2185
+ */
2186
+ declare const isSQLOpenFactory: (test: any) => test is SQLOpenFactory;
2187
+ /**
2188
+ * Tests if input is a {@link DBAdapter}
2189
+ */
2190
+ declare const isDBAdapter: (test: any) => test is DBAdapter;
2191
+
2192
+ declare class CrudTransaction extends CrudBatch {
2193
+ /**
2194
+ * List of client-side changes.
2195
+ */
2196
+ crud: CrudEntry[];
2197
+ /**
2198
+ * Call to remove the changes from the local queue, once successfully uploaded.
2199
+ */
2200
+ complete: (checkpoint?: string) => Promise<void>;
2201
+ /**
2202
+ * If null, this contains a list of changes recorded without an explicit transaction associated.
2203
+ */
2204
+ transactionId?: number | undefined;
2205
+ constructor(
2206
+ /**
2207
+ * List of client-side changes.
2208
+ */
2209
+ crud: CrudEntry[],
2210
+ /**
2211
+ * Call to remove the changes from the local queue, once successfully uploaded.
2212
+ */
2213
+ complete: (checkpoint?: string) => Promise<void>,
2214
+ /**
2215
+ * If null, this contains a list of changes recorded without an explicit transaction associated.
2216
+ */
2217
+ transactionId?: number | undefined);
2218
+ }
2219
+
2220
+ interface DisconnectAndClearOptions {
2221
+ /** When set to false, data in local-only tables is preserved. */
2222
+ clearLocal?: boolean;
2223
+ }
2224
+ interface BasePowerSyncDatabaseOptions extends AdditionalConnectionOptions {
2225
+ /** Schema used for the local database. */
2226
+ schema: Schema;
2227
+ /**
2228
+ * @deprecated Use {@link retryDelayMs} instead as this will be removed in future releases.
2229
+ */
2230
+ retryDelay?: number;
2231
+ logger?: ILogger;
2232
+ }
2233
+ interface PowerSyncDatabaseOptions extends BasePowerSyncDatabaseOptions {
2234
+ /**
2235
+ * Source for a SQLite database connection.
2236
+ * This can be either:
2237
+ * - A {@link DBAdapter} if providing an instantiated SQLite connection
2238
+ * - A {@link SQLOpenFactory} which will be used to open a SQLite connection
2239
+ * - {@link SQLOpenOptions} for opening a SQLite connection with a default {@link SQLOpenFactory}
2240
+ */
2241
+ database: DBAdapter | SQLOpenFactory | SQLOpenOptions;
2242
+ }
2243
+ interface PowerSyncDatabaseOptionsWithDBAdapter extends BasePowerSyncDatabaseOptions {
2244
+ database: DBAdapter;
2245
+ }
2246
+ interface PowerSyncDatabaseOptionsWithOpenFactory extends BasePowerSyncDatabaseOptions {
2247
+ database: SQLOpenFactory;
2248
+ }
2249
+ interface PowerSyncDatabaseOptionsWithSettings extends BasePowerSyncDatabaseOptions {
2250
+ database: SQLOpenOptions;
2251
+ }
2252
+ interface SQLOnChangeOptions {
2253
+ signal?: AbortSignal;
2254
+ tables?: string[];
2255
+ /** The minimum interval between queries. */
2256
+ throttleMs?: number;
2257
+ /**
2258
+ * @deprecated All tables specified in {@link tables} will be watched, including PowerSync tables with prefixes.
2259
+ *
2260
+ * Allows for watching any SQL table
2261
+ * by not removing PowerSync table name prefixes
2262
+ */
2263
+ rawTableNames?: boolean;
2264
+ /**
2265
+ * Emits an empty result set immediately
2266
+ */
2267
+ triggerImmediate?: boolean;
2268
+ }
2269
+ interface SQLWatchOptions extends SQLOnChangeOptions {
2270
+ /**
2271
+ * Optional comparator which will be used to compare the results of the query.
2272
+ * The watched query will only yield results if the comparator returns false.
2273
+ */
2274
+ comparator?: WatchedQueryComparator<QueryResult>;
2275
+ }
2276
+ interface WatchOnChangeEvent {
2277
+ changedTables: string[];
2278
+ }
2279
+ interface WatchHandler {
2280
+ onResult: (results: QueryResult) => void;
2281
+ onError?: (error: Error) => void;
2282
+ }
2283
+ interface WatchOnChangeHandler {
2284
+ onChange: (event: WatchOnChangeEvent) => Promise<void> | void;
2285
+ onError?: (error: Error) => void;
2286
+ }
2287
+ interface PowerSyncDBListener extends StreamingSyncImplementationListener {
2288
+ initialized: () => void;
2289
+ schemaChanged: (schema: Schema) => void;
2290
+ closing: () => Promise<void> | void;
2291
+ closed: () => Promise<void> | void;
2292
+ }
2293
+ interface PowerSyncCloseOptions {
2294
+ /**
2295
+ * Disconnect the sync stream client if connected.
2296
+ * This is usually true, but can be false for Web when using
2297
+ * multiple tabs and a shared sync provider.
2298
+ */
2299
+ disconnect?: boolean;
2300
+ }
2301
+ declare const DEFAULT_POWERSYNC_CLOSE_OPTIONS: PowerSyncCloseOptions;
2302
+ declare const DEFAULT_POWERSYNC_DB_OPTIONS: {
2303
+ retryDelayMs: number;
2304
+ crudUploadThrottleMs: number;
2305
+ };
2306
+ declare const DEFAULT_CRUD_BATCH_LIMIT = 100;
2307
+ /**
2308
+ * Requesting nested or recursive locks can block the application in some circumstances.
2309
+ * This default lock timeout will act as a failsafe to throw an error if a lock cannot
2310
+ * be obtained.
2311
+ */
2312
+ declare const DEFAULT_LOCK_TIMEOUT_MS = 120000;
2313
+ /**
2314
+ * Tests if the input is a {@link PowerSyncDatabaseOptionsWithSettings}
2315
+ * @internal
2316
+ */
2317
+ declare const isPowerSyncDatabaseOptionsWithSettings: (test: any) => test is PowerSyncDatabaseOptionsWithSettings;
2318
+ declare abstract class AbstractPowerSyncDatabase extends BaseObserver<PowerSyncDBListener> {
2319
+ protected options: PowerSyncDatabaseOptions;
2320
+ /**
2321
+ * Returns true if the connection is closed.
2322
+ */
2323
+ closed: boolean;
2324
+ ready: boolean;
2325
+ /**
2326
+ * Current connection status.
2327
+ */
2328
+ currentStatus: SyncStatus;
2329
+ sdkVersion: string;
2330
+ protected bucketStorageAdapter: BucketStorageAdapter;
2331
+ protected _isReadyPromise: Promise<void>;
2332
+ protected connectionManager: ConnectionManager;
2333
+ get syncStreamImplementation(): StreamingSyncImplementation | null;
2334
+ protected _schema: Schema;
2335
+ private _database;
2336
+ protected runExclusiveMutex: Mutex;
2337
+ logger: ILogger;
2338
+ constructor(options: PowerSyncDatabaseOptionsWithDBAdapter);
2339
+ constructor(options: PowerSyncDatabaseOptionsWithOpenFactory);
2340
+ constructor(options: PowerSyncDatabaseOptionsWithSettings);
2341
+ constructor(options: PowerSyncDatabaseOptions);
2342
+ /**
2343
+ * Schema used for the local database.
2344
+ */
2345
+ get schema(): Schema<{
2346
+ [x: string]: Table<any>;
2347
+ }>;
2348
+ /**
2349
+ * The underlying database.
2350
+ *
2351
+ * For the most part, behavior is the same whether querying on the underlying database, or on {@link AbstractPowerSyncDatabase}.
2352
+ */
2353
+ get database(): DBAdapter;
2354
+ /**
2355
+ * Whether a connection to the PowerSync service is currently open.
2356
+ */
2357
+ get connected(): boolean;
2358
+ get connecting(): boolean;
2359
+ /**
2360
+ * Opens the DBAdapter given open options using a default open factory
2361
+ */
2362
+ protected abstract openDBAdapter(options: PowerSyncDatabaseOptionsWithSettings): DBAdapter;
2363
+ protected abstract generateSyncStreamImplementation(connector: PowerSyncBackendConnector, options: RequiredAdditionalConnectionOptions): StreamingSyncImplementation;
2364
+ protected abstract generateBucketStorageAdapter(): BucketStorageAdapter;
2365
+ /**
2366
+ * @returns A promise which will resolve once initialization is completed.
2367
+ */
2368
+ waitForReady(): Promise<void>;
2369
+ /**
2370
+ * Wait for the first sync operation to complete.
2371
+ *
2372
+ * @param request Either an abort signal (after which the promise will complete regardless of
2373
+ * whether a full sync was completed) or an object providing an abort signal and a priority target.
2374
+ * When a priority target is set, the promise may complete when all buckets with the given (or higher)
2375
+ * priorities have been synchronized. This can be earlier than a complete sync.
2376
+ * @returns A promise which will resolve once the first full sync has completed.
2377
+ */
2378
+ waitForFirstSync(request?: AbortSignal | {
2379
+ signal?: AbortSignal;
2380
+ priority?: number;
2381
+ }): Promise<void>;
2382
+ /**
2383
+ * Allows for extended implementations to execute custom initialization
2384
+ * logic as part of the total init process
2385
+ */
2386
+ abstract _initialize(): Promise<void>;
2387
+ /**
2388
+ * Entry point for executing initialization logic.
2389
+ * This is to be automatically executed in the constructor.
2390
+ */
2391
+ protected initialize(): Promise<void>;
2392
+ private _loadVersion;
2393
+ protected updateHasSynced(): Promise<void>;
2394
+ /**
2395
+ * Replace the schema with a new version. This is for advanced use cases - typically the schema should just be specified once in the constructor.
2396
+ *
2397
+ * Cannot be used while connected - this should only be called before {@link AbstractPowerSyncDatabase.connect}.
2398
+ */
2399
+ updateSchema(schema: Schema): Promise<void>;
2400
+ /**
2401
+ * Wait for initialization to complete.
2402
+ * While initializing is automatic, this helps to catch and report initialization errors.
2403
+ */
2404
+ init(): Promise<void>;
2405
+ resolvedConnectionOptions(options?: PowerSyncConnectionOptions): RequiredAdditionalConnectionOptions;
2406
+ /**
2407
+ * @deprecated Use {@link AbstractPowerSyncDatabase#close} instead.
2408
+ * Clears all listeners registered by {@link AbstractPowerSyncDatabase#registerListener}.
2409
+ */
2410
+ dispose(): void;
2411
+ /**
2412
+ * Locking mechanism for exclusively running critical portions of connect/disconnect operations.
2413
+ * Locking here is mostly only important on web for multiple tab scenarios.
2414
+ */
2415
+ protected runExclusive<T>(callback: () => Promise<T>): Promise<T>;
2416
+ /**
2417
+ * Connects to stream of events from the PowerSync instance.
2418
+ */
2419
+ connect(connector: PowerSyncBackendConnector, options?: PowerSyncConnectionOptions): Promise<void>;
2420
+ /**
2421
+ * Close the sync connection.
2422
+ *
2423
+ * Use {@link connect} to connect again.
2424
+ */
2425
+ disconnect(): Promise<void>;
2426
+ /**
2427
+ * Disconnect and clear the database.
2428
+ * Use this when logging out.
2429
+ * The database can still be queried after this is called, but the tables
2430
+ * would be empty.
2431
+ *
2432
+ * To preserve data in local-only tables, set clearLocal to false.
2433
+ */
2434
+ disconnectAndClear(options?: DisconnectAndClearOptions): Promise<void>;
2435
+ /**
2436
+ * Close the database, releasing resources.
2437
+ *
2438
+ * Also disconnects any active connection.
2439
+ *
2440
+ * Once close is called, this connection cannot be used again - a new one
2441
+ * must be constructed.
2442
+ */
2443
+ close(options?: PowerSyncCloseOptions): Promise<void>;
2444
+ /**
2445
+ * Get upload queue size estimate and count.
2446
+ */
2447
+ getUploadQueueStats(includeSize?: boolean): Promise<UploadQueueStats>;
2448
+ /**
2449
+ * Get a batch of CRUD data to upload.
2450
+ *
2451
+ * Returns null if there is no data to upload.
2452
+ *
2453
+ * Use this from the {@link PowerSyncBackendConnector.uploadData} callback.
2454
+ *
2455
+ * Once the data have been successfully uploaded, call {@link CrudBatch.complete} before
2456
+ * requesting the next batch.
2457
+ *
2458
+ * Use {@link limit} to specify the maximum number of updates to return in a single
2459
+ * batch.
2460
+ *
2461
+ * This method does include transaction ids in the result, but does not group
2462
+ * data by transaction. One batch may contain data from multiple transactions,
2463
+ * and a single transaction may be split over multiple batches.
2464
+ *
2465
+ * @param limit Maximum number of CRUD entries to include in the batch
2466
+ * @returns A batch of CRUD operations to upload, or null if there are none
2467
+ */
2468
+ getCrudBatch(limit?: number): Promise<CrudBatch | null>;
2469
+ /**
2470
+ * Get the next recorded transaction to upload.
2471
+ *
2472
+ * Returns null if there is no data to upload.
2473
+ *
2474
+ * Use this from the {@link PowerSyncBackendConnector.uploadData} callback.
2475
+ *
2476
+ * Once the data have been successfully uploaded, call {@link CrudTransaction.complete} before
2477
+ * requesting the next transaction.
2478
+ *
2479
+ * Unlike {@link getCrudBatch}, this only returns data from a single transaction at a time.
2480
+ * All data for the transaction is loaded into memory.
2481
+ *
2482
+ * @returns A transaction of CRUD operations to upload, or null if there are none
2483
+ */
2484
+ getNextCrudTransaction(): Promise<CrudTransaction | null>;
2485
+ /**
2486
+ * Get a unique client id for this database.
2487
+ *
2488
+ * The id is not reset when the database is cleared, only when the database is deleted.
2489
+ *
2490
+ * @returns A unique identifier for the database instance
2491
+ */
2492
+ getClientId(): Promise<string>;
2493
+ private handleCrudCheckpoint;
2494
+ /**
2495
+ * Execute a SQL write (INSERT/UPDATE/DELETE) query
2496
+ * and optionally return results.
2497
+ *
2498
+ * @param sql The SQL query to execute
2499
+ * @param parameters Optional array of parameters to bind to the query
2500
+ * @returns The query result as an object with structured key-value pairs
2501
+ */
2502
+ execute(sql: string, parameters?: any[]): Promise<QueryResult>;
2503
+ /**
2504
+ * Execute a SQL write (INSERT/UPDATE/DELETE) query directly on the database without any PowerSync processing.
2505
+ * This bypasses certain PowerSync abstractions and is useful for accessing the raw database results.
2506
+ *
2507
+ * @param sql The SQL query to execute
2508
+ * @param parameters Optional array of parameters to bind to the query
2509
+ * @returns The raw query result from the underlying database as a nested array of raw values, where each row is
2510
+ * represented as an array of column values without field names.
2511
+ */
2512
+ executeRaw(sql: string, parameters?: any[]): Promise<any[][]>;
2513
+ /**
2514
+ * Execute a write query (INSERT/UPDATE/DELETE) multiple times with each parameter set
2515
+ * and optionally return results.
2516
+ * This is faster than executing separately with each parameter set.
2517
+ *
2518
+ * @param sql The SQL query to execute
2519
+ * @param parameters Optional 2D array of parameter sets, where each inner array is a set of parameters for one execution
2520
+ * @returns The query result
2521
+ */
2522
+ executeBatch(sql: string, parameters?: any[][]): Promise<QueryResult>;
2523
+ /**
2524
+ * Execute a read-only query and return results.
2525
+ *
2526
+ * @param sql The SQL query to execute
2527
+ * @param parameters Optional array of parameters to bind to the query
2528
+ * @returns An array of results
2529
+ */
2530
+ getAll<T>(sql: string, parameters?: any[]): Promise<T[]>;
2531
+ /**
2532
+ * Execute a read-only query and return the first result, or null if the ResultSet is empty.
2533
+ *
2534
+ * @param sql The SQL query to execute
2535
+ * @param parameters Optional array of parameters to bind to the query
2536
+ * @returns The first result if found, or null if no results are returned
2537
+ */
2538
+ getOptional<T>(sql: string, parameters?: any[]): Promise<T | null>;
2539
+ /**
2540
+ * Execute a read-only query and return the first result, or throw an error if the ResultSet is empty.
2541
+ *
2542
+ * @param sql The SQL query to execute
2543
+ * @param parameters Optional array of parameters to bind to the query
2544
+ * @returns The first result matching the query
2545
+ * @throws Error if no rows are returned
2546
+ */
2547
+ get<T>(sql: string, parameters?: any[]): Promise<T>;
2548
+ /**
2549
+ * Takes a read lock, without starting a transaction.
2550
+ * In most cases, {@link readTransaction} should be used instead.
2551
+ */
2552
+ readLock<T>(callback: (db: DBAdapter) => Promise<T>): Promise<T>;
2553
+ /**
2554
+ * Takes a global lock, without starting a transaction.
2555
+ * In most cases, {@link writeTransaction} should be used instead.
2556
+ */
2557
+ writeLock<T>(callback: (db: DBAdapter) => Promise<T>): Promise<T>;
2558
+ /**
2559
+ * Open a read-only transaction.
2560
+ * Read transactions can run concurrently to a write transaction.
2561
+ * Changes from any write transaction are not visible to read transactions started before it.
2562
+ *
2563
+ * @param callback Function to execute within the transaction
2564
+ * @param lockTimeout Time in milliseconds to wait for a lock before throwing an error
2565
+ * @returns The result of the callback
2566
+ * @throws Error if the lock cannot be obtained within the timeout period
2567
+ */
2568
+ readTransaction<T>(callback: (tx: Transaction) => Promise<T>, lockTimeout?: number): Promise<T>;
2569
+ /**
2570
+ * Open a read-write transaction.
2571
+ * This takes a global lock - only one write transaction can execute against the database at a time.
2572
+ * Statements within the transaction must be done on the provided {@link Transaction} interface.
2573
+ *
2574
+ * @param callback Function to execute within the transaction
2575
+ * @param lockTimeout Time in milliseconds to wait for a lock before throwing an error
2576
+ * @returns The result of the callback
2577
+ * @throws Error if the lock cannot be obtained within the timeout period
2578
+ */
2579
+ writeTransaction<T>(callback: (tx: Transaction) => Promise<T>, lockTimeout?: number): Promise<T>;
2580
+ /**
2581
+ * This version of `watch` uses {@link AsyncGenerator}, for documentation see {@link watchWithAsyncGenerator}.
2582
+ * Can be overloaded to use a callback handler instead, for documentation see {@link watchWithCallback}.
2583
+ *
2584
+ * @example
2585
+ * ```javascript
2586
+ * async *attachmentIds() {
2587
+ * for await (const result of this.powersync.watch(
2588
+ * `SELECT photo_id as id FROM todos WHERE photo_id IS NOT NULL`,
2589
+ * []
2590
+ * )) {
2591
+ * yield result.rows?._array.map((r) => r.id) ?? [];
2592
+ * }
2593
+ * }
2594
+ * ```
2595
+ */
2596
+ watch(sql: string, parameters?: any[], options?: SQLWatchOptions): AsyncIterable<QueryResult>;
2597
+ /**
2598
+ * See {@link watchWithCallback}.
2599
+ *
2600
+ * @example
2601
+ * ```javascript
2602
+ * onAttachmentIdsChange(onResult) {
2603
+ * this.powersync.watch(
2604
+ * `SELECT photo_id as id FROM todos WHERE photo_id IS NOT NULL`,
2605
+ * [],
2606
+ * {
2607
+ * onResult: (result) => onResult(result.rows?._array.map((r) => r.id) ?? [])
2608
+ * }
2609
+ * );
2610
+ * }
2611
+ * ```
2612
+ */
2613
+ watch(sql: string, parameters?: any[], handler?: WatchHandler, options?: SQLWatchOptions): void;
2614
+ /**
2615
+ * Allows defining a query which can be used to build a {@link WatchedQuery}.
2616
+ * The defined query will be executed with {@link AbstractPowerSyncDatabase#getAll}.
2617
+ * An optional mapper function can be provided to transform the results.
2618
+ *
2619
+ * @example
2620
+ * ```javascript
2621
+ * const watchedTodos = powersync.query({
2622
+ * sql: `SELECT photo_id as id FROM todos WHERE photo_id IS NOT NULL`,
2623
+ * parameters: [],
2624
+ * mapper: (row) => ({
2625
+ * ...row,
2626
+ * created_at: new Date(row.created_at as string)
2627
+ * })
2628
+ * })
2629
+ * .watch()
2630
+ * // OR use .differentialWatch() for fine-grained watches.
2631
+ * ```
2632
+ */
2633
+ query<RowType>(query: ArrayQueryDefinition<RowType>): Query<RowType>;
2634
+ /**
2635
+ * Allows building a {@link WatchedQuery} using an existing {@link WatchCompatibleQuery}.
2636
+ * The watched query will use the provided {@link WatchCompatibleQuery.execute} method to query results.
2637
+ *
2638
+ * @example
2639
+ * ```javascript
2640
+ *
2641
+ * // Potentially a query from an ORM like Drizzle
2642
+ * const query = db.select().from(lists);
2643
+ *
2644
+ * const watchedTodos = powersync.customQuery(query)
2645
+ * .watch()
2646
+ * // OR use .differentialWatch() for fine-grained watches.
2647
+ * ```
2648
+ */
2649
+ customQuery<RowType>(query: WatchCompatibleQuery<RowType[]>): Query<RowType>;
2650
+ /**
2651
+ * Execute a read query every time the source tables are modified.
2652
+ * Use {@link SQLWatchOptions.throttleMs} to specify the minimum interval between queries.
2653
+ * Source tables are automatically detected using `EXPLAIN QUERY PLAN`.
2654
+ *
2655
+ * Note that the `onChange` callback member of the handler is required.
2656
+ *
2657
+ * @param sql The SQL query to execute
2658
+ * @param parameters Optional array of parameters to bind to the query
2659
+ * @param handler Callbacks for handling results and errors
2660
+ * @param options Options for configuring watch behavior
2661
+ */
2662
+ watchWithCallback(sql: string, parameters?: any[], handler?: WatchHandler, options?: SQLWatchOptions): void;
2663
+ /**
2664
+ * Execute a read query every time the source tables are modified.
2665
+ * Use {@link SQLWatchOptions.throttleMs} to specify the minimum interval between queries.
2666
+ * Source tables are automatically detected using `EXPLAIN QUERY PLAN`.
2667
+ *
2668
+ * @param sql The SQL query to execute
2669
+ * @param parameters Optional array of parameters to bind to the query
2670
+ * @param options Options for configuring watch behavior
2671
+ * @returns An AsyncIterable that yields QueryResults whenever the data changes
2672
+ */
2673
+ watchWithAsyncGenerator(sql: string, parameters?: any[], options?: SQLWatchOptions): AsyncIterable<QueryResult>;
2674
+ /**
2675
+ * Resolves the list of tables that are used in a SQL query.
2676
+ * If tables are specified in the options, those are used directly.
2677
+ * Otherwise, analyzes the query using EXPLAIN to determine which tables are accessed.
2678
+ *
2679
+ * @param sql The SQL query to analyze
2680
+ * @param parameters Optional parameters for the SQL query
2681
+ * @param options Optional watch options that may contain explicit table list
2682
+ * @returns Array of table names that the query depends on
2683
+ */
2684
+ resolveTables(sql: string, parameters?: any[], options?: SQLWatchOptions): Promise<string[]>;
2685
+ /**
2686
+ * This version of `onChange` uses {@link AsyncGenerator}, for documentation see {@link onChangeWithAsyncGenerator}.
2687
+ * Can be overloaded to use a callback handler instead, for documentation see {@link onChangeWithCallback}.
2688
+ *
2689
+ * @example
2690
+ * ```javascript
2691
+ * async monitorChanges() {
2692
+ * for await (const event of this.powersync.onChange({tables: ['todos']})) {
2693
+ * console.log('Detected change event:', event);
2694
+ * }
2695
+ * }
2696
+ * ```
2697
+ */
2698
+ onChange(options?: SQLOnChangeOptions): AsyncIterable<WatchOnChangeEvent>;
2699
+ /**
2700
+ * See {@link onChangeWithCallback}.
2701
+ *
2702
+ * @example
2703
+ * ```javascript
2704
+ * monitorChanges() {
2705
+ * this.powersync.onChange({
2706
+ * onChange: (event) => {
2707
+ * console.log('Change detected:', event);
2708
+ * }
2709
+ * }, { tables: ['todos'] });
2710
+ * }
2711
+ * ```
2712
+ */
2713
+ onChange(handler?: WatchOnChangeHandler, options?: SQLOnChangeOptions): () => void;
2714
+ /**
2715
+ * Invoke the provided callback on any changes to any of the specified tables.
2716
+ *
2717
+ * This is preferred over {@link watchWithCallback} when multiple queries need to be performed
2718
+ * together when data is changed.
2719
+ *
2720
+ * Note that the `onChange` callback member of the handler is required.
2721
+ *
2722
+ * @param handler Callbacks for handling change events and errors
2723
+ * @param options Options for configuring watch behavior
2724
+ * @returns A dispose function to stop watching for changes
2725
+ */
2726
+ onChangeWithCallback(handler?: WatchOnChangeHandler, options?: SQLOnChangeOptions): () => void;
2727
+ /**
2728
+ * Create a Stream of changes to any of the specified tables.
2729
+ *
2730
+ * This is preferred over {@link watchWithAsyncGenerator} when multiple queries need to be performed
2731
+ * together when data is changed.
2732
+ *
2733
+ * Note: do not declare this as `async *onChange` as it will not work in React Native.
2734
+ *
2735
+ * @param options Options for configuring watch behavior
2736
+ * @returns An AsyncIterable that yields change events whenever the specified tables change
2737
+ */
2738
+ onChangeWithAsyncGenerator(options?: SQLWatchOptions): AsyncIterable<WatchOnChangeEvent>;
2739
+ private handleTableChanges;
2740
+ private processTableUpdates;
2741
+ /**
2742
+ * @ignore
2743
+ */
2744
+ private executeReadOnly;
2745
+ }
2746
+
2747
+ interface PowerSyncOpenFactoryOptions extends Partial<PowerSyncDatabaseOptions>, SQLOpenOptions {
2748
+ /** Schema used for the local database. */
2749
+ schema: Schema;
2750
+ }
2751
+ declare abstract class AbstractPowerSyncDatabaseOpenFactory {
2752
+ protected options: PowerSyncOpenFactoryOptions;
2753
+ constructor(options: PowerSyncOpenFactoryOptions);
2754
+ /**
2755
+ * Schema used for the local database.
2756
+ */
2757
+ get schema(): Schema<{
2758
+ [x: string]: Table<any>;
2759
+ }>;
2760
+ protected abstract openDB(): DBAdapter;
2761
+ generateOptions(): PowerSyncDatabaseOptions;
2762
+ abstract generateInstance(options: PowerSyncDatabaseOptions): AbstractPowerSyncDatabase;
2763
+ getInstance(): AbstractPowerSyncDatabase;
2764
+ }
2765
+
2766
+ interface CompilableQueryWatchHandler<T> {
2767
+ onResult: (results: T[]) => void;
2768
+ onError?: (error: Error) => void;
2769
+ }
2770
+ declare function compilableQueryWatch<T>(db: AbstractPowerSyncDatabase, query: CompilableQuery<T>, handler: CompilableQueryWatchHandler<T>, options?: SQLWatchOptions): void;
2771
+
2772
+ declare const MAX_OP_ID = "9223372036854775807";
2773
+
2774
+ declare function runOnSchemaChange(callback: (signal: AbortSignal) => void, db: AbstractPowerSyncDatabase, options?: SQLWatchOptions): void;
2775
+
2776
+ declare class SqliteBucketStorage extends BaseObserver<BucketStorageListener> implements BucketStorageAdapter {
2777
+ private db;
2778
+ private logger;
2779
+ tableNames: Set<string>;
2780
+ private _hasCompletedSync;
2781
+ private updateListener;
2782
+ private _clientId?;
2783
+ constructor(db: DBAdapter, logger?: ILogger);
2784
+ init(): Promise<void>;
2785
+ dispose(): Promise<void>;
2786
+ _getClientId(): Promise<string>;
2787
+ getClientId(): Promise<string>;
2788
+ getMaxOpId(): string;
2789
+ /**
2790
+ * Reset any caches.
2791
+ */
2792
+ startSession(): void;
2793
+ getBucketStates(): Promise<BucketState[]>;
2794
+ getBucketOperationProgress(): Promise<BucketOperationProgress>;
2795
+ saveSyncData(batch: SyncDataBatch, fixedKeyFormat?: boolean): Promise<void>;
2796
+ removeBuckets(buckets: string[]): Promise<void>;
2797
+ /**
2798
+ * Mark a bucket for deletion.
2799
+ */
2800
+ private deleteBucket;
2801
+ hasCompletedSync(): Promise<boolean>;
2802
+ syncLocalDatabase(checkpoint: Checkpoint, priority?: number): Promise<SyncLocalDatabaseResult>;
2803
+ /**
2804
+ * Atomically update the local state to the current checkpoint.
2805
+ *
2806
+ * This includes creating new tables, dropping old tables, and copying data over from the oplog.
2807
+ */
2808
+ private updateObjectsFromBuckets;
2809
+ validateChecksums(checkpoint: Checkpoint, priority: number | undefined): Promise<SyncLocalDatabaseResult>;
2810
+ updateLocalTarget(cb: () => Promise<string>): Promise<boolean>;
2811
+ nextCrudItem(): Promise<CrudEntry | undefined>;
2812
+ hasCrud(): Promise<boolean>;
2813
+ /**
2814
+ * Get a batch of objects to send to the server.
2815
+ * When the objects are successfully sent to the server, call .complete()
2816
+ */
2817
+ getCrudBatch(limit?: number): Promise<CrudBatch | null>;
2818
+ writeTransaction<T>(callback: (tx: Transaction) => Promise<T>, options?: {
2819
+ timeoutMs: number;
2820
+ }): Promise<T>;
2821
+ /**
2822
+ * Set a target checkpoint.
2823
+ */
2824
+ setTargetCheckpoint(checkpoint: Checkpoint): Promise<void>;
2825
+ control(op: PowerSyncControlCommand, payload: string | Uint8Array | ArrayBuffer | null): Promise<string>;
2826
+ hasMigratedSubkeys(): Promise<boolean>;
2827
+ migrateToFixedSubkeys(): Promise<void>;
2828
+ static _subkeyMigrationKey: string;
2829
+ }
2830
+
2831
+ /**
2832
+ * Options for {@link GetAllQuery}.
2833
+ */
2834
+ type GetAllQueryOptions<RowType = unknown> = {
2835
+ sql: string;
2836
+ parameters?: ReadonlyArray<unknown>;
2837
+ /**
2838
+ * Optional mapper function to convert raw rows into the desired RowType.
2839
+ * @example
2840
+ * ```javascript
2841
+ * (rawRow) => ({
2842
+ * id: rawRow.id,
2843
+ * created_at: new Date(rawRow.created_at),
2844
+ * })
2845
+ * ```
2846
+ */
2847
+ mapper?: (rawRow: Record<string, unknown>) => RowType;
2848
+ };
2849
+ /**
2850
+ * Performs a {@link AbstractPowerSyncDatabase.getAll} operation for a watched query.
2851
+ */
2852
+ declare class GetAllQuery<RowType = unknown> implements WatchCompatibleQuery<RowType[]> {
2853
+ protected options: GetAllQueryOptions<RowType>;
2854
+ constructor(options: GetAllQueryOptions<RowType>);
2855
+ compile(): CompiledQuery;
2856
+ execute(options: {
2857
+ db: AbstractPowerSyncDatabase;
2858
+ }): Promise<RowType[]>;
2859
+ }
2860
+
2861
+ /**
2862
+ * Calls to AbortController.abort(reason: any) will result in the
2863
+ * `reason` being thrown. This is not necessarily an error,
2864
+ * but extends error for better logging purposes.
2865
+ */
2866
+ declare class AbortOperation extends Error {
2867
+ protected reason: string;
2868
+ constructor(reason: string);
2869
+ }
2870
+
2871
+ interface ControlledExecutorOptions {
2872
+ /**
2873
+ * If throttling is enabled, it ensures only one task runs at a time,
2874
+ * and only one additional task can be scheduled to run after the current task completes. The pending task will be overwritten by the latest task.
2875
+ * Enabled by default.
2876
+ */
2877
+ throttleEnabled?: boolean;
2878
+ }
2879
+ declare class ControlledExecutor<T> {
2880
+ private task;
2881
+ /**
2882
+ * Represents the currently running task, which could be a Promise or undefined if no task is running.
2883
+ */
2884
+ private runningTask;
2885
+ private pendingTaskParam;
2886
+ /**
2887
+ * Flag to determine if throttling is enabled, which controls whether tasks are queued or run immediately.
2888
+ */
2889
+ private isThrottling;
2890
+ private closed;
2891
+ constructor(task: (param: T) => Promise<void> | void, options?: ControlledExecutorOptions);
2892
+ schedule(param: T): void;
2893
+ dispose(): void;
2894
+ private execute;
2895
+ }
2896
+
2897
+ declare const LogLevel: {
2898
+ TRACE: Logger.ILogLevel;
2899
+ DEBUG: Logger.ILogLevel;
2900
+ INFO: Logger.ILogLevel;
2901
+ TIME: Logger.ILogLevel;
2902
+ WARN: Logger.ILogLevel;
2903
+ ERROR: Logger.ILogLevel;
2904
+ OFF: Logger.ILogLevel;
2905
+ };
2906
+ interface CreateLoggerOptions {
2907
+ logLevel?: ILogLevel;
2908
+ }
2909
+ /**
2910
+ * Retrieves the base (default) logger instance.
2911
+ *
2912
+ * This base logger controls the default logging configuration and is shared
2913
+ * across all loggers created with `createLogger`. Adjusting settings on this
2914
+ * base logger affects all loggers derived from it unless explicitly overridden.
2915
+ *
2916
+ */
2917
+ declare function createBaseLogger(): typeof Logger;
2918
+ /**
2919
+ * Creates and configures a new named logger based on the base logger.
2920
+ *
2921
+ * Named loggers allow specific modules or areas of your application to have
2922
+ * their own logging levels and behaviors. These loggers inherit configuration
2923
+ * from the base logger by default but can override settings independently.
2924
+ */
2925
+ declare function createLogger(name: string, options?: CreateLoggerOptions): ILogger;
2926
+
2927
+ interface ParsedQuery {
2928
+ sqlStatement: string;
2929
+ parameters: any[];
2930
+ }
2931
+ declare const parseQuery: <T>(query: string | CompilableQuery<T>, parameters: any[]) => ParsedQuery;
2932
+
2933
+ export { AbortOperation, AbstractPowerSyncDatabase, AbstractPowerSyncDatabaseOpenFactory, AbstractQueryProcessor, AbstractRemote, AbstractStreamingSyncImplementation, ArrayComparator, BaseObserver, Column, ColumnType, ConnectionManager, ControlledExecutor, CrudBatch, CrudEntry, CrudTransaction, DEFAULT_CRUD_BATCH_LIMIT, DEFAULT_CRUD_UPLOAD_THROTTLE_MS, DEFAULT_INDEX_COLUMN_OPTIONS, DEFAULT_INDEX_OPTIONS, DEFAULT_LOCK_TIMEOUT_MS, DEFAULT_POWERSYNC_CLOSE_OPTIONS, DEFAULT_POWERSYNC_DB_OPTIONS, DEFAULT_PRESSURE_LIMITS, DEFAULT_REMOTE_LOGGER, DEFAULT_REMOTE_OPTIONS, DEFAULT_RETRY_DELAY_MS, DEFAULT_ROW_COMPARATOR, DEFAULT_STREAMING_SYNC_OPTIONS, DEFAULT_STREAM_CONNECTION_OPTIONS, DEFAULT_SYNC_CLIENT_IMPLEMENTATION, DEFAULT_TABLE_OPTIONS, DEFAULT_WATCH_QUERY_OPTIONS, DEFAULT_WATCH_THROTTLE_MS, DataStream, DifferentialQueryProcessor, EMPTY_DIFFERENTIAL, FalsyComparator, FetchImplementationProvider, FetchStrategy, GetAllQuery, Index, IndexedColumn, InvalidSQLCharacters, LockType, LogLevel, MAX_AMOUNT_OF_COLUMNS, MAX_OP_ID, OnChangeQueryProcessor, OpType, OpTypeEnum, OplogEntry, PSInternalTable, PowerSyncControlCommand, RowUpdateType, Schema, SqliteBucketStorage, SyncClientImplementation, SyncDataBatch, SyncDataBucket, SyncProgress, SyncStatus, SyncStreamConnectionMethod, Table, TableV2, UpdateType, UploadQueueStats, WatchedQueryListenerEvent, column, compilableQueryWatch, createBaseLogger, createLogger, extractTableUpdates, isBatchedUpdateNotification, isContinueCheckpointRequest, isDBAdapter, isPowerSyncDatabaseOptionsWithSettings, isSQLOpenFactory, isSQLOpenOptions, isStreamingKeepalive, isStreamingSyncCheckpoint, isStreamingSyncCheckpointComplete, isStreamingSyncCheckpointDiff, isStreamingSyncCheckpointPartiallyComplete, isStreamingSyncData, isSyncNewCheckpointRequest, parseQuery, runOnSchemaChange };
2934
+ export type { AbstractQueryProcessorOptions, AbstractRemoteOptions, AbstractStreamingSyncImplementationOptions, AdditionalConnectionOptions, ArrayComparatorOptions, ArrayQueryDefinition, BSONImplementation, BaseColumnType, BaseConnectionOptions, BaseListener, BaseObserverInterface, BasePowerSyncDatabaseOptions, BatchedUpdateNotification, BucketChecksum, BucketDescription, BucketOperationProgress, BucketRequest, BucketState, BucketStorageAdapter, BucketStorageListener, Checkpoint, ChecksumCache, ColumnOptions, ColumnsType, CompilableQuery, CompilableQueryWatchHandler, CompiledQuery, ConnectionManagerListener, ConnectionManagerOptions, ConnectionManagerSyncImplementationResult, ContinueCheckpointRequest, ControlledExecutorOptions, CreateLoggerOptions, CrudRequest, CrudResponse, CrudUploadNotification, DBAdapter, DBAdapterListener, DBGetUtils, DBLockOptions, DataStreamCallback, DataStreamListener, DataStreamOptions, DifferentialQueryProcessorOptions, DifferentialWatchedQuery, DifferentialWatchedQueryComparator, DifferentialWatchedQueryListener, DifferentialWatchedQueryOptions, DifferentialWatchedQuerySettings, DisconnectAndClearOptions, Disposable, ExtractColumnValueType, FetchImplementation, GetAllQueryOptions, IndexColumnOptions, IndexOptions, IndexShorthand, InternalConnectionOptions, LinkQueryOptions, LockContext, LockOptions, MutableWatchedQueryState, OnChangeQueryProcessorOptions, OpId, OpTypeJSON, OplogEntryJSON, ParsedQuery, PowerSyncBackendConnector, PowerSyncCloseOptions, PowerSyncConnectionOptions, PowerSyncCredentials, PowerSyncDBListener, PowerSyncDatabaseOptions, PowerSyncDatabaseOptionsWithDBAdapter, PowerSyncDatabaseOptionsWithOpenFactory, PowerSyncDatabaseOptionsWithSettings, PowerSyncOpenFactoryOptions, ProgressWithOperations, Query, QueryParam, QueryResult, RemoteConnector, RequiredAdditionalConnectionOptions, RequiredPowerSyncConnectionOptions, RowType, SQLOnChangeOptions, SQLOpenFactory, SQLOpenOptions, SQLWatchOptions, SavedProgress, 
SchemaTableType, SocketSyncStreamOptions, StandardWatchedQuery, StandardWatchedQueryOptions, StreamingSyncCheckpoint, StreamingSyncCheckpointComplete, StreamingSyncCheckpointDiff, StreamingSyncCheckpointPartiallyComplete, StreamingSyncDataJSON, StreamingSyncImplementation, StreamingSyncImplementationListener, StreamingSyncKeepalive, StreamingSyncLine, StreamingSyncLineOrCrudUploadComplete, StreamingSyncRequest, StreamingSyncRequestParameterType, SyncDataBucketJSON, SyncDataFlowStatus, SyncLocalDatabaseResult, SyncNewCheckpointRequest, SyncPriorityStatus, SyncRequest, SyncResponse, SyncStatusOptions, SyncStreamOptions, TableOptions, TableUpdateOperation, TableV2Options, TrackPreviousOptions, Transaction, UpdateNotification, WatchCompatibleQuery, WatchExecuteOptions, WatchHandler, WatchOnChangeEvent, WatchOnChangeHandler, WatchedQuery, WatchedQueryComparator, WatchedQueryDifferential, WatchedQueryListener, WatchedQueryOptions, WatchedQueryRowDifferential, WatchedQuerySettings, WatchedQueryState };