@powersync/service-module-postgres-storage 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/LICENSE +67 -0
- package/README.md +67 -0
- package/dist/.tsbuildinfo +1 -0
- package/dist/@types/index.d.ts +7 -0
- package/dist/@types/migrations/PostgresMigrationAgent.d.ts +12 -0
- package/dist/@types/migrations/PostgresMigrationStore.d.ts +14 -0
- package/dist/@types/migrations/migration-utils.d.ts +3 -0
- package/dist/@types/migrations/scripts/1684951997326-init.d.ts +3 -0
- package/dist/@types/module/PostgresStorageModule.d.ts +6 -0
- package/dist/@types/storage/PostgresBucketStorageFactory.d.ts +42 -0
- package/dist/@types/storage/PostgresCompactor.d.ts +40 -0
- package/dist/@types/storage/PostgresStorageProvider.d.ts +5 -0
- package/dist/@types/storage/PostgresSyncRulesStorage.d.ts +46 -0
- package/dist/@types/storage/PostgresTestStorageFactoryGenerator.d.ts +13 -0
- package/dist/@types/storage/batch/OperationBatch.d.ts +47 -0
- package/dist/@types/storage/batch/PostgresBucketBatch.d.ts +90 -0
- package/dist/@types/storage/batch/PostgresPersistedBatch.d.ts +64 -0
- package/dist/@types/storage/checkpoints/PostgresWriteCheckpointAPI.d.ts +20 -0
- package/dist/@types/storage/storage-index.d.ts +5 -0
- package/dist/@types/storage/sync-rules/PostgresPersistedSyncRulesContent.d.ts +17 -0
- package/dist/@types/types/codecs.d.ts +61 -0
- package/dist/@types/types/models/ActiveCheckpoint.d.ts +12 -0
- package/dist/@types/types/models/ActiveCheckpointNotification.d.ts +19 -0
- package/dist/@types/types/models/BucketData.d.ts +22 -0
- package/dist/@types/types/models/BucketParameters.d.ts +11 -0
- package/dist/@types/types/models/CurrentData.d.ts +22 -0
- package/dist/@types/types/models/Instance.d.ts +6 -0
- package/dist/@types/types/models/Migration.d.ts +12 -0
- package/dist/@types/types/models/SourceTable.d.ts +31 -0
- package/dist/@types/types/models/SyncRules.d.ts +47 -0
- package/dist/@types/types/models/WriteCheckpoint.d.ts +15 -0
- package/dist/@types/types/models/models-index.d.ts +10 -0
- package/dist/@types/types/types.d.ts +94 -0
- package/dist/@types/utils/bson.d.ts +6 -0
- package/dist/@types/utils/bucket-data.d.ts +18 -0
- package/dist/@types/utils/db.d.ts +8 -0
- package/dist/@types/utils/ts-codec.d.ts +5 -0
- package/dist/@types/utils/utils-index.d.ts +4 -0
- package/dist/index.js +8 -0
- package/dist/index.js.map +1 -0
- package/dist/migrations/PostgresMigrationAgent.js +36 -0
- package/dist/migrations/PostgresMigrationAgent.js.map +1 -0
- package/dist/migrations/PostgresMigrationStore.js +60 -0
- package/dist/migrations/PostgresMigrationStore.js.map +1 -0
- package/dist/migrations/migration-utils.js +13 -0
- package/dist/migrations/migration-utils.js.map +1 -0
- package/dist/migrations/scripts/1684951997326-init.js +196 -0
- package/dist/migrations/scripts/1684951997326-init.js.map +1 -0
- package/dist/module/PostgresStorageModule.js +23 -0
- package/dist/module/PostgresStorageModule.js.map +1 -0
- package/dist/storage/PostgresBucketStorageFactory.js +433 -0
- package/dist/storage/PostgresBucketStorageFactory.js.map +1 -0
- package/dist/storage/PostgresCompactor.js +298 -0
- package/dist/storage/PostgresCompactor.js.map +1 -0
- package/dist/storage/PostgresStorageProvider.js +35 -0
- package/dist/storage/PostgresStorageProvider.js.map +1 -0
- package/dist/storage/PostgresSyncRulesStorage.js +619 -0
- package/dist/storage/PostgresSyncRulesStorage.js.map +1 -0
- package/dist/storage/PostgresTestStorageFactoryGenerator.js +110 -0
- package/dist/storage/PostgresTestStorageFactoryGenerator.js.map +1 -0
- package/dist/storage/batch/OperationBatch.js +93 -0
- package/dist/storage/batch/OperationBatch.js.map +1 -0
- package/dist/storage/batch/PostgresBucketBatch.js +732 -0
- package/dist/storage/batch/PostgresBucketBatch.js.map +1 -0
- package/dist/storage/batch/PostgresPersistedBatch.js +367 -0
- package/dist/storage/batch/PostgresPersistedBatch.js.map +1 -0
- package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js +148 -0
- package/dist/storage/checkpoints/PostgresWriteCheckpointAPI.js.map +1 -0
- package/dist/storage/storage-index.js +6 -0
- package/dist/storage/storage-index.js.map +1 -0
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js +58 -0
- package/dist/storage/sync-rules/PostgresPersistedSyncRulesContent.js.map +1 -0
- package/dist/types/codecs.js +97 -0
- package/dist/types/codecs.js.map +1 -0
- package/dist/types/models/ActiveCheckpoint.js +12 -0
- package/dist/types/models/ActiveCheckpoint.js.map +1 -0
- package/dist/types/models/ActiveCheckpointNotification.js +8 -0
- package/dist/types/models/ActiveCheckpointNotification.js.map +1 -0
- package/dist/types/models/BucketData.js +23 -0
- package/dist/types/models/BucketData.js.map +1 -0
- package/dist/types/models/BucketParameters.js +11 -0
- package/dist/types/models/BucketParameters.js.map +1 -0
- package/dist/types/models/CurrentData.js +16 -0
- package/dist/types/models/CurrentData.js.map +1 -0
- package/dist/types/models/Instance.js +5 -0
- package/dist/types/models/Instance.js.map +1 -0
- package/dist/types/models/Migration.js +12 -0
- package/dist/types/models/Migration.js.map +1 -0
- package/dist/types/models/SourceTable.js +24 -0
- package/dist/types/models/SourceTable.js.map +1 -0
- package/dist/types/models/SyncRules.js +47 -0
- package/dist/types/models/SyncRules.js.map +1 -0
- package/dist/types/models/WriteCheckpoint.js +13 -0
- package/dist/types/models/WriteCheckpoint.js.map +1 -0
- package/dist/types/models/models-index.js +11 -0
- package/dist/types/models/models-index.js.map +1 -0
- package/dist/types/types.js +46 -0
- package/dist/types/types.js.map +1 -0
- package/dist/utils/bson.js +16 -0
- package/dist/utils/bson.js.map +1 -0
- package/dist/utils/bucket-data.js +25 -0
- package/dist/utils/bucket-data.js.map +1 -0
- package/dist/utils/db.js +24 -0
- package/dist/utils/db.js.map +1 -0
- package/dist/utils/ts-codec.js +11 -0
- package/dist/utils/ts-codec.js.map +1 -0
- package/dist/utils/utils-index.js +5 -0
- package/dist/utils/utils-index.js.map +1 -0
- package/package.json +50 -0
- package/src/index.ts +10 -0
- package/src/migrations/PostgresMigrationAgent.ts +46 -0
- package/src/migrations/PostgresMigrationStore.ts +70 -0
- package/src/migrations/migration-utils.ts +14 -0
- package/src/migrations/scripts/1684951997326-init.ts +141 -0
- package/src/module/PostgresStorageModule.ts +30 -0
- package/src/storage/PostgresBucketStorageFactory.ts +496 -0
- package/src/storage/PostgresCompactor.ts +366 -0
- package/src/storage/PostgresStorageProvider.ts +42 -0
- package/src/storage/PostgresSyncRulesStorage.ts +666 -0
- package/src/storage/PostgresTestStorageFactoryGenerator.ts +61 -0
- package/src/storage/batch/OperationBatch.ts +101 -0
- package/src/storage/batch/PostgresBucketBatch.ts +885 -0
- package/src/storage/batch/PostgresPersistedBatch.ts +441 -0
- package/src/storage/checkpoints/PostgresWriteCheckpointAPI.ts +176 -0
- package/src/storage/storage-index.ts +5 -0
- package/src/storage/sync-rules/PostgresPersistedSyncRulesContent.ts +67 -0
- package/src/types/codecs.ts +136 -0
- package/src/types/models/ActiveCheckpoint.ts +15 -0
- package/src/types/models/ActiveCheckpointNotification.ts +14 -0
- package/src/types/models/BucketData.ts +26 -0
- package/src/types/models/BucketParameters.ts +14 -0
- package/src/types/models/CurrentData.ts +23 -0
- package/src/types/models/Instance.ts +8 -0
- package/src/types/models/Migration.ts +19 -0
- package/src/types/models/SourceTable.ts +32 -0
- package/src/types/models/SyncRules.ts +50 -0
- package/src/types/models/WriteCheckpoint.ts +20 -0
- package/src/types/models/models-index.ts +10 -0
- package/src/types/types.ts +73 -0
- package/src/utils/bson.ts +17 -0
- package/src/utils/bucket-data.ts +25 -0
- package/src/utils/db.ts +27 -0
- package/src/utils/ts-codec.ts +14 -0
- package/src/utils/utils-index.ts +4 -0
- package/test/src/__snapshots__/storage.test.ts.snap +9 -0
- package/test/src/__snapshots__/storage_sync.test.ts.snap +332 -0
- package/test/src/env.ts +6 -0
- package/test/src/migrations.test.ts +34 -0
- package/test/src/setup.ts +16 -0
- package/test/src/storage.test.ts +131 -0
- package/test/src/storage_compacting.test.ts +5 -0
- package/test/src/storage_sync.test.ts +12 -0
- package/test/src/util.ts +34 -0
- package/test/tsconfig.json +20 -0
- package/tsconfig.json +36 -0
- package/vitest.config.ts +13 -0
@@ -0,0 +1,42 @@
+import * as framework from '@powersync/lib-services-framework';
+import { storage } from '@powersync/service-core';
+import * as pg_wire from '@powersync/service-jpgwire';
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { NormalizedPostgresStorageConfig } from '../types/types.js';
+import { PostgresPersistedSyncRulesContent } from './sync-rules/PostgresPersistedSyncRulesContent.js';
+export type PostgresBucketStorageOptions = {
+    config: NormalizedPostgresStorageConfig;
+    slot_name_prefix: string;
+};
+export declare class PostgresBucketStorageFactory extends framework.DisposableObserver<storage.BucketStorageFactoryListener> implements storage.BucketStorageFactory {
+    protected options: PostgresBucketStorageOptions;
+    readonly db: lib_postgres.DatabaseClient;
+    readonly slot_name_prefix: string;
+    private sharedIterator;
+    private readonly storageCache;
+    constructor(options: PostgresBucketStorageOptions);
+    [Symbol.asyncDispose](): Promise<void>;
+    prepareStatements(connection: pg_wire.PgConnection): Promise<void>;
+    getInstance(syncRules: storage.PersistedSyncRulesContent): storage.SyncRulesBucketStorage;
+    getStorageMetrics(): Promise<storage.StorageMetrics>;
+    getPowerSyncInstanceId(): Promise<string>;
+    configureSyncRules(sync_rules: string, options?: {
+        lock?: boolean;
+    }): Promise<{
+        updated: boolean;
+        persisted_sync_rules?: storage.PersistedSyncRulesContent;
+        lock?: storage.ReplicationLock;
+    }>;
+    updateSyncRules(options: storage.UpdateSyncRulesOptions): Promise<PostgresPersistedSyncRulesContent>;
+    slotRemoved(slot_name: string): Promise<void>;
+    getActiveSyncRules(options: storage.ParseSyncRulesOptions): Promise<storage.PersistedSyncRules | null>;
+    getActiveSyncRulesContent(): Promise<storage.PersistedSyncRulesContent | null>;
+    getNextSyncRules(options: storage.ParseSyncRulesOptions): Promise<storage.PersistedSyncRules | null>;
+    getNextSyncRulesContent(): Promise<storage.PersistedSyncRulesContent | null>;
+    getReplicatingSyncRules(): Promise<storage.PersistedSyncRulesContent[]>;
+    getStoppedSyncRules(): Promise<storage.PersistedSyncRulesContent[]>;
+    getActiveCheckpoint(): Promise<storage.ActiveCheckpoint>;
+    watchWriteCheckpoint(user_id: string, signal: AbortSignal): AsyncIterable<storage.WriteCheckpoint>;
+    protected watchActiveCheckpoint(signal: AbortSignal): AsyncIterable<storage.ActiveCheckpoint>;
+    private makeActiveCheckpoint;
+}

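A minimal usage sketch of the factory API above (not taken from the package): `factory` and `syncRulesYaml` are assumed to exist, since constructing the factory requires a `NormalizedPostgresStorageConfig` that is not shown in this diff.

```typescript
import type { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';

// Assumed inputs: an already-constructed factory and a sync rules document.
declare const factory: PostgresBucketStorageFactory;
declare const syncRulesYaml: string;

async function activateSyncRules() {
  // Persists the sync rules only if they differ from the stored version.
  const result = await factory.configureSyncRules(syncRulesYaml);
  if (result.updated && result.persisted_sync_rules) {
    // Bucket storage bound to the newly persisted sync rules version;
    // replication would run against this instance.
    return factory.getInstance(result.persisted_sync_rules);
  }
  // The checkpoint that sync clients currently read from.
  await factory.getActiveCheckpoint();
  return null;
}
```
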
@@ -0,0 +1,40 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { storage } from '@powersync/service-core';
+/**
+ * Additional options, primarily for testing.
+ */
+export interface PostgresCompactOptions extends storage.CompactOptions {
+    /** Minimum of 2 */
+    clearBatchLimit?: number;
+    /** Minimum of 1 */
+    moveBatchLimit?: number;
+    /** Minimum of 1 */
+    moveBatchQueryLimit?: number;
+}
+export declare class PostgresCompactor {
+    private db;
+    private group_id;
+    private updates;
+    private idLimitBytes;
+    private moveBatchLimit;
+    private moveBatchQueryLimit;
+    private clearBatchLimit;
+    private maxOpId;
+    private buckets;
+    constructor(db: lib_postgres.DatabaseClient, group_id: number, options?: PostgresCompactOptions);
+    /**
+     * Compact buckets by converting operations into MOVE and/or CLEAR operations.
+     *
+     * See /docs/compacting-operations.md for details.
+     */
+    compact(): Promise<void>;
+    compactInternal(bucket: string | undefined): Promise<void>;
+    private flush;
+    /**
+     * Perform a CLEAR compact for a bucket.
+     *
+     * @param bucket bucket name
+     * @param op op_id of the last non-PUT operation, which will be converted to CLEAR.
+     */
+    private clearBucket;
+}

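Based on the declaration above, a compaction run could be wired up as follows. This is a sketch only: `db` and the sync rules group id are assumed, and the limit values are purely illustrative (subject to the documented minimums).

```typescript
import type * as lib_postgres from '@powersync/lib-service-postgres';
import { PostgresCompactor } from './PostgresCompactor.js';

// Assumed input: an existing database client for the storage schema.
declare const db: lib_postgres.DatabaseClient;

async function compactGroup(groupId: number) {
  const compactor = new PostgresCompactor(db, groupId, {
    clearBatchLimit: 100,    // documented minimum: 2
    moveBatchLimit: 50,      // documented minimum: 1
    moveBatchQueryLimit: 500 // documented minimum: 1
  });
  // Converts superseded operations into MOVE and/or CLEAR operations.
  await compactor.compact();
}
```
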
@@ -0,0 +1,46 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { DisposableObserver } from '@powersync/lib-services-framework';
+import { storage, utils } from '@powersync/service-core';
+import * as sync_rules from '@powersync/service-sync-rules';
+import { RequiredOperationBatchLimits } from '../types/types.js';
+import { PostgresWriteCheckpointAPI } from './checkpoints/PostgresWriteCheckpointAPI.js';
+import { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';
+export type PostgresSyncRulesStorageOptions = {
+    factory: PostgresBucketStorageFactory;
+    db: lib_postgres.DatabaseClient;
+    sync_rules: storage.PersistedSyncRulesContent;
+    write_checkpoint_mode?: storage.WriteCheckpointMode;
+    batchLimits: RequiredOperationBatchLimits;
+};
+export declare class PostgresSyncRulesStorage extends DisposableObserver<storage.SyncRulesBucketStorageListener> implements storage.SyncRulesBucketStorage {
+    protected options: PostgresSyncRulesStorageOptions;
+    readonly group_id: number;
+    readonly sync_rules: storage.PersistedSyncRulesContent;
+    readonly slot_name: string;
+    readonly factory: PostgresBucketStorageFactory;
+    protected db: lib_postgres.DatabaseClient;
+    protected writeCheckpointAPI: PostgresWriteCheckpointAPI;
+    private parsedSyncRulesCache;
+    private checksumCache;
+    constructor(options: PostgresSyncRulesStorageOptions);
+    get writeCheckpointMode(): storage.WriteCheckpointMode;
+    getParsedSyncRules(options: storage.ParseSyncRulesOptions): sync_rules.SqlSyncRules;
+    reportError(e: any): Promise<void>;
+    compact(options?: storage.CompactOptions): Promise<void>;
+    batchCreateCustomWriteCheckpoints(checkpoints: storage.BatchedCustomWriteCheckpointOptions[]): Promise<void>;
+    createCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): Promise<bigint>;
+    lastWriteCheckpoint(filters: storage.SyncStorageLastWriteCheckpointFilters): Promise<bigint | null>;
+    setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void;
+    createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint>;
+    getCheckpoint(): Promise<storage.ReplicationCheckpoint>;
+    resolveTable(options: storage.ResolveTableOptions): Promise<storage.ResolveTableResult>;
+    startBatch(options: storage.StartBatchOptions, callback: (batch: storage.BucketStorageBatch) => Promise<void>): Promise<storage.FlushedResult | null>;
+    getParameterSets(checkpoint: utils.OpId, lookups: sync_rules.SqliteJsonValue[][]): Promise<sync_rules.SqliteJsonRow[]>;
+    getBucketDataBatch(checkpoint: utils.OpId, dataBuckets: Map<string, string>, options?: storage.BucketDataBatchOptions): AsyncIterable<storage.SyncBucketDataBatch>;
+    getChecksums(checkpoint: utils.OpId, buckets: string[]): Promise<utils.ChecksumMap>;
+    terminate(options?: storage.TerminateOptions): Promise<void>;
+    getStatus(): Promise<storage.SyncRuleStatus>;
+    clear(): Promise<void>;
+    autoActivate(): Promise<void>;
+    private getChecksumsInternal;
+}

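The main write path for the storage class above goes through `startBatch`. The sketch below assumes an existing `PostgresSyncRulesStorage` instance, a `storage.StartBatchOptions` value (defined in `@powersync/service-core` and not shown here), and uses a placeholder LSN.

```typescript
import { storage } from '@powersync/service-core';
import type { PostgresSyncRulesStorage } from './PostgresSyncRulesStorage.js';

// Assumed inputs: an existing storage instance, batch options and one row change.
declare const syncStorage: PostgresSyncRulesStorage;
declare const startOptions: storage.StartBatchOptions;
declare const change: storage.SaveOptions;

async function replicateOneTransaction() {
  // All writes for the transaction happen inside the callback.
  await syncStorage.startBatch(startOptions, async (batch) => {
    await batch.save(change);
    // Placeholder LSN: committing records a checkpoint at this position.
    await batch.commit('0/16B3748');
  });
}
```
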
@@ -0,0 +1,13 @@
+import { TestStorageOptions } from '@powersync/service-core';
+import { PostgresMigrationAgent } from '../migrations/PostgresMigrationAgent.js';
+import { PostgresStorageConfigDecoded } from '../types/types.js';
+import { PostgresBucketStorageFactory } from './PostgresBucketStorageFactory.js';
+export type PostgresTestStorageOptions = {
+    url: string;
+    /**
+     * Vitest can cause issues when loading .ts files for migrations.
+     * This allows for providing a custom PostgresMigrationAgent.
+     */
+    migrationAgent?: (config: PostgresStorageConfigDecoded) => PostgresMigrationAgent;
+};
+export declare const PostgresTestStorageFactoryGenerator: (factoryOptions: PostgresTestStorageOptions) => (options?: TestStorageOptions) => Promise<PostgresBucketStorageFactory>;

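A hypothetical test wiring for the generator above; the connection URL is a placeholder for a local test database.

```typescript
import { PostgresTestStorageFactoryGenerator } from './PostgresTestStorageFactoryGenerator.js';

// Placeholder connection string; in the package's own tests this comes from the test env.
const generateStorageFactory = PostgresTestStorageFactoryGenerator({
  url: 'postgres://postgres:postgres@localhost:5432/powersync_storage_test'
});

// Inside a test, create a fresh factory per run and dispose of it afterwards.
const factory = await generateStorageFactory();
await factory[Symbol.asyncDispose]();
```
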
@@ -0,0 +1,47 @@
+/**
+ * TODO share this implementation better in the core package.
+ * There are some subtle differences in this implementation.
+ */
+import { storage } from '@powersync/service-core';
+import { RequiredOperationBatchLimits } from '../../types/types.js';
+/**
+ * Batch of input operations.
+ *
+ * We accumulate operations up to MAX_RECORD_BATCH_SIZE,
+ * then further split into sub-batches if MAX_CURRENT_DATA_BATCH_SIZE is exceeded.
+ */
+export declare class OperationBatch {
+    protected options: RequiredOperationBatchLimits;
+    batch: RecordOperation[];
+    currentSize: number;
+    readonly maxBatchCount: number;
+    readonly maxRecordSize: number;
+    readonly maxCurrentDataBatchSize: number;
+    get length(): number;
+    constructor(options: RequiredOperationBatchLimits);
+    push(op: RecordOperation): void;
+    shouldFlush(): boolean;
+    /**
+     *
+     * @param sizes Map of source key to estimated size of the current_data document, or undefined if current_data is not persisted.
+     *
+     */
+    batched(sizes: Map<string, number> | undefined): Generator<RecordOperation[]>;
+}
+export declare class RecordOperation {
+    readonly record: storage.SaveOptions;
+    readonly afterId: storage.ReplicaId | null;
+    readonly beforeId: storage.ReplicaId;
+    readonly internalBeforeKey: string;
+    readonly internalAfterKey: string | null;
+    readonly estimatedSize: number;
+    constructor(record: storage.SaveOptions);
+}
+/**
+ * In-memory cache key - must not be persisted.
+ */
+export declare function cacheKey(sourceTableId: string, id: storage.ReplicaId): string;
+/**
+ * Calculates a cache key for a stored ReplicaId. This is usually stored as a bytea/Buffer.
+ */
+export declare function encodedCacheKey(sourceTableId: string, storedKey: Buffer): string;

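An illustrative flow for the batching classes above; the incoming changes and the configured limits are assumed inputs.

```typescript
import { storage } from '@powersync/service-core';
import { RequiredOperationBatchLimits } from '../../types/types.js';
import { OperationBatch, RecordOperation } from './OperationBatch.js';

// Assumed inputs: replicated row changes and the configured batch limits.
declare const incoming: storage.SaveOptions[];
declare const limits: RequiredOperationBatchLimits;

const batch = new OperationBatch(limits);
for (const record of incoming) {
  // Each change is wrapped so its before/after keys and size are pre-computed.
  batch.push(new RecordOperation(record));
}
if (batch.shouldFlush()) {
  // `undefined` indicates current_data is not persisted, per the doc comment above.
  for (const subBatch of batch.batched(undefined)) {
    // persist subBatch ...
  }
}
```
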
@@ -0,0 +1,90 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { DisposableObserver } from '@powersync/lib-services-framework';
+import { storage } from '@powersync/service-core';
+import * as sync_rules from '@powersync/service-sync-rules';
+import * as t from 'ts-codec';
+import { CurrentDataDecoded } from '../../types/models/CurrentData.js';
+import { RequiredOperationBatchLimits } from '../../types/types.js';
+import { OperationBatch, RecordOperation } from './OperationBatch.js';
+import { PostgresPersistedBatch } from './PostgresPersistedBatch.js';
+export interface PostgresBucketBatchOptions {
+    db: lib_postgres.DatabaseClient;
+    sync_rules: sync_rules.SqlSyncRules;
+    group_id: number;
+    slot_name: string;
+    last_checkpoint_lsn: string | null;
+    no_checkpoint_before_lsn: string;
+    store_current_data: boolean;
+    keep_alive_op?: bigint | null;
+    /**
+     * Set to true for initial replication.
+     */
+    skip_existing_rows: boolean;
+    batch_limits: RequiredOperationBatchLimits;
+}
+/**
+ * Intermediate type which helps for only watching the active sync rules
+ * via the Postgres NOTIFY protocol.
+ */
+declare const StatefulCheckpoint: t.Intersection<t.Codec<{
+    id: number;
+    last_checkpoint: bigint | null;
+    last_checkpoint_lsn: string | null;
+}, {
+    id: number;
+    last_checkpoint: string | number | null;
+    last_checkpoint_lsn: string | null;
+}, string, t.CodecProps>, t.ObjectCodec<{
+    state: t.EnumCodec<typeof storage.SyncRuleState>;
+}>>;
+type StatefulCheckpointDecoded = t.Decoded<typeof StatefulCheckpoint>;
+export declare class PostgresBucketBatch extends DisposableObserver<storage.BucketBatchStorageListener> implements storage.BucketStorageBatch {
+    protected options: PostgresBucketBatchOptions;
+    last_flushed_op: bigint | null;
+    protected db: lib_postgres.DatabaseClient;
+    protected group_id: number;
+    protected last_checkpoint_lsn: string | null;
+    protected no_checkpoint_before_lsn: string;
+    protected persisted_op: bigint | null;
+    protected write_checkpoint_batch: storage.CustomWriteCheckpointOptions[];
+    protected readonly sync_rules: sync_rules.SqlSyncRules;
+    protected batch: OperationBatch | null;
+    private lastWaitingLogThrottled;
+    constructor(options: PostgresBucketBatchOptions);
+    get lastCheckpointLsn(): string | null;
+    save(record: storage.SaveOptions): Promise<storage.FlushedResult | null>;
+    truncate(sourceTables: storage.SourceTable[]): Promise<storage.FlushedResult | null>;
+    protected truncateSingle(sourceTable: storage.SourceTable): Promise<bigint | null>;
+    drop(sourceTables: storage.SourceTable[]): Promise<storage.FlushedResult | null>;
+    flush(): Promise<storage.FlushedResult | null>;
+    private flushInner;
+    commit(lsn: string): Promise<boolean>;
+    keepalive(lsn: string): Promise<boolean>;
+    markSnapshotDone(tables: storage.SourceTable[], no_checkpoint_before_lsn: string): Promise<storage.SourceTable[]>;
+    addCustomWriteCheckpoint(checkpoint: storage.BatchedCustomWriteCheckpointOptions): void;
+    protected replicateBatch(db: lib_postgres.WrappedConnection, batch: OperationBatch): Promise<OperationBatch | null>;
+    protected saveOperation(persistedBatch: PostgresPersistedBatch, operation: RecordOperation, currentData?: CurrentDataDecoded | null): Promise<{
+        group_id: number;
+        source_table: string;
+        source_key: Buffer;
+        data: Buffer;
+        buckets: {
+            id: string;
+            bucket: string;
+            table: string;
+        }[];
+        lookups: Buffer[];
+    } | null>;
+    /**
+     * Gets relevant {@link SqlEventDescriptor}s for the given {@link SourceTable}
+     * TODO maybe share this with an abstract class
+     */
+    protected getTableEvents(table: storage.SourceTable): sync_rules.SqlEventDescriptor[];
+    protected withReplicationTransaction<T>(callback: (tx: lib_postgres.WrappedConnection) => Promise<T>): Promise<T>;
+}
+/**
+ * Uses Postgres' NOTIFY functionality to update different processes when the
+ * active checkpoint has been updated.
+ */
+export declare const notifySyncRulesUpdate: (db: lib_postgres.DatabaseClient, update: StatefulCheckpointDecoded) => Promise<void>;
+export {};

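A lifecycle sketch for the batch above (not from the package): changes are saved as they are replicated, then a commit at an LSN produces a checkpoint that other processes observe via the NOTIFY mechanism described in the comments. `bucketBatch` is assumed to be created by the storage layer, e.g. inside `PostgresSyncRulesStorage.startBatch()`.

```typescript
import { storage } from '@powersync/service-core';
import type { PostgresBucketBatch } from './PostgresBucketBatch.js';

// Assumed inputs: an existing batch and one replicated row change.
declare const bucketBatch: PostgresBucketBatch;
declare const change: storage.SaveOptions;

async function applyChange(lsn: string) {
  await bucketBatch.save(change); // buffer the change (may flush internally)
  await bucketBatch.flush();      // persist any remaining buffered operations
  // Record a checkpoint at this LSN; other processes pick up the new active
  // checkpoint through Postgres NOTIFY.
  await bucketBatch.commit(lsn);
}
```
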
@@ -0,0 +1,64 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { storage } from '@powersync/service-core';
+import * as sync_rules from '@powersync/service-sync-rules';
+import { models, RequiredOperationBatchLimits } from '../../types/types.js';
+export type SaveBucketDataOptions = {
+    /**
+     * This value will be serialized into a BSON Byte array for storage
+     */
+    source_key: storage.ReplicaId;
+    table: storage.SourceTable;
+    before_buckets: models.CurrentBucket[];
+    evaluated: sync_rules.EvaluatedRow[];
+};
+export type SaveParameterDataOptions = {
+    source_key: storage.ReplicaId;
+    table: storage.SourceTable;
+    evaluated: sync_rules.EvaluatedParameters[];
+    existing_lookups: Buffer[];
+};
+export type DeleteCurrentDataOptions = {
+    source_table_id: bigint;
+    /**
+     * ReplicaID which needs to be serialized in order to be queried
+     * or inserted into the DB
+     */
+    source_key?: storage.ReplicaId;
+    /**
+     * Optionally provide the serialized source key directly
+     */
+    serialized_source_key?: Buffer;
+};
+export type PostgresPersistedBatchOptions = RequiredOperationBatchLimits & {
+    group_id: number;
+};
+export declare class PostgresPersistedBatch {
+    group_id: number;
+    /**
+     * Very rough estimate of current operations size in bytes
+     */
+    currentSize: number;
+    readonly maxTransactionBatchSize: number;
+    readonly maxTransactionDocCount: number;
+    /**
+     * Ordered set of bucket_data insert operation parameters
+     */
+    protected bucketDataInserts: models.BucketData[];
+    protected parameterDataInserts: models.BucketParameters[];
+    protected currentDataDeletes: Pick<models.CurrentData, 'group_id' | 'source_key' | 'source_table'>[];
+    /**
+     * This is stored as a map to avoid multiple inserts (or conflicts) for the same key
+     */
+    protected currentDataInserts: Map<string, models.CurrentData>;
+    constructor(options: PostgresPersistedBatchOptions);
+    saveBucketData(options: SaveBucketDataOptions): void;
+    saveParameterData(options: SaveParameterDataOptions): void;
+    deleteCurrentData(options: DeleteCurrentDataOptions): void;
+    upsertCurrentData(options: models.CurrentDataDecoded): void;
+    shouldFlushTransaction(): boolean;
+    flush(db: lib_postgres.WrappedConnection): Promise<void>;
+    protected flushBucketData(db: lib_postgres.WrappedConnection): Promise<void>;
+    protected flushParameterData(db: lib_postgres.WrappedConnection): Promise<void>;
+    protected flushCurrentData(db: lib_postgres.WrappedConnection): Promise<void>;
+}
+export declare function currentBucketKey(b: models.CurrentBucket): string;

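A flow sketch for the persisted batch above: operations accumulate in memory and are written in bulk once the transaction limits are reached. The batch options, save options and transaction connection are assumed inputs.

```typescript
import type * as lib_postgres from '@powersync/lib-service-postgres';
import {
  PostgresPersistedBatch,
  PostgresPersistedBatchOptions,
  SaveBucketDataOptions
} from './PostgresPersistedBatch.js';

// Assumed inputs: configured batch options, prepared save options, and an
// open replication transaction connection.
declare const batchOptions: PostgresPersistedBatchOptions;
declare const saveOptions: SaveBucketDataOptions;
declare const tx: lib_postgres.WrappedConnection;

async function persistSomeData() {
  const persisted = new PostgresPersistedBatch(batchOptions);
  persisted.saveBucketData(saveOptions);
  if (persisted.shouldFlushTransaction()) {
    // Writes the accumulated bucket_data, bucket_parameters and current_data changes.
    await persisted.flush(tx);
  }
}
```
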
@@ -0,0 +1,20 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { storage } from '@powersync/service-core';
+export type PostgresCheckpointAPIOptions = {
+    db: lib_postgres.DatabaseClient;
+    mode: storage.WriteCheckpointMode;
+};
+export declare class PostgresWriteCheckpointAPI implements storage.WriteCheckpointAPI {
+    readonly db: lib_postgres.DatabaseClient;
+    private _mode;
+    constructor(options: PostgresCheckpointAPIOptions);
+    get writeCheckpointMode(): storage.WriteCheckpointMode;
+    setWriteCheckpointMode(mode: storage.WriteCheckpointMode): void;
+    batchCreateCustomWriteCheckpoints(checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void>;
+    createCustomWriteCheckpoint(options: storage.CustomWriteCheckpointOptions): Promise<bigint>;
+    createManagedWriteCheckpoint(checkpoint: storage.ManagedWriteCheckpointOptions): Promise<bigint>;
+    lastWriteCheckpoint(filters: storage.LastWriteCheckpointFilters): Promise<bigint | null>;
+    protected lastCustomWriteCheckpoint(filters: storage.CustomWriteCheckpointFilters): Promise<bigint | null>;
+    protected lastManagedWriteCheckpoint(filters: storage.ManagedWriteCheckpointFilters): Promise<bigint | null>;
+}
+export declare function batchCreateCustomWriteCheckpoints(db: lib_postgres.DatabaseClient, checkpoints: storage.CustomWriteCheckpointOptions[]): Promise<void>;

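A construction sketch for the checkpoint API above. The `WriteCheckpointMode` enum member names (`MANAGED`/`CUSTOM`) are assumptions about the `@powersync/service-core` enum, and `db` is assumed to exist.

```typescript
import type * as lib_postgres from '@powersync/lib-service-postgres';
import { storage } from '@powersync/service-core';
import { PostgresWriteCheckpointAPI } from './PostgresWriteCheckpointAPI.js';

// Assumed input: an existing database client for the storage schema.
declare const db: lib_postgres.DatabaseClient;

const checkpoints = new PostgresWriteCheckpointAPI({
  db,
  // Enum member name assumed; WriteCheckpointMode is defined in service-core.
  mode: storage.WriteCheckpointMode.MANAGED
});

// The mode can later be switched, e.g. when sync rules opt into custom write
// checkpoints (member name assumed as above).
checkpoints.setWriteCheckpointMode(storage.WriteCheckpointMode.CUSTOM);
```
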
@@ -0,0 +1,17 @@
+import * as lib_postgres from '@powersync/lib-service-postgres';
+import { storage } from '@powersync/service-core';
+import { models } from '../../types/types.js';
+export declare class PostgresPersistedSyncRulesContent implements storage.PersistedSyncRulesContent {
+    private db;
+    readonly slot_name: string;
+    readonly id: number;
+    readonly sync_rules_content: string;
+    readonly last_checkpoint_lsn: string | null;
+    readonly last_fatal_error: string | null;
+    readonly last_keepalive_ts: Date | null;
+    readonly last_checkpoint_ts: Date | null;
+    current_lock: storage.ReplicationLock | null;
+    constructor(db: lib_postgres.DatabaseClient, row: models.SyncRulesDecoded);
+    parsed(options: storage.ParseSyncRulesOptions): storage.PersistedSyncRules;
+    lock(): Promise<storage.ReplicationLock>;
+}

@@ -0,0 +1,61 @@
+import * as t from 'ts-codec';
+export declare const BIGINT_MAX: bigint;
+/**
+ * The use of ts-codec:
+ * We currently use pgwire for Postgres queries. This library provides fine-grained control
+ * over parameter typings and efficient streaming of query responses. Additionally, configuring
+ * pgwire with default certificates allows us to use the same connection configuration process
+ * for both replication and storage libraries.
+ *
+ * Unfortunately, ORM driver support for pgwire is limited, so we rely on pure SQL queries in the
+ * absence of writing an ORM driver from scratch.
+ *
+ * [Opinion]: Writing pure SQL queries throughout a codebase can be daunting from a maintenance
+ * and debugging perspective. For example, row response types are often declared when performing a query:
+ *
+ * ```typescript
+ * const rows = await db.queryRows<MyRowType>(`SELECT one, two FROM my_table`);
+ * ```
+ * This type declaration suggests `rows` is an array of `MyRowType` objects, even though no validation
+ * is enforced. Adding a field to the `MyRowType` interface without updating the query could easily
+ * introduce subtle bugs. Similarly, type mismatches between SQL results and TypeScript interfaces, such as
+ * a `Date` field returned as a `string`, require manual conversion.
+ *
+ * `ts-codec` is not an ORM, but it simplifies working with pure SQL query responses in several ways:
+ *
+ * - **Validations**: The `decode` operation ensures that the returned row matches the expected object
+ *   structure, throwing an error if it doesn't.
+ * - **Decoding Columns**: pgwire already decodes common SQLite types, but `ts-codec` adds an extra layer
+ *   for JS-native values. For instance, `jsonb` columns are returned as `JsonContainer`/`string` and can
+ *   be automatically parsed into objects. Similarly, fields like `group_id` are converted from `Bigint`
+ *   to `Number` for easier use.
+ * - **Encoded Forms**: A single `ts-codec` type definition can infer both encoded and decoded forms. This
+ *   is especially useful for persisted batch operations that rely on JSON query parameters for bulk inserts.
+ *   Collections like `bucket_data`, `current_data`, and `bucket_parameters` use encoded/decoded types, making
+ *   changes easier to manage and validate. While some manual encoding is done for intermediate values (e.g.,
+ *   size estimation), these types are validated with `ts-codec` to ensure consistency.
+ */
+/**
+ * Wraps a codec which is encoded to a JSON string
+ */
+export declare const jsonb: <Decoded>(subCodec: t.Codec<Decoded, any>) => t.Codec<Decoded, string, string, t.CodecProps>;
+/**
+ * Just performs a pure JSON.parse for the decoding step
+ */
+export declare const jsonb_raw: <Decoded>() => t.Codec<Decoded, string, string, t.CodecProps>;
+export declare const bigint: t.Codec<bigint, string | number, string, t.CodecProps>;
+export declare const uint8array: t.Codec<Uint8Array, Uint8Array, string, t.CodecProps>;
+/**
+ * PGWire returns BYTEA values as Uint8Array instances.
+ * We also serialize to a hex string for bulk inserts.
+ */
+export declare const hexBuffer: t.Codec<Buffer, string, "hexBuffer", t.CodecProps>;
+/**
+ * PGWire returns INTEGER columns as a `bigint`.
+ * This does a decode operation to `number`.
+ */
+export declare const pgwire_number: t.Codec<number, number, "pg_number", t.CodecProps>;
+/**
+ * A codec which contains the same type on the input and output.
+ */
+export declare const IdentityCodec: <T>() => t.Codec<T, T, string, t.CodecProps>;

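The rationale above is easier to follow with a concrete decode step. This is a minimal sketch: `ExampleRow` is a made-up shape, and `t.object`/`t.number` and the `decode` method are assumed ts-codec constructors rather than part of this package.

```typescript
import * as t from 'ts-codec';
import { bigint, jsonb, pgwire_number } from './codecs.js';

// A hypothetical row codec composed from the codecs declared above.
const ExampleRow = t.object({
  id: pgwire_number,                                 // pgwire integer -> number
  last_checkpoint: bigint,                           // string | number -> bigint
  content: jsonb(t.object({ version: t.number }))    // JSON string -> parsed object
});

// Decoding validates the raw row shape and converts it to JS-native values.
const decoded = ExampleRow.decode({
  id: 1,
  last_checkpoint: '42',
  content: '{"version": 1}'
});
// decoded.last_checkpoint === 42n, decoded.content.version === 1
```
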
@@ -0,0 +1,12 @@
+import * as t from 'ts-codec';
+/**
+ * Notification payload sent via Postgres' NOTIFY API.
+ *
+ */
+export declare const ActiveCheckpoint: t.ObjectCodec<{
+    id: t.Codec<number, number, "pg_number", t.CodecProps>;
+    last_checkpoint: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<bigint, string | number, string, t.CodecProps>>;
+    last_checkpoint_lsn: t.Union<t.Codec<null, null, string, t.CodecProps>, t.IdentityCodec<t.CodecType.String>>;
+}>;
+export type ActiveCheckpoint = t.Encoded<typeof ActiveCheckpoint>;
+export type ActiveCheckpointDecoded = t.Decoded<typeof ActiveCheckpoint>;

@@ -0,0 +1,19 @@
+import * as t from 'ts-codec';
+export declare const ActiveCheckpointPayload: t.ObjectCodec<{
+    active_checkpoint: t.ObjectCodec<{
+        id: t.Codec<number, number, "pg_number", t.CodecProps>;
+        last_checkpoint: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<bigint, string | number, string, t.CodecProps>>;
+        last_checkpoint_lsn: t.Union<t.Codec<null, null, string, t.CodecProps>, t.IdentityCodec<t.CodecType.String>>;
+    }>;
+}>;
+export type ActiveCheckpointPayload = t.Encoded<typeof ActiveCheckpointPayload>;
+export type ActiveCheckpointPayloadDecoded = t.Decoded<typeof ActiveCheckpointPayload>;
+export declare const ActiveCheckpointNotification: t.Codec<{
+    active_checkpoint: {
+        id: number;
+        last_checkpoint: bigint | null;
+        last_checkpoint_lsn: string | null;
+    };
+}, string, string, t.CodecProps>;
+export type ActiveCheckpointNotification = t.Encoded<typeof ActiveCheckpointNotification>;
+export type ActiveCheckpointNotificationDecoded = t.Decoded<typeof ActiveCheckpointNotification>;

@@ -0,0 +1,22 @@
+import * as t from 'ts-codec';
+export declare enum OpType {
+    PUT = "PUT",
+    REMOVE = "REMOVE",
+    MOVE = "MOVE",
+    CLEAR = "CLEAR"
+}
+export declare const BucketData: t.ObjectCodec<{
+    group_id: t.Codec<number, number, "pg_number", t.CodecProps>;
+    bucket_name: t.IdentityCodec<t.CodecType.String>;
+    op_id: t.Codec<bigint, string | number, string, t.CodecProps>;
+    op: t.EnumCodec<typeof OpType>;
+    source_table: t.Union<t.Codec<null, null, string, t.CodecProps>, t.IdentityCodec<t.CodecType.String>>;
+    source_key: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<Buffer, string, "hexBuffer", t.CodecProps>>;
+    table_name: t.Union<t.Codec<string, string, string, t.CodecProps>, t.Codec<null, null, t.CodecType.Null, t.CodecProps>>;
+    row_id: t.Union<t.Codec<string, string, string, t.CodecProps>, t.Codec<null, null, t.CodecType.Null, t.CodecProps>>;
+    checksum: t.Codec<bigint, string | number, string, t.CodecProps>;
+    data: t.Union<t.Codec<null, null, string, t.CodecProps>, t.IdentityCodec<t.CodecType.String>>;
+    target_op: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<bigint, string | number, string, t.CodecProps>>;
+}>;
+export type BucketData = t.Encoded<typeof BucketData>;
+export type BucketDataDecoded = t.Decoded<typeof BucketData>;

@@ -0,0 +1,11 @@
+import * as t from 'ts-codec';
+export declare const BucketParameters: t.ObjectCodec<{
+    id: t.Codec<bigint, string | number, string, t.CodecProps>;
+    group_id: t.Codec<number, number, "pg_number", t.CodecProps>;
+    source_table: t.IdentityCodec<t.CodecType.String>;
+    source_key: t.Codec<Buffer, string, "hexBuffer", t.CodecProps>;
+    lookup: t.Codec<Buffer, string, "hexBuffer", t.CodecProps>;
+    bucket_parameters: t.IdentityCodec<t.CodecType.String>;
+}>;
+export type BucketParameters = t.Encoded<typeof BucketParameters>;
+export type BucketParametersDecoded = t.Decoded<typeof BucketParameters>;

@@ -0,0 +1,22 @@
+import * as t from 'ts-codec';
+export declare const CurrentBucket: t.ObjectCodec<{
+    bucket: t.IdentityCodec<t.CodecType.String>;
+    table: t.IdentityCodec<t.CodecType.String>;
+    id: t.IdentityCodec<t.CodecType.String>;
+}>;
+export type CurrentBucket = t.Encoded<typeof CurrentBucket>;
+export type CurrentBucketDecoded = t.Decoded<typeof CurrentBucket>;
+export declare const CurrentData: t.ObjectCodec<{
+    buckets: t.Codec<{
+        id: string;
+        bucket: string;
+        table: string;
+    }[], string, string, t.CodecProps>;
+    data: t.Codec<Buffer, string, "hexBuffer", t.CodecProps>;
+    group_id: t.Codec<number, number, "pg_number", t.CodecProps>;
+    lookups: t.ArrayCodec<t.Codec<Buffer, string, "hexBuffer", t.CodecProps>>;
+    source_key: t.Codec<Buffer, string, "hexBuffer", t.CodecProps>;
+    source_table: t.IdentityCodec<t.CodecType.String>;
+}>;
+export type CurrentData = t.Encoded<typeof CurrentData>;
+export type CurrentDataDecoded = t.Decoded<typeof CurrentData>;

@@ -0,0 +1,12 @@
+import { framework } from '@powersync/service-core';
+import * as t from 'ts-codec';
+export declare const Migration: t.ObjectCodec<{
+    last_run: t.IdentityCodec<t.CodecType.String>;
+    log: t.Codec<{
+        name: string;
+        direction: framework.Direction;
+        timestamp: Date;
+    }[], string, string, t.CodecProps>;
+}>;
+export type Migration = t.Encoded<typeof Migration>;
+export type MigrationDecoded = t.Decoded<typeof Migration>;

@@ -0,0 +1,31 @@
+import * as t from 'ts-codec';
+export type StoredRelationId = {
+    object_id: string | number;
+};
+export declare const ColumnDescriptor: t.ObjectCodec<{
+    name: t.IdentityCodec<t.CodecType.String>;
+    /**
+     * The type of the column ie VARCHAR, INT, etc
+     */
+    type: t.OptionalCodec<t.Codec<string, string, string, t.CodecProps>>;
+    /**
+     * Some data sources have a type id that can be used to identify the type of the column
+     */
+    typeId: t.OptionalCodec<t.Codec<number, number, string, t.CodecProps>>;
+}>;
+export declare const SourceTable: t.ObjectCodec<{
+    id: t.IdentityCodec<t.CodecType.String>;
+    group_id: t.Codec<number, number, "pg_number", t.CodecProps>;
+    connection_id: t.Codec<bigint, string | number, string, t.CodecProps>;
+    relation_id: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<StoredRelationId, string, string, t.CodecProps>>;
+    schema_name: t.IdentityCodec<t.CodecType.String>;
+    table_name: t.IdentityCodec<t.CodecType.String>;
+    replica_id_columns: t.Union<t.Codec<null, null, string, t.CodecProps>, t.Codec<{
+        name: string;
+        type?: string | undefined;
+        typeId?: number | undefined;
+    }[], string, string, t.CodecProps>>;
+    snapshot_done: t.IdentityCodec<t.CodecType.Boolean>;
+}>;
+export type SourceTable = t.Encoded<typeof SourceTable>;
+export type SourceTableDecoded = t.Decoded<typeof SourceTable>;