@powersync/service-core 0.0.2
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/.probes/.gitkeep +0 -0
- package/CHANGELOG.md +13 -0
- package/LICENSE +67 -0
- package/README.md +3 -0
- package/dist/api/api-index.d.ts +2 -0
- package/dist/api/api-index.js +3 -0
- package/dist/api/api-index.js.map +1 -0
- package/dist/api/diagnostics.d.ts +21 -0
- package/dist/api/diagnostics.js +183 -0
- package/dist/api/diagnostics.js.map +1 -0
- package/dist/api/schema.d.ts +5 -0
- package/dist/api/schema.js +88 -0
- package/dist/api/schema.js.map +1 -0
- package/dist/auth/CachedKeyCollector.d.ts +46 -0
- package/dist/auth/CachedKeyCollector.js +116 -0
- package/dist/auth/CachedKeyCollector.js.map +1 -0
- package/dist/auth/CompoundKeyCollector.d.ts +8 -0
- package/dist/auth/CompoundKeyCollector.js +23 -0
- package/dist/auth/CompoundKeyCollector.js.map +1 -0
- package/dist/auth/JwtPayload.d.ts +10 -0
- package/dist/auth/JwtPayload.js +2 -0
- package/dist/auth/JwtPayload.js.map +1 -0
- package/dist/auth/KeyCollector.d.ts +24 -0
- package/dist/auth/KeyCollector.js +2 -0
- package/dist/auth/KeyCollector.js.map +1 -0
- package/dist/auth/KeySpec.d.ts +26 -0
- package/dist/auth/KeySpec.js +49 -0
- package/dist/auth/KeySpec.js.map +1 -0
- package/dist/auth/KeyStore.d.ts +39 -0
- package/dist/auth/KeyStore.js +131 -0
- package/dist/auth/KeyStore.js.map +1 -0
- package/dist/auth/LeakyBucket.d.ts +39 -0
- package/dist/auth/LeakyBucket.js +57 -0
- package/dist/auth/LeakyBucket.js.map +1 -0
- package/dist/auth/RemoteJWKSCollector.d.ts +24 -0
- package/dist/auth/RemoteJWKSCollector.js +106 -0
- package/dist/auth/RemoteJWKSCollector.js.map +1 -0
- package/dist/auth/StaticKeyCollector.d.ts +14 -0
- package/dist/auth/StaticKeyCollector.js +19 -0
- package/dist/auth/StaticKeyCollector.js.map +1 -0
- package/dist/auth/SupabaseKeyCollector.d.ts +22 -0
- package/dist/auth/SupabaseKeyCollector.js +61 -0
- package/dist/auth/SupabaseKeyCollector.js.map +1 -0
- package/dist/auth/auth-index.d.ts +10 -0
- package/dist/auth/auth-index.js +11 -0
- package/dist/auth/auth-index.js.map +1 -0
- package/dist/db/db-index.d.ts +1 -0
- package/dist/db/db-index.js +2 -0
- package/dist/db/db-index.js.map +1 -0
- package/dist/db/mongo.d.ts +29 -0
- package/dist/db/mongo.js +65 -0
- package/dist/db/mongo.js.map +1 -0
- package/dist/entry/cli-entry.d.ts +15 -0
- package/dist/entry/cli-entry.js +36 -0
- package/dist/entry/cli-entry.js.map +1 -0
- package/dist/entry/commands/config-command.d.ts +10 -0
- package/dist/entry/commands/config-command.js +21 -0
- package/dist/entry/commands/config-command.js.map +1 -0
- package/dist/entry/commands/migrate-action.d.ts +2 -0
- package/dist/entry/commands/migrate-action.js +18 -0
- package/dist/entry/commands/migrate-action.js.map +1 -0
- package/dist/entry/commands/start-action.d.ts +3 -0
- package/dist/entry/commands/start-action.js +15 -0
- package/dist/entry/commands/start-action.js.map +1 -0
- package/dist/entry/commands/teardown-action.d.ts +2 -0
- package/dist/entry/commands/teardown-action.js +17 -0
- package/dist/entry/commands/teardown-action.js.map +1 -0
- package/dist/entry/entry-index.d.ts +5 -0
- package/dist/entry/entry-index.js +6 -0
- package/dist/entry/entry-index.js.map +1 -0
- package/dist/index.d.ts +24 -0
- package/dist/index.js +26 -0
- package/dist/index.js.map +1 -0
- package/dist/metrics/metrics.d.ts +16 -0
- package/dist/metrics/metrics.js +139 -0
- package/dist/metrics/metrics.js.map +1 -0
- package/dist/migrations/db/migrations/1684951997326-init.d.ts +3 -0
- package/dist/migrations/db/migrations/1684951997326-init.js +31 -0
- package/dist/migrations/db/migrations/1684951997326-init.js.map +1 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +2 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +5 -0
- package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +1 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +3 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +54 -0
- package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +1 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +3 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +27 -0
- package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +1 -0
- package/dist/migrations/db/store.d.ts +3 -0
- package/dist/migrations/db/store.js +10 -0
- package/dist/migrations/db/store.js.map +1 -0
- package/dist/migrations/migrations.d.ts +10 -0
- package/dist/migrations/migrations.js +94 -0
- package/dist/migrations/migrations.js.map +1 -0
- package/dist/replication/ErrorRateLimiter.d.ts +17 -0
- package/dist/replication/ErrorRateLimiter.js +42 -0
- package/dist/replication/ErrorRateLimiter.js.map +1 -0
- package/dist/replication/PgRelation.d.ts +16 -0
- package/dist/replication/PgRelation.js +26 -0
- package/dist/replication/PgRelation.js.map +1 -0
- package/dist/replication/WalConnection.d.ts +34 -0
- package/dist/replication/WalConnection.js +190 -0
- package/dist/replication/WalConnection.js.map +1 -0
- package/dist/replication/WalStream.d.ts +58 -0
- package/dist/replication/WalStream.js +517 -0
- package/dist/replication/WalStream.js.map +1 -0
- package/dist/replication/WalStreamManager.d.ts +30 -0
- package/dist/replication/WalStreamManager.js +199 -0
- package/dist/replication/WalStreamManager.js.map +1 -0
- package/dist/replication/WalStreamRunner.d.ts +38 -0
- package/dist/replication/WalStreamRunner.js +155 -0
- package/dist/replication/WalStreamRunner.js.map +1 -0
- package/dist/replication/replication-index.d.ts +7 -0
- package/dist/replication/replication-index.js +8 -0
- package/dist/replication/replication-index.js.map +1 -0
- package/dist/replication/util.d.ts +9 -0
- package/dist/replication/util.js +62 -0
- package/dist/replication/util.js.map +1 -0
- package/dist/routes/admin.d.ts +7 -0
- package/dist/routes/admin.js +192 -0
- package/dist/routes/admin.js.map +1 -0
- package/dist/routes/auth.d.ts +58 -0
- package/dist/routes/auth.js +182 -0
- package/dist/routes/auth.js.map +1 -0
- package/dist/routes/checkpointing.d.ts +3 -0
- package/dist/routes/checkpointing.js +30 -0
- package/dist/routes/checkpointing.js.map +1 -0
- package/dist/routes/dev.d.ts +6 -0
- package/dist/routes/dev.js +163 -0
- package/dist/routes/dev.js.map +1 -0
- package/dist/routes/route-generators.d.ts +15 -0
- package/dist/routes/route-generators.js +32 -0
- package/dist/routes/route-generators.js.map +1 -0
- package/dist/routes/router-socket.d.ts +10 -0
- package/dist/routes/router-socket.js +5 -0
- package/dist/routes/router-socket.js.map +1 -0
- package/dist/routes/router.d.ts +13 -0
- package/dist/routes/router.js +2 -0
- package/dist/routes/router.js.map +1 -0
- package/dist/routes/routes-index.d.ts +4 -0
- package/dist/routes/routes-index.js +5 -0
- package/dist/routes/routes-index.js.map +1 -0
- package/dist/routes/socket-route.d.ts +2 -0
- package/dist/routes/socket-route.js +119 -0
- package/dist/routes/socket-route.js.map +1 -0
- package/dist/routes/sync-rules.d.ts +6 -0
- package/dist/routes/sync-rules.js +182 -0
- package/dist/routes/sync-rules.js.map +1 -0
- package/dist/routes/sync-stream.d.ts +5 -0
- package/dist/routes/sync-stream.js +74 -0
- package/dist/routes/sync-stream.js.map +1 -0
- package/dist/runner/teardown.d.ts +2 -0
- package/dist/runner/teardown.js +79 -0
- package/dist/runner/teardown.js.map +1 -0
- package/dist/storage/BucketStorage.d.ts +298 -0
- package/dist/storage/BucketStorage.js +25 -0
- package/dist/storage/BucketStorage.js.map +1 -0
- package/dist/storage/MongoBucketStorage.d.ts +51 -0
- package/dist/storage/MongoBucketStorage.js +388 -0
- package/dist/storage/MongoBucketStorage.js.map +1 -0
- package/dist/storage/SourceTable.d.ts +39 -0
- package/dist/storage/SourceTable.js +50 -0
- package/dist/storage/SourceTable.js.map +1 -0
- package/dist/storage/mongo/MongoBucketBatch.d.ts +48 -0
- package/dist/storage/mongo/MongoBucketBatch.js +584 -0
- package/dist/storage/mongo/MongoBucketBatch.js.map +1 -0
- package/dist/storage/mongo/MongoIdSequence.d.ts +12 -0
- package/dist/storage/mongo/MongoIdSequence.js +21 -0
- package/dist/storage/mongo/MongoIdSequence.js.map +1 -0
- package/dist/storage/mongo/MongoPersistedSyncRules.d.ts +9 -0
- package/dist/storage/mongo/MongoPersistedSyncRules.js +9 -0
- package/dist/storage/mongo/MongoPersistedSyncRules.js.map +1 -0
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.d.ts +20 -0
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js +26 -0
- package/dist/storage/mongo/MongoPersistedSyncRulesContent.js.map +1 -0
- package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +27 -0
- package/dist/storage/mongo/MongoSyncBucketStorage.js +379 -0
- package/dist/storage/mongo/MongoSyncBucketStorage.js.map +1 -0
- package/dist/storage/mongo/MongoSyncRulesLock.d.ts +16 -0
- package/dist/storage/mongo/MongoSyncRulesLock.js +65 -0
- package/dist/storage/mongo/MongoSyncRulesLock.js.map +1 -0
- package/dist/storage/mongo/OperationBatch.d.ts +26 -0
- package/dist/storage/mongo/OperationBatch.js +101 -0
- package/dist/storage/mongo/OperationBatch.js.map +1 -0
- package/dist/storage/mongo/PersistedBatch.d.ts +42 -0
- package/dist/storage/mongo/PersistedBatch.js +200 -0
- package/dist/storage/mongo/PersistedBatch.js.map +1 -0
- package/dist/storage/mongo/db.d.ts +23 -0
- package/dist/storage/mongo/db.js +34 -0
- package/dist/storage/mongo/db.js.map +1 -0
- package/dist/storage/mongo/models.d.ts +137 -0
- package/dist/storage/mongo/models.js +27 -0
- package/dist/storage/mongo/models.js.map +1 -0
- package/dist/storage/mongo/util.d.ts +26 -0
- package/dist/storage/mongo/util.js +81 -0
- package/dist/storage/mongo/util.js.map +1 -0
- package/dist/storage/storage-index.d.ts +14 -0
- package/dist/storage/storage-index.js +15 -0
- package/dist/storage/storage-index.js.map +1 -0
- package/dist/sync/BroadcastIterable.d.ts +38 -0
- package/dist/sync/BroadcastIterable.js +153 -0
- package/dist/sync/BroadcastIterable.js.map +1 -0
- package/dist/sync/LastValueSink.d.ts +25 -0
- package/dist/sync/LastValueSink.js +84 -0
- package/dist/sync/LastValueSink.js.map +1 -0
- package/dist/sync/merge.d.ts +39 -0
- package/dist/sync/merge.js +175 -0
- package/dist/sync/merge.js.map +1 -0
- package/dist/sync/safeRace.d.ts +1 -0
- package/dist/sync/safeRace.js +91 -0
- package/dist/sync/safeRace.js.map +1 -0
- package/dist/sync/sync-index.d.ts +6 -0
- package/dist/sync/sync-index.js +7 -0
- package/dist/sync/sync-index.js.map +1 -0
- package/dist/sync/sync.d.ts +18 -0
- package/dist/sync/sync.js +248 -0
- package/dist/sync/sync.js.map +1 -0
- package/dist/sync/util.d.ts +26 -0
- package/dist/sync/util.js +73 -0
- package/dist/sync/util.js.map +1 -0
- package/dist/system/CorePowerSyncSystem.d.ts +18 -0
- package/dist/system/CorePowerSyncSystem.js +28 -0
- package/dist/system/CorePowerSyncSystem.js.map +1 -0
- package/dist/util/Mutex.d.ts +47 -0
- package/dist/util/Mutex.js +132 -0
- package/dist/util/Mutex.js.map +1 -0
- package/dist/util/PgManager.d.ts +24 -0
- package/dist/util/PgManager.js +55 -0
- package/dist/util/PgManager.js.map +1 -0
- package/dist/util/alerting.d.ts +4 -0
- package/dist/util/alerting.js +14 -0
- package/dist/util/alerting.js.map +1 -0
- package/dist/util/config/collectors/config-collector.d.ts +29 -0
- package/dist/util/config/collectors/config-collector.js +116 -0
- package/dist/util/config/collectors/config-collector.js.map +1 -0
- package/dist/util/config/collectors/impl/base64-config-collector.d.ts +6 -0
- package/dist/util/config/collectors/impl/base64-config-collector.js +15 -0
- package/dist/util/config/collectors/impl/base64-config-collector.js.map +1 -0
- package/dist/util/config/collectors/impl/fallback-config-collector.d.ts +11 -0
- package/dist/util/config/collectors/impl/fallback-config-collector.js +19 -0
- package/dist/util/config/collectors/impl/fallback-config-collector.js.map +1 -0
- package/dist/util/config/collectors/impl/filesystem-config-collector.d.ts +6 -0
- package/dist/util/config/collectors/impl/filesystem-config-collector.js +35 -0
- package/dist/util/config/collectors/impl/filesystem-config-collector.js.map +1 -0
- package/dist/util/config/compound-config-collector.d.ts +32 -0
- package/dist/util/config/compound-config-collector.js +126 -0
- package/dist/util/config/compound-config-collector.js.map +1 -0
- package/dist/util/config/sync-rules/impl/base64-sync-rules-collector.d.ts +7 -0
- package/dist/util/config/sync-rules/impl/base64-sync-rules-collector.js +17 -0
- package/dist/util/config/sync-rules/impl/base64-sync-rules-collector.js.map +1 -0
- package/dist/util/config/sync-rules/impl/filesystem-sync-rules-collector.d.ts +7 -0
- package/dist/util/config/sync-rules/impl/filesystem-sync-rules-collector.js +21 -0
- package/dist/util/config/sync-rules/impl/filesystem-sync-rules-collector.js.map +1 -0
- package/dist/util/config/sync-rules/impl/inline-sync-rules-collector.d.ts +7 -0
- package/dist/util/config/sync-rules/impl/inline-sync-rules-collector.js +17 -0
- package/dist/util/config/sync-rules/impl/inline-sync-rules-collector.js.map +1 -0
- package/dist/util/config/sync-rules/sync-collector.d.ts +6 -0
- package/dist/util/config/sync-rules/sync-collector.js +3 -0
- package/dist/util/config/sync-rules/sync-collector.js.map +1 -0
- package/dist/util/config/types.d.ts +53 -0
- package/dist/util/config/types.js +7 -0
- package/dist/util/config/types.js.map +1 -0
- package/dist/util/config.d.ts +7 -0
- package/dist/util/config.js +35 -0
- package/dist/util/config.js.map +1 -0
- package/dist/util/env.d.ts +10 -0
- package/dist/util/env.js +25 -0
- package/dist/util/env.js.map +1 -0
- package/dist/util/memory-tracking.d.ts +7 -0
- package/dist/util/memory-tracking.js +58 -0
- package/dist/util/memory-tracking.js.map +1 -0
- package/dist/util/migration_lib.d.ts +11 -0
- package/dist/util/migration_lib.js +64 -0
- package/dist/util/migration_lib.js.map +1 -0
- package/dist/util/pgwire_utils.d.ts +24 -0
- package/dist/util/pgwire_utils.js +117 -0
- package/dist/util/pgwire_utils.js.map +1 -0
- package/dist/util/populate_test_data.d.ts +8 -0
- package/dist/util/populate_test_data.js +65 -0
- package/dist/util/populate_test_data.js.map +1 -0
- package/dist/util/protocol-types.d.ts +178 -0
- package/dist/util/protocol-types.js +38 -0
- package/dist/util/protocol-types.js.map +1 -0
- package/dist/util/secs.d.ts +2 -0
- package/dist/util/secs.js +49 -0
- package/dist/util/secs.js.map +1 -0
- package/dist/util/util-index.d.ts +22 -0
- package/dist/util/util-index.js +23 -0
- package/dist/util/util-index.js.map +1 -0
- package/dist/util/utils.d.ts +14 -0
- package/dist/util/utils.js +75 -0
- package/dist/util/utils.js.map +1 -0
- package/package.json +55 -0
- package/src/api/api-index.ts +2 -0
- package/src/api/diagnostics.ts +221 -0
- package/src/api/schema.ts +99 -0
- package/src/auth/CachedKeyCollector.ts +132 -0
- package/src/auth/CompoundKeyCollector.ts +33 -0
- package/src/auth/JwtPayload.ts +11 -0
- package/src/auth/KeyCollector.ts +27 -0
- package/src/auth/KeySpec.ts +67 -0
- package/src/auth/KeyStore.ts +156 -0
- package/src/auth/LeakyBucket.ts +66 -0
- package/src/auth/RemoteJWKSCollector.ts +130 -0
- package/src/auth/StaticKeyCollector.ts +21 -0
- package/src/auth/SupabaseKeyCollector.ts +67 -0
- package/src/auth/auth-index.ts +10 -0
- package/src/db/db-index.ts +1 -0
- package/src/db/mongo.ts +72 -0
- package/src/entry/cli-entry.ts +41 -0
- package/src/entry/commands/config-command.ts +36 -0
- package/src/entry/commands/migrate-action.ts +25 -0
- package/src/entry/commands/start-action.ts +24 -0
- package/src/entry/commands/teardown-action.ts +23 -0
- package/src/entry/entry-index.ts +5 -0
- package/src/index.ts +37 -0
- package/src/metrics/metrics.ts +169 -0
- package/src/migrations/db/migrations/1684951997326-init.ts +33 -0
- package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +5 -0
- package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +99 -0
- package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +32 -0
- package/src/migrations/db/store.ts +11 -0
- package/src/migrations/migrations.ts +122 -0
- package/src/replication/ErrorRateLimiter.ts +49 -0
- package/src/replication/PgRelation.ts +42 -0
- package/src/replication/WalConnection.ts +227 -0
- package/src/replication/WalStream.ts +626 -0
- package/src/replication/WalStreamManager.ts +214 -0
- package/src/replication/WalStreamRunner.ts +180 -0
- package/src/replication/replication-index.ts +7 -0
- package/src/replication/util.ts +76 -0
- package/src/routes/admin.ts +229 -0
- package/src/routes/auth.ts +209 -0
- package/src/routes/checkpointing.ts +38 -0
- package/src/routes/dev.ts +194 -0
- package/src/routes/route-generators.ts +39 -0
- package/src/routes/router-socket.ts +13 -0
- package/src/routes/router.ts +17 -0
- package/src/routes/routes-index.ts +5 -0
- package/src/routes/socket-route.ts +131 -0
- package/src/routes/sync-rules.ts +210 -0
- package/src/routes/sync-stream.ts +92 -0
- package/src/runner/teardown.ts +91 -0
- package/src/storage/BucketStorage.ts +386 -0
- package/src/storage/MongoBucketStorage.ts +493 -0
- package/src/storage/SourceTable.ts +60 -0
- package/src/storage/mongo/MongoBucketBatch.ts +756 -0
- package/src/storage/mongo/MongoIdSequence.ts +24 -0
- package/src/storage/mongo/MongoPersistedSyncRules.ts +16 -0
- package/src/storage/mongo/MongoPersistedSyncRulesContent.ts +47 -0
- package/src/storage/mongo/MongoSyncBucketStorage.ts +517 -0
- package/src/storage/mongo/MongoSyncRulesLock.ts +81 -0
- package/src/storage/mongo/OperationBatch.ts +115 -0
- package/src/storage/mongo/PersistedBatch.ts +245 -0
- package/src/storage/mongo/db.ts +69 -0
- package/src/storage/mongo/models.ts +157 -0
- package/src/storage/mongo/util.ts +88 -0
- package/src/storage/storage-index.ts +15 -0
- package/src/sync/BroadcastIterable.ts +161 -0
- package/src/sync/LastValueSink.ts +100 -0
- package/src/sync/merge.ts +200 -0
- package/src/sync/safeRace.ts +99 -0
- package/src/sync/sync-index.ts +6 -0
- package/src/sync/sync.ts +312 -0
- package/src/sync/util.ts +98 -0
- package/src/system/CorePowerSyncSystem.ts +43 -0
- package/src/util/Mutex.ts +159 -0
- package/src/util/PgManager.ts +64 -0
- package/src/util/alerting.ts +17 -0
- package/src/util/config/collectors/config-collector.ts +141 -0
- package/src/util/config/collectors/impl/base64-config-collector.ts +18 -0
- package/src/util/config/collectors/impl/fallback-config-collector.ts +22 -0
- package/src/util/config/collectors/impl/filesystem-config-collector.ts +41 -0
- package/src/util/config/compound-config-collector.ts +171 -0
- package/src/util/config/sync-rules/impl/base64-sync-rules-collector.ts +21 -0
- package/src/util/config/sync-rules/impl/filesystem-sync-rules-collector.ts +26 -0
- package/src/util/config/sync-rules/impl/inline-sync-rules-collector.ts +21 -0
- package/src/util/config/sync-rules/sync-collector.ts +8 -0
- package/src/util/config/types.ts +60 -0
- package/src/util/config.ts +39 -0
- package/src/util/env.ts +28 -0
- package/src/util/memory-tracking.ts +67 -0
- package/src/util/migration_lib.ts +79 -0
- package/src/util/pgwire_utils.ts +139 -0
- package/src/util/populate_test_data.ts +78 -0
- package/src/util/protocol-types.ts +223 -0
- package/src/util/secs.ts +54 -0
- package/src/util/util-index.ts +25 -0
- package/src/util/utils.ts +102 -0
- package/test/src/__snapshots__/pg_test.test.ts.snap +256 -0
- package/test/src/__snapshots__/sync.test.ts.snap +235 -0
- package/test/src/auth.test.ts +340 -0
- package/test/src/broadcast_iterable.test.ts +156 -0
- package/test/src/data_storage.test.ts +1176 -0
- package/test/src/env.ts +8 -0
- package/test/src/large_batch.test.ts +194 -0
- package/test/src/merge_iterable.test.ts +355 -0
- package/test/src/pg_test.test.ts +432 -0
- package/test/src/schema_changes.test.ts +545 -0
- package/test/src/slow_tests.test.ts +257 -0
- package/test/src/sql_functions.test.ts +254 -0
- package/test/src/sql_operators.test.ts +132 -0
- package/test/src/sync.test.ts +293 -0
- package/test/src/sync_rules.test.ts +1051 -0
- package/test/src/util.ts +67 -0
- package/test/src/validation.test.ts +63 -0
- package/test/src/wal_stream.test.ts +310 -0
- package/test/src/wal_stream_utils.ts +147 -0
- package/test/tsconfig.json +20 -0
- package/tsconfig.json +20 -0
- package/tsconfig.tsbuildinfo +1 -0
- package/vitest.config.ts +11 -0
package/src/storage/mongo/MongoBucketBatch.ts
@@ -0,0 +1,756 @@
+import * as micro from '@journeyapps-platform/micro';
+import { SqliteRow, SqlSyncRules } from '@powersync/service-sync-rules';
+import * as bson from 'bson';
+import * as mongo from 'mongodb';
+
+import * as util from '@/util/util-index.js';
+import * as replication from '@/replication/replication-index.js';
+import { BucketStorageBatch, FlushedResult, mergeToast, SaveOptions } from '../BucketStorage.js';
+import { SourceTable } from '../SourceTable.js';
+import { PowerSyncMongo } from './db.js';
+import { CurrentBucket, CurrentDataDocument, SourceKey } from './models.js';
+import { MongoIdSequence } from './MongoIdSequence.js';
+import { cacheKey, OperationBatch, RecordOperation } from './OperationBatch.js';
+import { PersistedBatch } from './PersistedBatch.js';
+import { BSON_DESERIALIZE_OPTIONS, idPrefixFilter, serializeLookup } from './util.js';
+
+/**
+ * 15MB
+ */
+const MAX_ROW_SIZE = 15 * 1024 * 1024;
+
+// Currently, we can only have a single flush() at a time, since it locks the op_id sequence.
+// While the MongoDB transaction retry mechanism handles this okay, using an in-process Mutex
+// makes it more fair and has less overhead.
+//
+// In the future, we can investigate allowing multiple replication streams operating independently.
+const replicationMutex = new util.Mutex();
+export class MongoBucketBatch implements BucketStorageBatch {
+  private readonly client: mongo.MongoClient;
+  public readonly db: PowerSyncMongo;
+  public readonly session: mongo.ClientSession;
+  private readonly sync_rules: SqlSyncRules;
+
+  private readonly group_id: number;
+
+  private readonly slot_name: string;
+
+  private batch: OperationBatch | null = null;
+
+  /**
+   * Last LSN received associated with a checkpoint.
+   *
+   * This could be either:
+   * 1. A commit LSN.
+   * 2. A keepalive message LSN.
+   */
+  private last_checkpoint_lsn: string | null = null;
+
+  private no_checkpoint_before_lsn: string;
+
+  private persisted_op: bigint | null = null;
+
+  /**
+   * For tests only - not for persistence logic.
+   */
+  public last_flushed_op: bigint | null = null;
+
+  constructor(
+    db: PowerSyncMongo,
+    sync_rules: SqlSyncRules,
+    group_id: number,
+    slot_name: string,
+    last_checkpoint_lsn: string | null,
+    no_checkpoint_before_lsn: string | null
+  ) {
+    this.db = db;
+    this.client = db.client;
+    this.sync_rules = sync_rules;
+    this.group_id = group_id;
+    this.slot_name = slot_name;
+    this.session = this.client.startSession();
+    this.last_checkpoint_lsn = last_checkpoint_lsn;
+    this.no_checkpoint_before_lsn = no_checkpoint_before_lsn ?? replication.ZERO_LSN;
+  }
+
+  async flush(): Promise<FlushedResult | null> {
+    let result: FlushedResult | null = null;
+    // One flush may be split over multiple transactions.
+    // Each flushInner() is one transaction.
+    while (this.batch != null) {
+      let r = await this.flushInner();
+      if (r) {
+        result = r;
+      }
+    }
+    return result;
+  }
+
+  private async flushInner(): Promise<FlushedResult | null> {
+    const batch = this.batch;
+    if (batch == null) {
+      return null;
+    }
+
+    let last_op: bigint | null = null;
+    let resumeBatch: OperationBatch | null = null;
+
+    await this.withReplicationTransaction(`Flushing ${batch.length} ops`, async (session, opSeq) => {
+      resumeBatch = await this.replicateBatch(session, batch, opSeq);
+
+      last_op = opSeq.last();
+    });
+
+    // null if done, set if we need another flush
+    this.batch = resumeBatch;
+
+    if (last_op == null) {
+      throw new Error('Unexpected last_op == null');
+    }
+
+    this.persisted_op = last_op;
+    this.last_flushed_op = last_op;
+    return { flushed_op: String(last_op) };
+  }
+
+  private async replicateBatch(
+    session: mongo.ClientSession,
+    batch: OperationBatch,
+    op_seq: MongoIdSequence
+  ): Promise<OperationBatch | null> {
+    // 1. Find sizes of current_data documents, to assist in intelligent batching without
+    // exceeding memory limits.
+    //
+    // A previous attempt tried to do batching by the results of the current_data query
+    // (automatically limited to 48MB(?) per batch by MongoDB). The issue is that it changes
+    // the order of processing, which then becomes really tricky to manage.
+    // This now takes 2+ queries, but doesn't have any issues with order of operations.
+    const sizeLookups: SourceKey[] = batch.batch.map((r) => {
+      return { g: this.group_id, t: r.record.sourceTable.id, k: r.beforeId };
+    });
+
+    const sizes = new Map<string, number>();
+
+    const sizeCursor: mongo.AggregationCursor<{ _id: SourceKey; size: number }> = this.db.current_data.aggregate(
+      [
+        {
+          $match: {
+            _id: { $in: sizeLookups }
+          }
+        },
+        {
+          $project: {
+            _id: 1,
+            size: { $bsonSize: '$$ROOT' }
+          }
+        }
+      ],
+      { session }
+    );
+    for await (let doc of sizeCursor.stream()) {
+      const key = cacheKey(doc._id.t, doc._id.k);
+      sizes.set(key, doc.size);
+    }
+
+    // If set, we need to start a new transaction with this batch.
+    let resumeBatch: OperationBatch | null = null;
+    let transactionSize = 0;
+
+    // Now batch according to the sizes
+    for await (let b of batch.batched(sizes)) {
+      if (resumeBatch) {
+        for (let op of b) {
+          resumeBatch.push(op);
+        }
+        continue;
+      }
+      const lookups: SourceKey[] = b.map((r) => {
+        return { g: this.group_id, t: r.record.sourceTable.id, k: r.beforeId };
+      });
+      let current_data_lookup = new Map<string, CurrentDataDocument>();
+      const cursor = this.db.current_data.find(
+        {
+          _id: { $in: lookups }
+        },
+        { session }
+      );
+      for await (let doc of cursor.stream()) {
+        current_data_lookup.set(cacheKey(doc._id.t, doc._id.k), doc);
+      }
+
+      let persistedBatch: PersistedBatch | null = new PersistedBatch(this.group_id, transactionSize);
+
+      for (let op of b) {
+        if (resumeBatch) {
+          resumeBatch.push(op);
+          continue;
+        }
+        const currentData = current_data_lookup.get(op.internalBeforeKey) ?? null;
+        if (currentData != null) {
+          current_data_lookup.delete(op.internalBeforeKey);
+        }
+        const nextData = this.saveOperation(persistedBatch!, op, currentData, op_seq);
+        if (nextData != null) {
+          // Update our current_data and size cache
+          current_data_lookup.set(op.internalAfterKey!, nextData);
+          sizes.set(op.internalAfterKey!, nextData.data.length());
+        }
+
+        if (persistedBatch!.shouldFlushTransaction()) {
+          // Transaction is getting big.
+          // Flush, and resume in a new transaction.
+          await persistedBatch!.flush(this.db, this.session);
+          persistedBatch = null;
+          // Computing our current progress is a little tricky here, since
+          // we're stopping in the middle of a batch.
+          // We create a new batch, and push any remaining operations to it.
+          resumeBatch = new OperationBatch();
+        }
+      }
+
+      if (persistedBatch) {
+        transactionSize = persistedBatch.currentSize;
+        await persistedBatch.flush(this.db, this.session);
+      }
+    }
+
+    return resumeBatch;
+  }
+
+  private saveOperation(
+    batch: PersistedBatch,
+    operation: RecordOperation,
+    current_data: CurrentDataDocument | null,
+    opSeq: MongoIdSequence
+  ) {
+    const record = operation.record;
+    const beforeId = operation.beforeId;
+    const afterId = operation.afterId;
+    let after = record.after;
+    const sourceTable = record.sourceTable;
+
+    let existing_buckets: CurrentBucket[] = [];
+    let new_buckets: CurrentBucket[] = [];
+    let existing_lookups: bson.Binary[] = [];
+    let new_lookups: bson.Binary[] = [];
+
+    const before_key: SourceKey = { g: this.group_id, t: record.sourceTable.id, k: beforeId };
+
+    if (record.tag == 'update') {
+      const result = current_data;
+      if (result == null) {
+        // Not an error if we re-apply a transaction
+        existing_buckets = [];
+        existing_lookups = [];
+      } else {
+        const data = bson.deserialize((result.data as mongo.Binary).buffer, BSON_DESERIALIZE_OPTIONS) as SqliteRow;
+        existing_buckets = result.buckets;
+        existing_lookups = result.lookups;
+        after = mergeToast(after!, data);
+      }
+    } else if (record.tag == 'delete') {
+      const result = current_data;
+      if (result == null) {
+        // Not an error if we re-apply a transaction
+        existing_buckets = [];
+        existing_lookups = [];
+      } else {
+        existing_buckets = result.buckets;
+        existing_lookups = result.lookups;
+      }
+    }
+
+    let afterData: bson.Binary | undefined;
+    if (afterId) {
+      try {
+        // This will fail immediately if the record is > 16MB.
+        afterData = new bson.Binary(bson.serialize(after!));
+        // We additionally make sure it's <= 15MB - we need some margin for metadata.
+        if (afterData.length() > MAX_ROW_SIZE) {
+          throw new Error(`Row too large: ${afterData.length()}`);
+        }
+      } catch (e) {
+        // Replace with empty values, equivalent to TOAST values
+        after = Object.fromEntries(
+          Object.entries(after!).map(([key, value]) => {
+            return [key, undefined];
+          })
+        );
+        afterData = new bson.Binary(bson.serialize(after!));
+
+        micro.alerts.captureMessage(
+          `Data too big on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${e.message}`,
+          {
+            level: micro.errors.ErrorSeverity.WARNING,
+            metadata: {
+              replication_slot: this.slot_name,
+              table: record.sourceTable.qualifiedName
+            }
+          }
+        );
+      }
+    }
+
+    // 2. Save bucket data
+    if (beforeId != null && beforeId != afterId) {
+      // Source ID updated
+      if (sourceTable.syncData) {
+        // Delete old record
+        batch.saveBucketData({
+          op_seq: opSeq,
+          sourceKey: beforeId,
+          table: sourceTable,
+          before_buckets: existing_buckets,
+          evaluated: []
+        });
+        // Clear this, so we don't also try to REMOVE for the new id
+        existing_buckets = [];
+      }
+
+      if (sourceTable.syncParameters) {
+        // Delete old parameters
+        batch.saveParameterData({
+          op_seq: opSeq,
+          sourceKey: beforeId,
+          sourceTable,
+          evaluated: [],
+          existing_lookups
+        });
+        existing_lookups = [];
+      }
+    }
+
+    // If we re-apply a transaction, we can end up with a partial row.
+    //
+    // We may end up with toasted values, which means the record is not quite valid.
+    // However, it will be valid by the end of the transaction.
+    //
+    // In this case, we don't save the op, but we do save the current data.
+    if (afterId && after && util.isCompleteRow(after)) {
+      // Insert or update
+      if (sourceTable.syncData) {
+        const { results: evaluated, errors } = this.sync_rules.evaluateRowWithErrors({
+          record: after,
+          sourceTable
+        });
+
+        for (let error of errors) {
+          micro.alerts.captureMessage(
+            `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`,
+            {
+              level: micro.errors.ErrorSeverity.WARNING,
+              metadata: {
+                replication_slot: this.slot_name,
+                table: record.sourceTable.qualifiedName
+              }
+            }
+          );
+          micro.logger.error(
+            `Failed to evaluate data query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`
+          );
+        }
+
+        // Save new one
+        batch.saveBucketData({
+          op_seq: opSeq,
+          sourceKey: afterId,
+          evaluated,
+          table: sourceTable,
+          before_buckets: existing_buckets
+        });
+        new_buckets = evaluated.map((e) => {
+          return {
+            bucket: e.bucket,
+            table: e.table,
+            id: e.id
+          };
+        });
+      }
+
+      if (sourceTable.syncParameters) {
+        // Parameters
+        const { results: paramEvaluated, errors: paramErrors } = this.sync_rules.evaluateParameterRowWithErrors(
+          sourceTable,
+          after
+        );
+
+        for (let error of paramErrors) {
+          micro.alerts.captureMessage(
+            `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${record.after?.id}: ${error.error}`,
+            {
+              level: micro.errors.ErrorSeverity.WARNING,
+              metadata: {
+                replication_slot: this.slot_name,
+                table: record.sourceTable.qualifiedName
+              }
+            }
+          );
+          micro.logger.error(
+            `Failed to evaluate parameter query on ${record.sourceTable.qualifiedName}.${after.id}: ${error.error}`
+          );
+        }
+
+        batch.saveParameterData({
+          op_seq: opSeq,
+          sourceKey: afterId,
+          sourceTable,
+          evaluated: paramEvaluated,
+          existing_lookups
+        });
+        new_lookups = paramEvaluated.map((p) => {
+          return serializeLookup(p.lookup);
+        });
+      }
+    }
+
+    let result: CurrentDataDocument | null = null;
+
+    // 5. TOAST: Update current data and bucket list.
+    if (afterId) {
+      // Insert or update
+      const after_key: SourceKey = { g: this.group_id, t: sourceTable.id, k: afterId };
+      batch.upsertCurrentData(after_key, {
+        data: afterData,
+        buckets: new_buckets,
+        lookups: new_lookups
+      });
+      result = {
+        _id: after_key,
+        data: afterData!,
+        buckets: new_buckets,
+        lookups: new_lookups
+      };
+    }
+
+    if (beforeId != afterId) {
+      // Either a delete (afterId == null), or replaced the old replication id
+      batch.deleteCurrentData(before_key);
+    }
+    return result;
+  }
+
+  private async withTransaction(cb: () => Promise<void>) {
+    await replicationMutex.exclusiveLock(async () => {
+      await this.session.withTransaction(
+        async () => {
+          try {
+            await cb();
+          } catch (e: unknown) {
+            if (e instanceof mongo.MongoError && e.hasErrorLabel('TransientTransactionError')) {
+              // Likely write conflict caused by concurrent write stream replicating
+            } else {
+              micro.logger.warn('Transaction error', e as Error);
+            }
+            await new Promise((resolve) => setTimeout(resolve, Math.random() * 50));
+            throw e;
+          }
+        },
+        { maxCommitTimeMS: 10000 }
+      );
+    });
+  }
+
+  private async withReplicationTransaction(
+    description: string,
+    callback: (session: mongo.ClientSession, opSeq: MongoIdSequence) => Promise<void>
+  ): Promise<void> {
+    let flushTry = 0;
+
+    const start = Date.now();
+    const lastTry = start + 90000;
+
+    const session = this.session;
+
+    await this.withTransaction(async () => {
+      flushTry += 1;
+      if (flushTry == 1) {
+        micro.logger.info(`${this.slot_name} ${description}`);
+      } else if (flushTry % 10 == 0) {
+        micro.logger.info(`${this.slot_name} ${description} ops - try ${flushTry}`);
+      }
+      if (flushTry > 20 && Date.now() > lastTry) {
+        throw new Error('Max transaction tries exceeded');
+      }
+
+      const next_op_id_doc = await this.db.op_id_sequence.findOneAndUpdate(
+        {
+          _id: 'main'
+        },
+        {
+          $setOnInsert: { op_id: 0n },
+          $set: {
+            // Force update to ensure we get a mongo lock
+            ts: Date.now()
+          }
+        },
+        {
+          upsert: true,
+          returnDocument: 'after',
+          session
+        }
+      );
+      const opSeq = new MongoIdSequence(next_op_id_doc?.op_id ?? 0n);
+
+      await callback(session, opSeq);
+
+      await this.db.op_id_sequence.updateOne(
+        {
+          _id: 'main'
+        },
+        {
+          $set: {
+            op_id: opSeq.last()
+          }
+        },
+        {
+          session
+        }
+      );
+
+      await this.db.sync_rules.updateOne(
+        {
+          _id: this.group_id
+        },
+        {
+          $set: {
+            last_keepalive_ts: new Date()
+          }
+        },
+        { session }
+      );
+    });
+  }
+
+  async abort() {
+    await this.session.endSession();
+  }
+
+  async commit(lsn: string): Promise<boolean> {
+    await this.flush();
+
+    if (this.last_checkpoint_lsn != null && lsn <= this.last_checkpoint_lsn) {
+      // When re-applying transactions, don't create a new checkpoint until
+      // we are past the last transaction.
+      micro.logger.info(`Re-applied transaction ${lsn} - skipping checkpoint`);
+      return false;
+    }
+    if (lsn < this.no_checkpoint_before_lsn) {
+      micro.logger.info(
+        `Waiting until ${this.no_checkpoint_before_lsn} before creating checkpoint, currently at ${lsn}`
+      );
+      return false;
+    }
+
+    if (this.persisted_op != null) {
+      const now = new Date();
+      await this.db.sync_rules.updateOne(
+        {
+          _id: this.group_id
+        },
+        {
+          $set: {
+            last_checkpoint: this.persisted_op,
+            last_checkpoint_lsn: lsn,
+            last_checkpoint_ts: now,
+            last_keepalive_ts: now,
+            snapshot_done: true,
+            last_fatal_error: null
+          }
+        },
+        { session: this.session }
+      );
+      this.persisted_op = null;
+    }
+    this.last_checkpoint_lsn = lsn;
+    return true;
+  }
+
+  async keepalive(lsn: string): Promise<boolean> {
+    if (this.last_checkpoint_lsn != null && lsn <= this.last_checkpoint_lsn) {
+      // No-op
+      return false;
+    }
+
+    if (lsn < this.no_checkpoint_before_lsn) {
+      return false;
+    }
+
+    if (this.persisted_op != null) {
+      // The commit may have been skipped due to "no_checkpoint_before_lsn".
+      // Apply it now if relevant
+      return await this.commit(lsn);
+    }
+
+    await this.db.sync_rules.updateOne(
+      {
+        _id: this.group_id
+      },
+      {
+        $set: {
+          last_checkpoint_lsn: lsn,
+          snapshot_done: true,
+          last_fatal_error: null,
+          last_keepalive_ts: new Date()
+        }
+      },
+      { session: this.session }
+    );
+    this.last_checkpoint_lsn = lsn;
+
+    return true;
+  }
+
+  async save(record: SaveOptions): Promise<FlushedResult | null> {
+    micro.logger.debug(`Saving ${record.tag}:${record.before?.id}/${record.after?.id}`);
+
+    this.batch ??= new OperationBatch();
+    this.batch.push(new RecordOperation(record));
+
+    if (this.batch.shouldFlush()) {
+      const r = await this.flush();
+      // HACK: Give other streams a chance to also flush
+      const t = 150;
+      await new Promise((resolve) => setTimeout(resolve, t));
+      return r;
+    }
+    return null;
+  }
+
+  /**
+   * Drop is equivalent to TRUNCATE, plus removing our record of the table.
+   */
+  async drop(sourceTables: SourceTable[]): Promise<FlushedResult | null> {
+    await this.truncate(sourceTables);
+    const result = await this.flush();
+
+    await this.withTransaction(async () => {
+      for (let table of sourceTables) {
+        await this.db.source_tables.deleteOne({ _id: table.id });
+      }
+    });
+    return result;
+  }
+
+  async truncate(sourceTables: SourceTable[]): Promise<FlushedResult | null> {
+    await this.flush();
+
+    let last_op: bigint | null = null;
+    for (let table of sourceTables) {
+      last_op = await this.truncateSingle(table);
+    }
+
+    if (last_op) {
+      this.persisted_op = last_op;
+    }
+
+    return {
+      flushed_op: String(last_op!)
+    };
+  }
+
+  async truncateSingle(sourceTable: SourceTable): Promise<bigint> {
+    let last_op: bigint | null = null;
+
+    // To avoid too large transactions, we limit the amount of data we delete per transaction.
+    // Since we don't use the record data here, we don't have explicit size limits per batch.
+    const BATCH_LIMIT = 2000;
+
+    let lastBatchCount = BATCH_LIMIT;
+    while (lastBatchCount == BATCH_LIMIT) {
+      await this.withReplicationTransaction(`Truncate ${sourceTable.qualifiedName}`, async (session, opSeq) => {
+        const current_data_filter: mongo.Filter<CurrentDataDocument> = {
+          _id: idPrefixFilter<SourceKey>({ g: this.group_id, t: sourceTable.id }, ['k'])
+        };
+
+        const cursor = this.db.current_data.find(current_data_filter, {
+          projection: {
+            _id: 1,
+            buckets: 1,
+            lookups: 1
+          },
+          limit: BATCH_LIMIT,
+          session: session
+        });
+        const batch = await cursor.toArray();
+        const persistedBatch = new PersistedBatch(this.group_id, 0);
+
+        for (let value of batch) {
+          persistedBatch.saveBucketData({
+            op_seq: opSeq,
+            before_buckets: value.buckets,
+            evaluated: [],
+            table: sourceTable,
+            sourceKey: value._id.k
+          });
+          persistedBatch.saveParameterData({
+            op_seq: opSeq,
+            existing_lookups: value.lookups,
+            evaluated: [],
+            sourceTable: sourceTable,
+            sourceKey: value._id.k
+          });
+
+          persistedBatch.deleteCurrentData(value._id);
+        }
+        await persistedBatch.flush(this.db, session);
+        lastBatchCount = batch.length;
+
+        last_op = opSeq.last();
+      });
+    }
+
+    return last_op!;
+  }
+
+  async markSnapshotDone(tables: SourceTable[], no_checkpoint_before_lsn: string) {
+    const session = this.session;
+    const ids = tables.map((table) => table.id);
+
+    await this.withTransaction(async () => {
+      await this.db.source_tables.updateMany(
+        { _id: { $in: ids } },
+        {
+          $set: {
+            snapshot_done: true
+          }
+        },
+        { session }
+      );
+
+      if (no_checkpoint_before_lsn > this.no_checkpoint_before_lsn) {
+        this.no_checkpoint_before_lsn = no_checkpoint_before_lsn;
+
+        await this.db.sync_rules.updateOne(
+          {
+            _id: this.group_id
+          },
+          {
+            $set: {
+              no_checkpoint_before: no_checkpoint_before_lsn,
+              last_keepalive_ts: new Date()
+            }
+          },
+          { session: this.session }
+        );
+      }
+    });
+    return tables.map((table) => {
+      const copy = new SourceTable(
+        table.id,
+        table.connectionTag,
+        table.relationId,
+        table.schema,
+        table.table,
+        table.replicaIdColumns,
+        table.snapshotComplete
+      );
+      copy.syncData = table.syncData;
+      copy.syncParameters = table.syncParameters;
+      return copy;
+    });
+  }
+}
+
+export function currentBucketKey(b: CurrentBucket) {
+  return `${b.bucket}/${b.table}/${b.id}`;
+}