@powersync/service-core 0.13.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (181)
  1. package/CHANGELOG.md +31 -0
  2. package/dist/entry/commands/compact-action.js +14 -14
  3. package/dist/entry/commands/compact-action.js.map +1 -1
  4. package/dist/entry/commands/migrate-action.js +15 -4
  5. package/dist/entry/commands/migrate-action.js.map +1 -1
  6. package/dist/index.d.ts +1 -3
  7. package/dist/index.js +1 -3
  8. package/dist/index.js.map +1 -1
  9. package/dist/migrations/PowerSyncMigrationManager.d.ts +17 -0
  10. package/dist/migrations/PowerSyncMigrationManager.js +21 -0
  11. package/dist/migrations/PowerSyncMigrationManager.js.map +1 -0
  12. package/dist/migrations/ensure-automatic-migrations.d.ts +4 -0
  13. package/dist/migrations/ensure-automatic-migrations.js +14 -0
  14. package/dist/migrations/ensure-automatic-migrations.js.map +1 -0
  15. package/dist/migrations/migrations-index.d.ts +2 -3
  16. package/dist/migrations/migrations-index.js +2 -3
  17. package/dist/migrations/migrations-index.js.map +1 -1
  18. package/dist/routes/configure-fastify.d.ts +12 -12
  19. package/dist/routes/endpoints/admin.d.ts +24 -24
  20. package/dist/storage/BucketStorage.d.ts +51 -3
  21. package/dist/storage/BucketStorage.js +26 -0
  22. package/dist/storage/BucketStorage.js.map +1 -1
  23. package/dist/storage/bson.d.ts +24 -0
  24. package/dist/storage/bson.js +73 -0
  25. package/dist/storage/bson.js.map +1 -0
  26. package/dist/storage/storage-index.d.ts +3 -14
  27. package/dist/storage/storage-index.js +3 -14
  28. package/dist/storage/storage-index.js.map +1 -1
  29. package/dist/sync/sync.js +3 -1
  30. package/dist/sync/sync.js.map +1 -1
  31. package/dist/system/ServiceContext.d.ts +3 -0
  32. package/dist/system/ServiceContext.js +11 -3
  33. package/dist/system/ServiceContext.js.map +1 -1
  34. package/dist/util/config/types.d.ts +2 -2
  35. package/dist/util/utils.d.ts +17 -1
  36. package/dist/util/utils.js +49 -1
  37. package/dist/util/utils.js.map +1 -1
  38. package/package.json +7 -8
  39. package/src/entry/commands/compact-action.ts +19 -14
  40. package/src/entry/commands/migrate-action.ts +17 -4
  41. package/src/index.ts +1 -4
  42. package/src/migrations/PowerSyncMigrationManager.ts +42 -0
  43. package/src/migrations/ensure-automatic-migrations.ts +15 -0
  44. package/src/migrations/migrations-index.ts +2 -3
  45. package/src/storage/BucketStorage.ts +59 -3
  46. package/src/storage/bson.ts +78 -0
  47. package/src/storage/storage-index.ts +3 -15
  48. package/src/sync/sync.ts +3 -1
  49. package/src/system/ServiceContext.ts +17 -4
  50. package/src/util/config/types.ts +2 -2
  51. package/src/util/utils.ts +47 -1
  52. package/test/src/env.ts +0 -1
  53. package/tsconfig.tsbuildinfo +1 -1
  54. package/dist/db/db-index.d.ts +0 -1
  55. package/dist/db/db-index.js +0 -2
  56. package/dist/db/db-index.js.map +0 -1
  57. package/dist/db/mongo.d.ts +0 -35
  58. package/dist/db/mongo.js +0 -73
  59. package/dist/db/mongo.js.map +0 -1
  60. package/dist/locks/LockManager.d.ts +0 -10
  61. package/dist/locks/LockManager.js +0 -7
  62. package/dist/locks/LockManager.js.map +0 -1
  63. package/dist/locks/MongoLocks.d.ts +0 -36
  64. package/dist/locks/MongoLocks.js +0 -81
  65. package/dist/locks/MongoLocks.js.map +0 -1
  66. package/dist/locks/locks-index.d.ts +0 -2
  67. package/dist/locks/locks-index.js +0 -3
  68. package/dist/locks/locks-index.js.map +0 -1
  69. package/dist/migrations/db/migrations/1684951997326-init.d.ts +0 -3
  70. package/dist/migrations/db/migrations/1684951997326-init.js +0 -33
  71. package/dist/migrations/db/migrations/1684951997326-init.js.map +0 -1
  72. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.d.ts +0 -2
  73. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js +0 -5
  74. package/dist/migrations/db/migrations/1688556755264-initial-sync-rules.js.map +0 -1
  75. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.d.ts +0 -3
  76. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js +0 -56
  77. package/dist/migrations/db/migrations/1702295701188-sync-rule-state.js.map +0 -1
  78. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.d.ts +0 -3
  79. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js +0 -29
  80. package/dist/migrations/db/migrations/1711543888062-write-checkpoint-index.js.map +0 -1
  81. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.d.ts +0 -3
  82. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js +0 -31
  83. package/dist/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.js.map +0 -1
  84. package/dist/migrations/definitions.d.ts +0 -18
  85. package/dist/migrations/definitions.js +0 -6
  86. package/dist/migrations/definitions.js.map +0 -1
  87. package/dist/migrations/executor.d.ts +0 -16
  88. package/dist/migrations/executor.js +0 -64
  89. package/dist/migrations/executor.js.map +0 -1
  90. package/dist/migrations/migrations.d.ts +0 -18
  91. package/dist/migrations/migrations.js +0 -110
  92. package/dist/migrations/migrations.js.map +0 -1
  93. package/dist/migrations/store/migration-store.d.ts +0 -11
  94. package/dist/migrations/store/migration-store.js +0 -46
  95. package/dist/migrations/store/migration-store.js.map +0 -1
  96. package/dist/storage/MongoBucketStorage.d.ts +0 -48
  97. package/dist/storage/MongoBucketStorage.js +0 -427
  98. package/dist/storage/MongoBucketStorage.js.map +0 -1
  99. package/dist/storage/mongo/MongoBucketBatch.d.ts +0 -74
  100. package/dist/storage/mongo/MongoBucketBatch.js +0 -683
  101. package/dist/storage/mongo/MongoBucketBatch.js.map +0 -1
  102. package/dist/storage/mongo/MongoCompactor.d.ts +0 -40
  103. package/dist/storage/mongo/MongoCompactor.js +0 -310
  104. package/dist/storage/mongo/MongoCompactor.js.map +0 -1
  105. package/dist/storage/mongo/MongoIdSequence.d.ts +0 -12
  106. package/dist/storage/mongo/MongoIdSequence.js +0 -21
  107. package/dist/storage/mongo/MongoIdSequence.js.map +0 -1
  108. package/dist/storage/mongo/MongoPersistedSyncRules.d.ts +0 -9
  109. package/dist/storage/mongo/MongoPersistedSyncRules.js +0 -9
  110. package/dist/storage/mongo/MongoPersistedSyncRules.js.map +0 -1
  111. package/dist/storage/mongo/MongoPersistedSyncRulesContent.d.ts +0 -20
  112. package/dist/storage/mongo/MongoPersistedSyncRulesContent.js +0 -26
  113. package/dist/storage/mongo/MongoPersistedSyncRulesContent.js.map +0 -1
  114. package/dist/storage/mongo/MongoStorageProvider.d.ts +0 -5
  115. package/dist/storage/mongo/MongoStorageProvider.js +0 -26
  116. package/dist/storage/mongo/MongoStorageProvider.js.map +0 -1
  117. package/dist/storage/mongo/MongoSyncBucketStorage.d.ts +0 -38
  118. package/dist/storage/mongo/MongoSyncBucketStorage.js +0 -534
  119. package/dist/storage/mongo/MongoSyncBucketStorage.js.map +0 -1
  120. package/dist/storage/mongo/MongoSyncRulesLock.d.ts +0 -16
  121. package/dist/storage/mongo/MongoSyncRulesLock.js +0 -65
  122. package/dist/storage/mongo/MongoSyncRulesLock.js.map +0 -1
  123. package/dist/storage/mongo/MongoWriteCheckpointAPI.d.ts +0 -20
  124. package/dist/storage/mongo/MongoWriteCheckpointAPI.js +0 -104
  125. package/dist/storage/mongo/MongoWriteCheckpointAPI.js.map +0 -1
  126. package/dist/storage/mongo/OperationBatch.d.ts +0 -35
  127. package/dist/storage/mongo/OperationBatch.js +0 -119
  128. package/dist/storage/mongo/OperationBatch.js.map +0 -1
  129. package/dist/storage/mongo/PersistedBatch.d.ts +0 -46
  130. package/dist/storage/mongo/PersistedBatch.js +0 -223
  131. package/dist/storage/mongo/PersistedBatch.js.map +0 -1
  132. package/dist/storage/mongo/config.d.ts +0 -19
  133. package/dist/storage/mongo/config.js +0 -26
  134. package/dist/storage/mongo/config.js.map +0 -1
  135. package/dist/storage/mongo/db.d.ts +0 -36
  136. package/dist/storage/mongo/db.js +0 -47
  137. package/dist/storage/mongo/db.js.map +0 -1
  138. package/dist/storage/mongo/models.d.ts +0 -163
  139. package/dist/storage/mongo/models.js +0 -27
  140. package/dist/storage/mongo/models.js.map +0 -1
  141. package/dist/storage/mongo/util.d.ts +0 -54
  142. package/dist/storage/mongo/util.js +0 -190
  143. package/dist/storage/mongo/util.js.map +0 -1
  144. package/src/db/db-index.ts +0 -1
  145. package/src/db/mongo.ts +0 -81
  146. package/src/locks/LockManager.ts +0 -16
  147. package/src/locks/MongoLocks.ts +0 -142
  148. package/src/locks/locks-index.ts +0 -2
  149. package/src/migrations/db/migrations/1684951997326-init.ts +0 -38
  150. package/src/migrations/db/migrations/1688556755264-initial-sync-rules.ts +0 -5
  151. package/src/migrations/db/migrations/1702295701188-sync-rule-state.ts +0 -102
  152. package/src/migrations/db/migrations/1711543888062-write-checkpoint-index.ts +0 -34
  153. package/src/migrations/db/migrations/1727099539247-custom-write-checkpoint-index.ts +0 -37
  154. package/src/migrations/definitions.ts +0 -21
  155. package/src/migrations/executor.ts +0 -87
  156. package/src/migrations/migrations.ts +0 -142
  157. package/src/migrations/store/migration-store.ts +0 -63
  158. package/src/storage/MongoBucketStorage.ts +0 -541
  159. package/src/storage/mongo/MongoBucketBatch.ts +0 -900
  160. package/src/storage/mongo/MongoCompactor.ts +0 -393
  161. package/src/storage/mongo/MongoIdSequence.ts +0 -24
  162. package/src/storage/mongo/MongoPersistedSyncRules.ts +0 -16
  163. package/src/storage/mongo/MongoPersistedSyncRulesContent.ts +0 -50
  164. package/src/storage/mongo/MongoStorageProvider.ts +0 -31
  165. package/src/storage/mongo/MongoSyncBucketStorage.ts +0 -640
  166. package/src/storage/mongo/MongoSyncRulesLock.ts +0 -85
  167. package/src/storage/mongo/MongoWriteCheckpointAPI.ts +0 -154
  168. package/src/storage/mongo/OperationBatch.ts +0 -131
  169. package/src/storage/mongo/PersistedBatch.ts +0 -285
  170. package/src/storage/mongo/config.ts +0 -40
  171. package/src/storage/mongo/db.ts +0 -88
  172. package/src/storage/mongo/models.ts +0 -187
  173. package/src/storage/mongo/util.ts +0 -203
  174. package/test/src/__snapshots__/sync.test.ts.snap +0 -332
  175. package/test/src/bucket_validation.test.ts +0 -143
  176. package/test/src/bucket_validation.ts +0 -60
  177. package/test/src/compacting.test.ts +0 -295
  178. package/test/src/data_storage.test.ts +0 -1569
  179. package/test/src/stream_utils.ts +0 -42
  180. package/test/src/sync.test.ts +0 -511
  181. package/test/src/util.ts +0 -150
package/dist/storage/mongo/db.js DELETED
@@ -1,47 +0,0 @@
- import * as db from '../../db/db-index.js';
- import { BSON_DESERIALIZE_OPTIONS } from './util.js';
- export function createPowerSyncMongo(config) {
-     return new PowerSyncMongo(db.mongo.createMongoClient(config), { database: config.database });
- }
- export class PowerSyncMongo {
-     constructor(client, options) {
-         this.client = client;
-         const db = client.db(options?.database, {
-             ...BSON_DESERIALIZE_OPTIONS
-         });
-         this.db = db;
-         this.current_data = db.collection('current_data');
-         this.bucket_data = db.collection('bucket_data');
-         this.bucket_parameters = db.collection('bucket_parameters');
-         this.op_id_sequence = db.collection('op_id_sequence');
-         this.sync_rules = db.collection('sync_rules');
-         this.source_tables = db.collection('source_tables');
-         this.custom_write_checkpoints = db.collection('custom_write_checkpoints');
-         this.write_checkpoints = db.collection('write_checkpoints');
-         this.instance = db.collection('instance');
-         this.locks = this.db.collection('locks');
-     }
-     /**
-      * Clear all collections.
-      */
-     async clear() {
-         await this.current_data.deleteMany({});
-         await this.bucket_data.deleteMany({});
-         await this.bucket_parameters.deleteMany({});
-         await this.op_id_sequence.deleteMany({});
-         await this.sync_rules.deleteMany({});
-         await this.source_tables.deleteMany({});
-         await this.write_checkpoints.deleteMany({});
-         await this.instance.deleteOne({});
-         await this.locks.deleteMany({});
-     }
-     /**
-      * Drop the entire database.
-      *
-      * Primarily for tests.
-      */
-     async drop() {
-         await this.db.dropDatabase();
-     }
- }
- //# sourceMappingURL=db.js.map
package/dist/storage/mongo/db.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"db.js","sourceRoot":"","sources":["../../../src/storage/mongo/db.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,MAAM,sBAAsB,CAAC;AAa3C,OAAO,EAAE,wBAAwB,EAAE,MAAM,WAAW,CAAC;AASrD,MAAM,UAAU,oBAAoB,CAAC,MAA6C;IAChF,OAAO,IAAI,cAAc,CAAC,EAAE,CAAC,KAAK,CAAC,iBAAiB,CAAC,MAAM,CAAC,EAAE,EAAE,QAAQ,EAAE,MAAM,CAAC,QAAQ,EAAE,CAAC,CAAC;AAC/F,CAAC;AAED,MAAM,OAAO,cAAc;IAezB,YAAY,MAAyB,EAAE,OAA+B;QACpE,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QAErB,MAAM,EAAE,GAAG,MAAM,CAAC,EAAE,CAAC,OAAO,EAAE,QAAQ,EAAE;YACtC,GAAG,wBAAwB;SAC5B,CAAC,CAAC;QACH,IAAI,CAAC,EAAE,GAAG,EAAE,CAAC;QAEb,IAAI,CAAC,YAAY,GAAG,EAAE,CAAC,UAAU,CAAsB,cAAc,CAAC,CAAC;QACvE,IAAI,CAAC,WAAW,GAAG,EAAE,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC;QAChD,IAAI,CAAC,iBAAiB,GAAG,EAAE,CAAC,UAAU,CAAC,mBAAmB,CAAC,CAAC;QAC5D,IAAI,CAAC,cAAc,GAAG,EAAE,CAAC,UAAU,CAAC,gBAAgB,CAAC,CAAC;QACtD,IAAI,CAAC,UAAU,GAAG,EAAE,CAAC,UAAU,CAAC,YAAY,CAAC,CAAC;QAC9C,IAAI,CAAC,aAAa,GAAG,EAAE,CAAC,UAAU,CAAC,eAAe,CAAC,CAAC;QACpD,IAAI,CAAC,wBAAwB,GAAG,EAAE,CAAC,UAAU,CAAC,0BAA0B,CAAC,CAAC;QAC1E,IAAI,CAAC,iBAAiB,GAAG,EAAE,CAAC,UAAU,CAAC,mBAAmB,CAAC,CAAC;QAC5D,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAC,UAAU,CAAC,UAAU,CAAC,CAAC;QAC1C,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC,EAAE,CAAC,UAAU,CAAC,OAAO,CAAC,CAAC;IAC3C,CAAC;IAED;;OAEG;IACH,KAAK,CAAC,KAAK;QACT,MAAM,IAAI,CAAC,YAAY,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QACvC,MAAM,IAAI,CAAC,WAAW,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QACtC,MAAM,IAAI,CAAC,iBAAiB,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QAC5C,MAAM,IAAI,CAAC,cAAc,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QACzC,MAAM,IAAI,CAAC,UAAU,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QACrC,MAAM,IAAI,CAAC,aAAa,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QACxC,MAAM,IAAI,CAAC,iBAAiB,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;QAC5C,MAAM,IAAI,CAAC,QAAQ,CAAC,SAAS,CAAC,EAAE,CAAC,CAAC;QAClC,MAAM,IAAI,CAAC,KAAK,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IAClC,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,IAAI;QACR,MAAM,IAAI,CAAC,EAAE,CAAC,YAAY,EAAE,CAAC;IAC/B,CAAC;CACF"}
package/dist/storage/mongo/models.d.ts DELETED
@@ -1,163 +0,0 @@
- import { SqliteJsonValue } from '@powersync/service-sync-rules';
- import * as bson from 'bson';
- /**
-  * Replica id uniquely identifying a row on the source database.
-  *
-  * Can be any value serializable to BSON.
-  *
-  * If the value is an entire document, a v5 UUID derived from the serialized data may be a good choice here.
-  */
- export type ReplicaId = bson.UUID | bson.Document | any;
- export interface SourceKey {
-     /** group_id */
-     g: number;
-     /** source table id */
-     t: bson.ObjectId;
-     /** source key */
-     k: ReplicaId;
- }
- export interface BucketDataKey {
-     /** group_id */
-     g: number;
-     /** bucket name */
-     b: string;
-     /** op_id */
-     o: bigint;
- }
- export interface CurrentDataDocument {
-     _id: SourceKey;
-     data: bson.Binary;
-     buckets: CurrentBucket[];
-     lookups: bson.Binary[];
- }
- export interface CurrentBucket {
-     bucket: string;
-     table: string;
-     id: string;
- }
- export interface BucketParameterDocument {
-     _id: bigint;
-     key: SourceKey;
-     lookup: bson.Binary;
-     bucket_parameters: Record<string, SqliteJsonValue>[];
- }
- export interface BucketDataDocument {
-     _id: BucketDataKey;
-     op: OpType;
-     source_table?: bson.ObjectId;
-     source_key?: ReplicaId;
-     table?: string;
-     row_id?: string;
-     checksum: number;
-     data: string | null;
-     target_op?: bigint | null;
- }
- export type OpType = 'PUT' | 'REMOVE' | 'MOVE' | 'CLEAR';
- export interface SourceTableDocument {
-     _id: bson.ObjectId;
-     group_id: number;
-     connection_id: number;
-     relation_id: number | string | undefined;
-     schema_name: string;
-     table_name: string;
-     replica_id_columns: string[] | null;
-     replica_id_columns2: {
-         name: string;
-         type_oid?: number;
-         type?: string;
-     }[] | undefined;
-     snapshot_done: boolean | undefined;
- }
- export interface IdSequenceDocument {
-     _id: string;
-     op_id: bigint;
- }
- export declare enum SyncRuleState {
-     /**
-      * New sync rules - need to be processed (initial replication).
-      *
-      * While multiple sets of sync rules _can_ be in PROCESSING,
-      * it's generally pointless, so we only keep one in that state.
-      */
-     PROCESSING = "PROCESSING",
-     /**
-      * Sync rule processing is done, and can be used for sync.
-      *
-      * Only one set of sync rules should be in ACTIVE state.
-      */
-     ACTIVE = "ACTIVE",
-     /**
-      * This state is used when the sync rules have been replaced,
-      * and replication is or should be stopped.
-      */
-     STOP = "STOP",
-     /**
-      * After sync rules have been stopped, the data needs to be
-      * deleted. Once deleted, the state is TERMINATED.
-      */
-     TERMINATED = "TERMINATED"
- }
- export interface SyncRuleDocument {
-     _id: number;
-     state: SyncRuleState;
-     /**
-      * True if initial snapshot has been replicated.
-      *
-      * Can only be false if state == PROCESSING.
-      */
-     snapshot_done: boolean;
-     /**
-      * The last consistent checkpoint.
-      *
-      * There may be higher OpIds used in the database if we're in the middle of replicating a large transaction.
-      */
-     last_checkpoint: bigint | null;
-     /**
-      * The LSN associated with the last consistent checkpoint.
-      */
-     last_checkpoint_lsn: string | null;
-     /**
-      * If set, no new checkpoints may be created < this value.
-      */
-     no_checkpoint_before: string | null;
-     /**
-      * Goes together with no_checkpoint_before.
-      *
-      * If a keepalive is triggered that creates the checkpoint > no_checkpoint_before,
-      * then the checkpoint must be equal to this keepalive_op.
-      */
-     keepalive_op: string | null;
-     slot_name: string | null;
-     /**
-      * Last time we persisted a checkpoint.
-      *
-      * This may be old if no data is incoming.
-      */
-     last_checkpoint_ts: Date | null;
-     /**
-      * Last time we persisted a checkpoint or keepalive.
-      *
-      * This should stay fairly current while replicating.
-      */
-     last_keepalive_ts: Date | null;
-     /**
-      * If an error is stopping replication, it will be stored here.
-      */
-     last_fatal_error: string | null;
-     content: string;
- }
- export interface CustomWriteCheckpointDocument {
-     _id: bson.ObjectId;
-     user_id: string;
-     checkpoint: bigint;
-     sync_rules_id: number;
- }
- export interface WriteCheckpointDocument {
-     _id: bson.ObjectId;
-     user_id: string;
-     lsns: Record<string, string>;
-     client_id: bigint;
- }
- export interface InstanceDocument {
-     _id: string;
- }
package/dist/storage/mongo/models.js DELETED
@@ -1,27 +0,0 @@
- export var SyncRuleState;
- (function (SyncRuleState) {
-     /**
-      * New sync rules - need to be processed (initial replication).
-      *
-      * While multiple sets of sync rules _can_ be in PROCESSING,
-      * it's generally pointless, so we only keep one in that state.
-      */
-     SyncRuleState["PROCESSING"] = "PROCESSING";
-     /**
-      * Sync rule processing is done, and can be used for sync.
-      *
-      * Only one set of sync rules should be in ACTIVE state.
-      */
-     SyncRuleState["ACTIVE"] = "ACTIVE";
-     /**
-      * This state is used when the sync rules have been replaced,
-      * and replication is or should be stopped.
-      */
-     SyncRuleState["STOP"] = "STOP";
-     /**
-      * After sync rules have been stopped, the data needs to be
-      * deleted. Once deleted, the state is TERMINATED.
-      */
-     SyncRuleState["TERMINATED"] = "TERMINATED";
- })(SyncRuleState || (SyncRuleState = {}));
- //# sourceMappingURL=models.js.map
package/dist/storage/mongo/models.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"models.js","sourceRoot":"","sources":["../../../src/storage/mongo/models.ts"],"names":[],"mappings":"AAiFA,MAAM,CAAN,IAAY,aAyBX;AAzBD,WAAY,aAAa;IACvB;;;;;OAKG;IACH,0CAAyB,CAAA;IAEzB;;;;OAIG;IACH,kCAAiB,CAAA;IACjB;;;OAGG;IACH,8BAAa,CAAA;IACb;;;OAGG;IACH,0CAAyB,CAAA;AAC3B,CAAC,EAzBW,aAAa,KAAb,aAAa,QAyBxB"}
package/dist/storage/mongo/util.d.ts DELETED
@@ -1,54 +0,0 @@
- import { SqliteJsonValue } from '@powersync/service-sync-rules';
- import * as bson from 'bson';
- import * as mongo from 'mongodb';
- import { OplogEntry } from '../../util/protocol-types.js';
- import { BucketDataDocument, ReplicaId } from './models.js';
- /**
-  * Lookup serialization must be number-agnostic. I.e. normalize numbers, instead of preserving numbers.
-  * @param lookup
-  */
- export declare function serializeLookup(lookup: SqliteJsonValue[]): bson.Binary;
- export declare function idPrefixFilter<T>(prefix: Partial<T>, rest: (keyof T)[]): mongo.Condition<T>;
- export declare function generateSlotName(prefix: string, sync_rules_id: number): string;
- /**
-  * Read a single batch of data from a cursor, then close it.
-  *
-  * We do our best to avoid MongoDB fetching any more data than this single batch.
-  *
-  * This is similar to using `singleBatch: true` in find options.
-  * However, that makes `has_more` detection very difficult, since the cursor is always closed
-  * after the first batch. Instead, we do a workaround to only fetch a single batch below.
-  *
-  * For this to be effective, set batchSize = limit in the find command.
-  */
- export declare function readSingleBatch<T>(cursor: mongo.FindCursor<T>): Promise<{
-     data: T[];
-     hasMore: boolean;
- }>;
- export declare const BSON_DESERIALIZE_OPTIONS: bson.DeserializeOptions;
- export declare function mapOpEntry(row: BucketDataDocument): OplogEntry;
- /**
-  * Returns true if two ReplicaId values are the same (serializes to the same BSON value).
-  */
- export declare function replicaIdEquals(a: ReplicaId, b: ReplicaId): boolean;
- export declare function replicaIdToSubkey(table: bson.ObjectId, id: ReplicaId): string;
- /**
-  * True if this is a bson.UUID.
-  *
-  * Works even with multiple copies of the bson package.
-  */
- export declare function isUUID(value: any): value is bson.UUID;
- /**
-  * MongoDB bulkWrite internally splits the operations into batches
-  * so that no batch exceeds 16MB. However, there are cases where
-  * the batch size is very close to 16MB, where additional metadata
-  * on the server pushes it over the limit, resulting in this error
-  * from the server:
-  *
-  * > MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
-  *
-  * We work around the issue by doing our own batching, limiting the
-  * batch size to 15MB. This does add additional overhead with
-  * BSON.calculateObjectSize.
-  */
- export declare function safeBulkWrite<T extends mongo.Document>(collection: mongo.Collection<T>, operations: mongo.AnyBulkWriteOperation<T>[], options: mongo.BulkWriteOptions): Promise<void>;
package/dist/storage/mongo/util.js DELETED
@@ -1,190 +0,0 @@
- import * as bson from 'bson';
- import * as crypto from 'crypto';
- import * as mongo from 'mongodb';
- import * as uuid from 'uuid';
- import { ID_NAMESPACE, timestampToOpId } from '../../util/utils.js';
- /**
-  * Lookup serialization must be number-agnostic. I.e. normalize numbers, instead of preserving numbers.
-  * @param lookup
-  */
- export function serializeLookup(lookup) {
-     const normalized = lookup.map((value) => {
-         if (typeof value == 'number' && Number.isInteger(value)) {
-             return BigInt(value);
-         }
-         else {
-             return value;
-         }
-     });
-     return new bson.Binary(bson.serialize({ l: normalized }));
- }
- export function idPrefixFilter(prefix, rest) {
-     let filter = {
-         $gte: {
-             ...prefix
-         },
-         $lt: {
-             ...prefix
-         }
-     };
-     for (let key of rest) {
-         filter.$gte[key] = new bson.MinKey();
-         filter.$lt[key] = new bson.MaxKey();
-     }
-     return filter;
- }
- export function generateSlotName(prefix, sync_rules_id) {
-     const slot_suffix = crypto.randomBytes(2).toString('hex');
-     return `${prefix}${sync_rules_id}_${slot_suffix}`;
- }
- /**
-  * Read a single batch of data from a cursor, then close it.
-  *
-  * We do our best to avoid MongoDB fetching any more data than this single batch.
-  *
-  * This is similar to using `singleBatch: true` in find options.
-  * However, that makes `has_more` detection very difficult, since the cursor is always closed
-  * after the first batch. Instead, we do a workaround to only fetch a single batch below.
-  *
-  * For this to be effective, set batchSize = limit in the find command.
-  */
- export async function readSingleBatch(cursor) {
-     try {
-         let data;
-         let hasMore = true;
-         // Let MongoDB load the first batch of data
-         const hasAny = await cursor.hasNext();
-         // Now it's in memory, and we can read it
-         data = cursor.readBufferedDocuments();
-         if (!hasAny || cursor.id?.isZero()) {
-             // A zero id means the cursor is exhausted.
-             // No results (hasAny == false) means even this batch doesn't have data.
-             // This should give similar results to `await cursor.hasNext()`, but without
-             // actually fetching the next batch.
-             //
-             // Note that it is safe (but slightly inefficient) to return `hasMore: true`
-             // without there being more data, as long as the next batch
-             // will return `hasMore: false`.
-             hasMore = false;
-         }
-         return { data, hasMore };
-     }
-     finally {
-         // Match the cleanup from the cursor iterator logic here:
-         // https://github.com/mongodb/node-mongodb-native/blob/e02534e7d1c627bf50b85ca39f5995dbf165ad44/src/cursor/abstract_cursor.ts#L327-L331
-         if (!cursor.closed) {
-             await cursor.close();
-         }
-     }
- }
- export const BSON_DESERIALIZE_OPTIONS = {
-     // use bigint instead of Long
-     useBigInt64: true
- };
- export function mapOpEntry(row) {
-     if (row.op == 'PUT' || row.op == 'REMOVE') {
-         return {
-             op_id: timestampToOpId(row._id.o),
-             op: row.op,
-             object_type: row.table,
-             object_id: row.row_id,
-             checksum: Number(row.checksum),
-             subkey: replicaIdToSubkey(row.source_table, row.source_key),
-             data: row.data
-         };
-     }
-     else {
-         // MOVE, CLEAR
-         return {
-             op_id: timestampToOpId(row._id.o),
-             op: row.op,
-             checksum: Number(row.checksum)
-         };
-     }
- }
- /**
-  * Returns true if two ReplicaId values are the same (serializes to the same BSON value).
-  */
- export function replicaIdEquals(a, b) {
-     if (a === b) {
-         return true;
-     }
-     else if (typeof a == 'string' && typeof b == 'string') {
-         return a == b;
-     }
-     else if (isUUID(a) && isUUID(b)) {
-         return a.equals(b);
-     }
-     else if (a == null && b == null) {
-         return true;
-     }
-     else if (a != null || b != null) {
-         return false;
-     }
-     else {
-         // There are many possible primitive values, this covers them all
-         return bson.serialize({ id: a }).equals(bson.serialize({ id: b }));
-     }
- }
- export function replicaIdToSubkey(table, id) {
-     if (isUUID(id)) {
-         // Special case for UUID for backwards-compatibility
-         return `${table.toHexString()}/${id.toHexString()}`;
-     }
-     else {
-         // Hashed UUID from the table and id
-         const repr = bson.serialize({ table, id });
-         return uuid.v5(repr, ID_NAMESPACE);
-     }
- }
- /**
-  * True if this is a bson.UUID.
-  *
-  * Works even with multiple copies of the bson package.
-  */
- export function isUUID(value) {
-     if (value == null || typeof value != 'object') {
-         return false;
-     }
-     const uuid = value;
-     return uuid._bsontype == 'Binary' && uuid.sub_type == bson.Binary.SUBTYPE_UUID;
- }
- /**
-  * MongoDB bulkWrite internally splits the operations into batches
-  * so that no batch exceeds 16MB. However, there are cases where
-  * the batch size is very close to 16MB, where additional metadata
-  * on the server pushes it over the limit, resulting in this error
-  * from the server:
-  *
-  * > MongoBulkWriteError: BSONObj size: 16814023 (0x1008FC7) is invalid. Size must be between 0 and 16793600(16MB) First element: insert: "bucket_data"
-  *
-  * We work around the issue by doing our own batching, limiting the
-  * batch size to 15MB. This does add additional overhead with
-  * BSON.calculateObjectSize.
-  */
- export async function safeBulkWrite(collection, operations, options) {
-     // Must be below 16MB.
-     // We could probably go a little closer, but 15MB is a safe threshold.
-     const BULK_WRITE_LIMIT = 15 * 1024 * 1024;
-     let batch = [];
-     let currentSize = 0;
-     // Estimated overhead per operation, should be smaller in reality.
-     const keySize = 8;
-     for (let op of operations) {
-         const bsonSize = mongo.BSON.calculateObjectSize(op, {
-             checkKeys: false,
-             ignoreUndefined: true
-         }) + keySize;
-         if (batch.length > 0 && currentSize + bsonSize > BULK_WRITE_LIMIT) {
-             await collection.bulkWrite(batch, options);
-             currentSize = 0;
-             batch = [];
-         }
-         batch.push(op);
-         currentSize += bsonSize;
-     }
-     if (batch.length > 0) {
-         await collection.bulkWrite(batch, options);
-     }
- }
- //# sourceMappingURL=util.js.map
package/dist/storage/mongo/util.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"util.js","sourceRoot":"","sources":["../../../src/storage/mongo/util.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,IAAI,MAAM,MAAM,CAAC;AAC7B,OAAO,KAAK,MAAM,MAAM,QAAQ,CAAC;AACjC,OAAO,KAAK,KAAK,MAAM,SAAS,CAAC;AACjC,OAAO,KAAK,IAAI,MAAM,MAAM,CAAC;AAE7B,OAAO,EAAE,YAAY,EAAE,eAAe,EAAE,MAAM,qBAAqB,CAAC;AAGpE;;;GAGG;AAEH,MAAM,UAAU,eAAe,CAAC,MAAyB;IACvD,MAAM,UAAU,GAAG,MAAM,CAAC,GAAG,CAAC,CAAC,KAAK,EAAE,EAAE;QACtC,IAAI,OAAO,KAAK,IAAI,QAAQ,IAAI,MAAM,CAAC,SAAS,CAAC,KAAK,CAAC,EAAE,CAAC;YACxD,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC;QACvB,CAAC;aAAM,CAAC;YACN,OAAO,KAAK,CAAC;QACf,CAAC;IACH,CAAC,CAAC,CAAC;IACH,OAAO,IAAI,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,SAAS,CAAC,EAAE,CAAC,EAAE,UAAU,EAAE,CAAC,CAAC,CAAC;AAC5D,CAAC;AAED,MAAM,UAAU,cAAc,CAAI,MAAkB,EAAE,IAAiB;IACrE,IAAI,MAAM,GAAG;QACX,IAAI,EAAE;YACJ,GAAG,MAAM;SACH;QACR,GAAG,EAAE;YACH,GAAG,MAAM;SACH;KACT,CAAC;IAEF,KAAK,IAAI,GAAG,IAAI,IAAI,EAAE,CAAC;QACrB,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;QACrC,MAAM,CAAC,GAAG,CAAC,GAAG,CAAC,GAAG,IAAI,IAAI,CAAC,MAAM,EAAE,CAAC;IACtC,CAAC;IAED,OAAO,MAAM,CAAC;AAChB,CAAC;AAED,MAAM,UAAU,gBAAgB,CAAC,MAAc,EAAE,aAAqB;IACpE,MAAM,WAAW,GAAG,MAAM,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC;IAC1D,OAAO,GAAG,MAAM,GAAG,aAAa,IAAI,WAAW,EAAE,CAAC;AACpD,CAAC;AAED;;;;;;;;;;GAUG;AACH,MAAM,CAAC,KAAK,UAAU,eAAe,CAAI,MAA2B;IAClE,IAAI,CAAC;QACH,IAAI,IAAS,CAAC;QACd,IAAI,OAAO,GAAG,IAAI,CAAC;QACnB,2CAA2C;QAC3C,MAAM,MAAM,GAAG,MAAM,MAAM,CAAC,OAAO,EAAE,CAAC;QACtC,yCAAyC;QACzC,IAAI,GAAG,MAAM,CAAC,qBAAqB,EAAE,CAAC;QACtC,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC,EAAE,EAAE,MAAM,EAAE,EAAE,CAAC;YACnC,0CAA0C;YAC1C,wEAAwE;YACxE,uEAAuE;YACvE,oCAAoC;YACpC,EAAE;YACF,4EAA4E;YAC5E,2DAA2D;YAC3D,gCAAgC;YAChC,OAAO,GAAG,KAAK,CAAC;QAClB,CAAC;QACD,OAAO,EAAE,IAAI,EAAE,OAAO,EAAE,CAAC;IAC3B,CAAC;YAAS,CAAC;QACT,iDAAiD;QACjD,uIAAuI;QACvI,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,CAAC;YACnB,MAAM,MAAM,CAAC,KAAK,EAAE,CAAC;QACvB,CAAC;IACH,CAAC;AACH,CAAC;AAED,MAAM,CAAC,MAAM,wBAAwB,GAA4B;IAC/D,6BAA6B;IAC7B,WAAW,EAAE,IAAI;CAClB,CAAC;AAEF,MAAM,UAAU,UAAU,CAAC,GAAuB;IAChD,IAAI,GAAG,CAAC,EAAE,IAAI,KAAK,IAAI,GAAG,CAAC,EAAE,IAAI,QAAQ,EAAE,CAAC;QAC1C,OAAO;YACL,KAAK,EAAE,eAAe,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;YACjC,EAAE,EAAE,GAAG,CAAC,EAAE;YACV,WAAW,EAAE,GAAG,CAAC,KAAK;YACtB,SAAS,EAAE,GAAG,CAAC,MAAM;YACrB,QAAQ,EAAE,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC;YAC9B,MAAM,EAAE,iBAAiB,CAAC,GAAG,CAAC,YAAa,EAAE,GAAG,CAAC,UAAW,CAAC;YAC7D,IAAI,EAAE,GAAG,CAAC,IAAI;SACf,CAAC;IACJ,CAAC;SAAM,CAAC;QACN,cAAc;QAEd,OAAO;YACL,KAAK,EAAE,eAAe,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC,CAAC;YACjC,EAAE,EAAE,GAAG,CAAC,EAAE;YACV,QAAQ,EAAE,MAAM,CAAC,GAAG,CAAC,QAAQ,CAAC;SAC/B,CAAC;IACJ,CAAC;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,eAAe,CAAC,CAAY,EAAE,CAAY;IACxD,IAAI,CAAC,KAAK,CAAC,EAAE,CAAC;QACZ,OAAO,IAAI,CAAC;IACd,CAAC;SAAM,IAAI,OAAO,CAAC,IAAI,QAAQ,IAAI,OAAO,CAAC,IAAI,QAAQ,EAAE,CAAC;QACxD,OAAO,CAAC,IAAI,CAAC,CAAC;IAChB,CAAC;SAAM,IAAI,MAAM,CAAC,CAAC,CAAC,IAAI,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;QAClC,OAAO,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;IACrB,CAAC;SAAM,IAAI,CAAC,IAAI,IAAI,IAAI,CAAC,IAAI,IAAI,EAAE,CAAC;QAClC,OAAO,IAAI,CAAC;IACd,CAAC;SAAM,IAAI,CAAC,IAAI,IAAI,IAAI,CAAC,IAAI,IAAI,EAAE,CAAC;QAClC,OAAO,KAAK,CAAC;IACf,CAAC;SAAM,CAAC;QACN,iEAAiE;QACjE,OAAQ,IAAI,CAAC,SAAS,CAAC,EAAE,EAAE,EAAE,CAAC,EAAE,CAAY,CAAC,MAAM,CAAC,IAAI,CAAC,SAAS,CAAC,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC;IACjF,CAAC;AACH,CAAC;AAED,MAAM,UAAU,iBAAiB,CAAC,KAAoB,EAAE,EAAa;IACnE,IAAI,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC;QACf,mDAAmD;QACnD,OAAO,GAAG,KAAK,CAAC,WAAW,EAAE,IAAI,EAAE,CAAC,WAAW,EAAE,EAAE,CAAC;IACtD,CAAC;SAAM,CAAC;QACN,oCAAoC;QACpC,MAAM,IAAI,GAAG,IAAI,CAAC,SAAS,CAAC,EAAE,KAAK,
EAAE,EAAE,EAAE,CAAC,CAAC;QAC3C,OAAO,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE,YAAY,CAAC,CAAC;IACrC,CAAC;AACH,CAAC;AAED;;;;GAIG;AACH,MAAM,UAAU,MAAM,CAAC,KAAU;IAC/B,IAAI,KAAK,IAAI,IAAI,IAAI,OAAO,KAAK,IAAI,QAAQ,EAAE,CAAC;QAC9C,OAAO,KAAK,CAAC;IACf,CAAC;IACD,MAAM,IAAI,GAAG,KAAkB,CAAC;IAChC,OAAO,IAAI,CAAC,SAAS,IAAI,QAAQ,IAAI,IAAI,CAAC,QAAQ,IAAI,IAAI,CAAC,MAAM,CAAC,YAAY,CAAC;AACjF,CAAC;AAED;;;;;;;;;;;;GAYG;AACH,MAAM,CAAC,KAAK,UAAU,aAAa,CACjC,UAA+B,EAC/B,UAA4C,EAC5C,OAA+B;IAE/B,sBAAsB;IACtB,sEAAsE;IACtE,MAAM,gBAAgB,GAAG,EAAE,GAAG,IAAI,GAAG,IAAI,CAAC;IAE1C,IAAI,KAAK,GAAqC,EAAE,CAAC;IACjD,IAAI,WAAW,GAAG,CAAC,CAAC;IACpB,kEAAkE;IAClE,MAAM,OAAO,GAAG,CAAC,CAAC;IAClB,KAAK,IAAI,EAAE,IAAI,UAAU,EAAE,CAAC;QAC1B,MAAM,QAAQ,GACZ,KAAK,CAAC,IAAI,CAAC,mBAAmB,CAAC,EAAE,EAAE;YACjC,SAAS,EAAE,KAAK;YAChB,eAAe,EAAE,IAAI;SACf,CAAC,GAAG,OAAO,CAAC;QACtB,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,IAAI,WAAW,GAAG,QAAQ,GAAG,gBAAgB,EAAE,CAAC;YAClE,MAAM,UAAU,CAAC,SAAS,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC;YAC3C,WAAW,GAAG,CAAC,CAAC;YAChB,KAAK,GAAG,EAAE,CAAC;QACb,CAAC;QACD,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC;QACf,WAAW,IAAI,QAAQ,CAAC;IAC1B,CAAC;IACD,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QACrB,MAAM,UAAU,CAAC,SAAS,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC;IAC7C,CAAC;AACH,CAAC"}
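The readSingleBatch helper removed above relies on the caller setting batchSize equal to limit on the find cursor, as its doc comment notes. A minimal sketch of that pairing, assuming only the APIs shown in the deleted util.js; the collection, filter, and sort options are illustrative, not taken from the package:

import * as mongo from 'mongodb';
import { readSingleBatch } from './util.js'; // import path as in the removed module

// Fetch at most `limit` documents in one round trip, and report whether more
// data may remain, without letting the driver prefetch additional batches.
async function fetchPage(collection: mongo.Collection, limit: number) {
  const cursor = collection.find({}, { limit, batchSize: limit, sort: { _id: 1 } });
  const { data, hasMore } = await readSingleBatch(cursor);
  return { data, hasMore };
}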
@@ -1 +0,0 @@
- export * as mongo from './mongo.js';
package/src/db/mongo.ts DELETED
@@ -1,81 +0,0 @@
- import * as mongo from 'mongodb';
- import * as timers from 'timers/promises';
-
- import { configFile } from '@powersync/service-types';
- import { normalizeMongoConfig } from '../storage/storage-index.js';
-
- /**
-  * Time for a new connection to time out.
-  */
- export const MONGO_CONNECT_TIMEOUT_MS = 10_000;
-
- /**
-  * Time for individual requests to time out the socket.
-  */
- export const MONGO_SOCKET_TIMEOUT_MS = 60_000;
-
- /**
-  * Time for individual requests to time out the operation.
-  *
-  * This is time spent on the cursor, not total time.
-  *
-  * Must be less than MONGO_SOCKET_TIMEOUT_MS to ensure proper error handling.
-  */
- export const MONGO_OPERATION_TIMEOUT_MS = 30_000;
-
- /**
-  * Same as above, but specifically for clear operations.
-  *
-  * These are retried when reaching the timeout.
-  */
- export const MONGO_CLEAR_OPERATION_TIMEOUT_MS = 5_000;
-
- export function createMongoClient(config: configFile.PowerSyncConfig['storage']) {
-   const normalized = normalizeMongoConfig(config);
-   return new mongo.MongoClient(normalized.uri, {
-     auth: {
-       username: normalized.username,
-       password: normalized.password
-     },
-     // Time for connection to timeout
-     connectTimeoutMS: MONGO_CONNECT_TIMEOUT_MS,
-     // Time for individual requests to timeout
-     socketTimeoutMS: MONGO_SOCKET_TIMEOUT_MS,
-     // How long to wait for new primary selection
-     serverSelectionTimeoutMS: 30_000,
-
-     // Avoid too many connections:
-     // 1. It can overwhelm the source database.
-     // 2. Processing too many queries in parallel can cause the process to run out of memory.
-     maxPoolSize: 8,
-
-     maxConnecting: 3,
-     maxIdleTimeMS: 60_000
-   });
- }
-
- /**
-  * Wait up to a minute for authentication errors to resolve.
-  *
-  * There can be a delay between an Atlas user being created, and that user being
-  * available on the database cluster. This works around it.
-  *
-  * This is specifically relevant for migrations and teardown - other parts of the stack
-  * can generally handle these failures and just retry or restart.
-  */
- export async function waitForAuth(db: mongo.Db) {
-   const start = Date.now();
-   while (Date.now() - start < 60_000) {
-     try {
-       await db.command({ ping: 1 });
-       // Success
-       break;
-     } catch (e) {
-       if (e.codeName == 'AuthenticationFailed') {
-         await timers.setTimeout(1_000);
-         continue;
-       }
-       throw e;
-     }
-   }
- }
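A brief sketch of how the two helpers deleted above are typically combined: create the client with the timeouts defined in mongo.ts, then wait out the Atlas user-propagation delay before continuing. The surrounding function is hypothetical, not from the package:

import { configFile } from '@powersync/service-types';
import { createMongoClient, waitForAuth } from './db/mongo.js'; // import path as in the removed module

// Connect, then tolerate delayed Atlas credential propagation before
// running anything that cannot simply retry (e.g. migrations or teardown).
async function connectForMigrations(storage: configFile.PowerSyncConfig['storage']) {
  const client = createMongoClient(storage);
  await client.connect();
  await waitForAuth(client.db());
  return client;
}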
package/src/locks/LockManager.ts DELETED
@@ -1,16 +0,0 @@
- import * as bson from 'bson';
-
- export class LockActiveError extends Error {
-   constructor() {
-     super('Lock is already active');
-     this.name = this.constructor.name;
-   }
- }
-
- export type LockManager = {
-   acquire: () => Promise<bson.ObjectId | null>;
-   refresh: (lock_id: bson.ObjectId) => Promise<void>;
-   release: (lock_id: bson.ObjectId) => Promise<void>;
-
-   lock: (handler: (refresh: () => Promise<void>) => Promise<void>) => Promise<void>;
- };
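The LockManager type above exposes both low-level acquire/refresh/release methods and a higher-level lock(handler) wrapper. A hypothetical consumer of the wrapper follows, assuming an implementation (such as the removed MongoLocks, not shown here) that surfaces LockActiveError when the lock is already held:

import { LockActiveError, LockManager } from './locks/LockManager.js'; // import path as in the removed module

// Run a long task while holding the exclusive lock, refreshing it
// periodically so the hold does not expire mid-task.
async function runExclusive(locks: LockManager, task: () => Promise<void>): Promise<boolean> {
  try {
    await locks.lock(async (refresh) => {
      const timer = setInterval(() => void refresh(), 30_000);
      try {
        await task();
      } finally {
        clearInterval(timer);
      }
    });
    return true;
  } catch (e) {
    if (e instanceof LockActiveError) {
      // Another process holds the lock; skip this run.
      return false;
    }
    throw e;
  }
}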