@topgunbuild/server 0.5.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +97 -0
- package/dist/index.d.mts +573 -3
- package/dist/index.d.ts +573 -3
- package/dist/index.js +1786 -262
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1697 -183
- package/dist/index.mjs.map +1 -1
- package/package.json +12 -12
package/dist/index.js
CHANGED
@@ -38,12 +38,18 @@ __export(index_exports, {
   ConnectionRateLimiter: () => ConnectionRateLimiter,
   DEFAULT_CLUSTER_COORDINATOR_CONFIG: () => DEFAULT_CLUSTER_COORDINATOR_CONFIG,
   DEFAULT_CONFLICT_RESOLVER_CONFIG: () => DEFAULT_CONFLICT_RESOLVER_CONFIG,
+  DEFAULT_FAILURE_DETECTOR_CONFIG: () => DEFAULT_FAILURE_DETECTOR_CONFIG,
   DEFAULT_INDEX_CONFIG: () => DEFAULT_INDEX_CONFIG,
   DEFAULT_JOURNAL_SERVICE_CONFIG: () => DEFAULT_JOURNAL_SERVICE_CONFIG,
   DEFAULT_LAG_TRACKER_CONFIG: () => DEFAULT_LAG_TRACKER_CONFIG,
+  DEFAULT_MERKLE_TREE_CONFIG: () => DEFAULT_MERKLE_TREE_CONFIG,
+  DEFAULT_READ_REPLICA_CONFIG: () => DEFAULT_READ_REPLICA_CONFIG,
+  DEFAULT_REASSIGNER_CONFIG: () => DEFAULT_REASSIGNER_CONFIG,
+  DEFAULT_REPAIR_CONFIG: () => DEFAULT_REPAIR_CONFIG,
   DEFAULT_SANDBOX_CONFIG: () => DEFAULT_SANDBOX_CONFIG,
   EntryProcessorHandler: () => EntryProcessorHandler,
   EventJournalService: () => EventJournalService,
+  FailureDetector: () => FailureDetector,
   FilterTasklet: () => FilterTasklet,
   ForEachTasklet: () => ForEachTasklet,
   IteratorTasklet: () => IteratorTasklet,
@@ -53,13 +59,17 @@ __export(index_exports, {
   MapTasklet: () => MapTasklet,
   MapWithResolver: () => MapWithResolver,
   MemoryServerAdapter: () => MemoryServerAdapter,
+  MerkleTreeManager: () => MerkleTreeManager,
   MigrationManager: () => MigrationManager,
   ObjectPool: () => ObjectPool,
+  PartitionReassigner: () => PartitionReassigner,
   PartitionService: () => PartitionService,
   PostgresAdapter: () => PostgresAdapter,
   ProcessorSandbox: () => ProcessorSandbox,
   RateLimitInterceptor: () => RateLimitInterceptor,
+  ReadReplicaHandler: () => ReadReplicaHandler,
   ReduceTasklet: () => ReduceTasklet,
+  RepairScheduler: () => RepairScheduler,
   ReplicationPipeline: () => ReplicationPipeline,
   SecurityManager: () => SecurityManager,
   ServerCoordinator: () => ServerCoordinator,
@@ -95,7 +105,7 @@ var import_http = require("http");
 var import_https = require("https");
 var import_fs2 = require("fs");
 var import_ws3 = require("ws");
-var
+var import_core19 = require("@topgunbuild/core");
 var jwt = __toESM(require("jsonwebtoken"));
 var crypto = __toESM(require("crypto"));
 
@@ -1211,6 +1221,47 @@ var ClusterManager = class extends import_events2.EventEmitter {
   handleHeartbeat(senderId, _payload) {
     this.failureDetector.recordHeartbeat(senderId);
   }
+  /**
+   * Send current member list to a specific node (gossip protocol).
+   * Called when a new node joins to propagate cluster topology.
+   */
+  sendMemberList(targetNodeId) {
+    const members = [];
+    for (const [nodeId, member] of this.members) {
+      members.push({
+        nodeId,
+        host: member.host,
+        port: member.port
+      });
+    }
+    this.send(targetNodeId, "MEMBER_LIST", { members });
+    logger.debug({ targetNodeId, memberCount: members.length }, "Sent member list");
+  }
+  /**
+   * Broadcast member list to all connected nodes.
+   * Called when cluster membership changes.
+   */
+  broadcastMemberList() {
+    for (const [nodeId, member] of this.members) {
+      if (member.isSelf) continue;
+      if (member.socket && member.socket.readyState === import_ws.WebSocket.OPEN) {
+        this.sendMemberList(nodeId);
+      }
+    }
+  }
+  /**
+   * Handle incoming member list from a peer (gossip protocol).
+   * Attempts to connect to unknown members.
+   */
+  handleMemberList(payload) {
+    for (const memberInfo of payload.members) {
+      if (memberInfo.nodeId === this.config.nodeId) continue;
+      if (this.members.has(memberInfo.nodeId)) continue;
+      const peerAddress = `${memberInfo.host}:${memberInfo.port}`;
+      logger.info({ nodeId: memberInfo.nodeId, peerAddress }, "Discovered new member via gossip");
+      this.connectToPeer(peerAddress);
+    }
+  }
   /**
    * Handle confirmed node failure.
    */
@@ -1349,6 +1400,9 @@ var ClusterManager = class extends import_events2.EventEmitter {
         this.failureDetector.startMonitoring(remoteNodeId);
         this.startHeartbeat();
         this.emit("memberJoined", remoteNodeId);
+        this.broadcastMemberList();
+      } else if (msg.type === "MEMBER_LIST") {
+        this.handleMemberList(msg.payload);
       } else if (msg.type === "HEARTBEAT") {
         if (remoteNodeId) {
           this.handleHeartbeat(remoteNodeId, msg.payload);
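The two hunks above add lightweight gossip to ClusterManager: on every confirmed JOIN the node broadcasts its member list, and each receiver dials any peer it has not seen yet. A minimal walkthrough of the message flow, with hypothetical node names and addresses (only the payload shape and method names come from the diff):

// Hypothetical three-node bootstrap: node-c is seeded only with node-a's address.
// 1. node-c connects to node-a; node-a handles the JOIN, emits "memberJoined",
//    and calls broadcastMemberList(), sending each open peer socket a payload like:
const memberList = {
  members: [
    { nodeId: "node-a", host: "10.0.0.1", port: 7000 },
    { nodeId: "node-b", host: "10.0.0.2", port: 7000 },
    { nodeId: "node-c", host: "10.0.0.3", port: 7000 }
  ]
};
// 2. handleMemberList() on node-c skips its own nodeId and the already-known
//    node-a, then calls connectToPeer("10.0.0.2:7000") to reach node-b, so the
//    full mesh converges without every node listing every seed.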
@@ -6634,238 +6688,1438 @@ var ReplicationPipeline = class extends import_events8.EventEmitter {
   }
 };
 
-// src/
+// src/cluster/PartitionReassigner.ts
+var import_events9 = require("events");
 var import_core10 = require("@topgunbuild/core");
-var
-
-
-
-
-
+var DEFAULT_REASSIGNER_CONFIG = {
+  reassignmentDelayMs: 1e3,
+  maxConcurrentTransfers: 10,
+  autoPromoteBackups: true,
+  autoAssignNewBackups: true
+};
+var PartitionReassigner = class extends import_events9.EventEmitter {
+  constructor(clusterManager, partitionService, config = {}) {
+    super();
+    this.failoverInProgress = false;
+    this.partitionsReassigned = 0;
+    this.pendingReassignments = /* @__PURE__ */ new Set();
+    this.clusterManager = clusterManager;
+    this.partitionService = partitionService;
+    this.config = { ...DEFAULT_REASSIGNER_CONFIG, ...config };
+    this.setupEventHandlers();
+  }
+  setupEventHandlers() {
+    this.clusterManager.on("nodeConfirmedFailed", (nodeId) => {
+      logger.warn({ nodeId }, "Node failure confirmed, initiating partition reassignment");
+      this.handleNodeFailure(nodeId);
+    });
+    this.clusterManager.on("memberLeft", (nodeId) => {
+      if (this.currentFailedNode !== nodeId) {
+        logger.info({ nodeId }, "Member left cluster, checking partition reassignment");
+        this.handleNodeDeparture(nodeId);
+      }
+    });
   }
   /**
-*
+   * Handle a node failure - initiates failover process
    */
-
-
-
-
-this.counters.set(name, counter);
-logger.debug({ name }, "Created new counter");
+  handleNodeFailure(failedNodeId) {
+    if (this.failoverInProgress && this.currentFailedNode === failedNodeId) {
+      logger.debug({ failedNodeId }, "Failover already in progress for this node");
+      return;
     }
-
+    if (this.reassignmentTimer) {
+      clearTimeout(this.reassignmentTimer);
+    }
+    this.reassignmentTimer = setTimeout(() => {
+      this.executeFailover(failedNodeId);
+    }, this.config.reassignmentDelayMs);
   }
   /**
-* Handle
-* @returns Response message to send back to client
+   * Handle a graceful node departure
    */
-
-const
-
-
-
-
-type: "COUNTER_RESPONSE",
-payload: {
-name,
-state: this.stateToObject(state)
-}
-};
+  handleNodeDeparture(nodeId) {
+    const orphanedPartitions = this.findOrphanedPartitions(nodeId);
+    if (orphanedPartitions.length > 0) {
+      logger.warn({ nodeId, count: orphanedPartitions.length }, "Found orphaned partitions after departure");
+      this.executeFailover(nodeId);
+    }
   }
   /**
-*
-* @returns Merged state and list of clients to broadcast to
+   * Execute the failover process for a failed node
    */
-
-
-
-
-
-
-logger.
-
-
-
-
-
-
-return {
-// Response to the sending client
-response: {
-type: "COUNTER_UPDATE",
-payload: {
-name,
-state: mergedStateObj
-}
-},
-// Broadcast to other clients
-broadcastTo,
-broadcastMessage: {
-type: "COUNTER_UPDATE",
-payload: {
-name,
-state: mergedStateObj
-}
+  async executeFailover(failedNodeId) {
+    this.failoverInProgress = true;
+    this.currentFailedNode = failedNodeId;
+    this.reassignmentStartTime = Date.now();
+    this.partitionsReassigned = 0;
+    this.pendingReassignments.clear();
+    logger.info({ failedNodeId }, "Starting partition failover");
+    try {
+      const orphanedPartitions = this.findOrphanedPartitions(failedNodeId);
+      if (orphanedPartitions.length === 0) {
+        logger.info({ failedNodeId }, "No partitions to reassign");
+        this.completeFailover();
+        return;
       }
-
+      logger.info({
+        failedNodeId,
+        partitionCount: orphanedPartitions.length
+      }, "Reassigning partitions from failed node");
+      for (const partitionId of orphanedPartitions) {
+        this.pendingReassignments.add(partitionId);
+      }
+      const changes = [];
+      for (const partitionId of orphanedPartitions) {
+        const change = await this.reassignPartition(partitionId, failedNodeId);
+        if (change) {
+          changes.push(change);
+          this.partitionsReassigned++;
+        }
+        this.pendingReassignments.delete(partitionId);
+      }
+      if (changes.length > 0) {
+        this.emit("partitionsReassigned", {
+          failedNodeId,
+          changes,
+          partitionMap: this.partitionService.getPartitionMap()
+        });
+      }
+      this.completeFailover();
+    } catch (error) {
+      logger.error({ failedNodeId, error }, "Failover failed");
+      this.emit("failoverError", { failedNodeId, error });
+      this.completeFailover();
+    }
   }
   /**
-*
+   * Find all partitions that need reassignment
    */
-
-
-
+  findOrphanedPartitions(failedNodeId) {
+    const orphaned = [];
+    const partitionMap = this.partitionService.getPartitionMap();
+    for (const partition of partitionMap.partitions) {
+      if (partition.ownerNodeId === failedNodeId) {
+        orphaned.push(partition.partitionId);
+      }
     }
-
-logger.debug({ clientId, counterName }, "Client subscribed to counter");
+    return orphaned;
   }
   /**
-*
+   * Reassign a single partition
    */
-
-const
-
-
-
-
+  async reassignPartition(partitionId, failedNodeId) {
+    const currentBackups = this.partitionService.getBackups(partitionId);
+    const aliveMembers = this.clusterManager.getMembers().filter((m) => m !== failedNodeId);
+    if (aliveMembers.length === 0) {
+      logger.error({ partitionId }, "No alive members to reassign partition to");
+      return null;
+    }
+    let newOwner = null;
+    if (this.config.autoPromoteBackups) {
+      for (const backup of currentBackups) {
+        if (aliveMembers.includes(backup)) {
+          newOwner = backup;
+          break;
+        }
       }
     }
+    if (!newOwner) {
+      const ownerIndex = partitionId % aliveMembers.length;
+      newOwner = aliveMembers.sort()[ownerIndex];
+    }
+    this.partitionService.setOwner(partitionId, newOwner);
+    logger.info({
+      partitionId,
+      previousOwner: failedNodeId,
+      newOwner
+    }, "Partition owner promoted");
+    this.emit("reassignment", {
+      type: "backup-promoted",
+      partitionId,
+      previousOwner: failedNodeId,
+      newOwner
+    });
+    if (this.config.autoAssignNewBackups) {
+      const newBackups = this.selectBackups(partitionId, newOwner, aliveMembers);
+    }
+    return {
+      partitionId,
+      previousOwner: failedNodeId,
+      newOwner,
+      reason: "FAILOVER"
+    };
   }
   /**
-*
+   * Select backup nodes for a partition
    */
-
-
-
-
-
-
+  selectBackups(partitionId, owner, aliveMembers) {
+    const backups = [];
+    const sortedMembers = aliveMembers.filter((m) => m !== owner).sort();
+    const startIndex = partitionId % sortedMembers.length;
+    for (let i = 0; i < Math.min(import_core10.DEFAULT_BACKUP_COUNT, sortedMembers.length); i++) {
+      const backupIndex = (startIndex + i) % sortedMembers.length;
+      backups.push(sortedMembers[backupIndex]);
     }
-
+    return backups;
   }
   /**
-*
+   * Complete the failover process
    */
-
-const
-
+  completeFailover() {
+    const duration = this.reassignmentStartTime ? Date.now() - this.reassignmentStartTime : 0;
+    logger.info({
+      failedNodeId: this.currentFailedNode,
+      partitionsReassigned: this.partitionsReassigned,
+      durationMs: duration
+    }, "Failover completed");
+    this.emit("failoverComplete", {
+      failedNodeId: this.currentFailedNode,
+      partitionsReassigned: this.partitionsReassigned,
+      durationMs: duration
+    });
+    this.failoverInProgress = false;
+    this.currentFailedNode = void 0;
+    this.reassignmentStartTime = void 0;
+    this.pendingReassignments.clear();
   }
   /**
-* Get
+   * Get current failover status
    */
-
-return
+  getStatus() {
+    return {
+      inProgress: this.failoverInProgress,
+      failedNodeId: this.currentFailedNode,
+      partitionsReassigned: this.partitionsReassigned,
+      partitionsPending: this.pendingReassignments.size,
+      startedAt: this.reassignmentStartTime,
+      completedAt: this.failoverInProgress ? void 0 : Date.now()
+    };
   }
   /**
-*
+   * Check if failover is in progress
    */
-
-return this.
+  isFailoverInProgress() {
+    return this.failoverInProgress;
   }
   /**
-*
+   * Force immediate reassignment (for testing/manual intervention)
    */
-
-
-
-
-
+  forceReassignment(failedNodeId) {
+    if (this.reassignmentTimer) {
+      clearTimeout(this.reassignmentTimer);
+    }
+    this.executeFailover(failedNodeId);
   }
   /**
-*
+   * Stop any pending reassignment
    */
-
-
-
-
-}
+  stop() {
+    if (this.reassignmentTimer) {
+      clearTimeout(this.reassignmentTimer);
+      this.reassignmentTimer = void 0;
+    }
+    this.failoverInProgress = false;
+    this.pendingReassignments.clear();
   }
 };
 
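A sketch of how the new PartitionReassigner could be wired in, assuming clusterManager and partitionService instances already exist; the constructor order, config keys, and event payload fields come from the code above, while the handler bodies are illustrative:

const reassigner = new PartitionReassigner(clusterManager, partitionService, {
  reassignmentDelayMs: 500 // debounce: wait 500 ms before declaring failover
});
reassigner.on("reassignment", ({ partitionId, previousOwner, newOwner }) => {
  console.log(`partition ${partitionId}: ${previousOwner} -> ${newOwner}`);
});
reassigner.on("failoverComplete", ({ failedNodeId, partitionsReassigned, durationMs }) => {
  console.log(`failover for ${failedNodeId} moved ${partitionsReassigned} partitions in ${durationMs} ms`);
});

Note the delay-then-execute design: handleNodeFailure only arms a timer, so a node that flaps back within reassignmentDelayMs never triggers a reassignment.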
-// src/
-var
-
-// src/ProcessorSandbox.ts
+// src/cluster/ReadReplicaHandler.ts
+var import_events10 = require("events");
 var import_core11 = require("@topgunbuild/core");
-var
-
-
-
-
-if (isProduction) {
-logger.error(
-"SECURITY WARNING: isolated-vm not available in production! Entry processors will run in less secure fallback mode. Install isolated-vm for production environments: pnpm add isolated-vm"
-);
-} else {
-logger.warn("isolated-vm not available, falling back to less secure VM");
-}
-}
-var DEFAULT_SANDBOX_CONFIG = {
-memoryLimitMb: 8,
-timeoutMs: 100,
-maxCachedIsolates: 100,
-strictValidation: true
+var DEFAULT_READ_REPLICA_CONFIG = {
+  defaultConsistency: import_core11.ConsistencyLevel.STRONG,
+  maxStalenessMs: 5e3,
+  preferLocalReplica: true,
+  loadBalancing: "latency-based"
 };
-var
-constructor(config = {}) {
-
-
-this.
-this.
-this.
+var ReadReplicaHandler = class extends import_events10.EventEmitter {
+  constructor(partitionService, clusterManager, nodeId, lagTracker, config = {}) {
+    super();
+    // Round-robin counters for load balancing
+    this.roundRobinCounters = /* @__PURE__ */ new Map();
+    this.partitionService = partitionService;
+    this.clusterManager = clusterManager;
+    this.nodeId = nodeId;
+    this.lagTracker = lagTracker;
+    this.config = { ...DEFAULT_READ_REPLICA_CONFIG, ...config };
   }
   /**
-*
-*
-* @param processor The processor definition (name, code, args)
-* @param value The current value for the key (or undefined)
-* @param key The key being processed
-* @returns Result containing success status, result, and new value
+   * Determine if a read request can be served locally
    */
-
-
-
-
-error: "Sandbox has been disposed"
-};
+  canServeLocally(request) {
+    const consistency = request.options?.consistency ?? this.config.defaultConsistency;
+    if (consistency === import_core11.ConsistencyLevel.STRONG) {
+      return this.partitionService.isLocalOwner(request.key);
     }
-
-
-
-
-
-
-
+    return this.partitionService.isRelated(request.key);
+  }
+  /**
+   * Determine which node should handle the read
+   */
+  selectReadNode(request) {
+    const key = request.key;
+    const consistency = request.options?.consistency ?? this.config.defaultConsistency;
+    const partitionId = this.partitionService.getPartitionId(key);
+    const distribution = this.partitionService.getDistribution(key);
+    if (consistency === import_core11.ConsistencyLevel.STRONG) {
+      if (!this.isNodeAlive(distribution.owner)) {
+        if (request.options?.allowStale) {
+          return this.selectAliveBackup(distribution.backups);
+        }
+        return null;
       }
+      return distribution.owner;
     }
-
-
-
-return
+    const allReplicas = [distribution.owner, ...distribution.backups];
+    const aliveReplicas = allReplicas.filter((n) => this.isNodeAlive(n));
+    if (aliveReplicas.length === 0) {
+      return null;
     }
+    if (request.options?.maxStaleness) {
+      const withinStaleness = aliveReplicas.filter(
+        (n) => this.getNodeStaleness(n) <= (request.options?.maxStaleness ?? Infinity)
+      );
+      if (withinStaleness.length > 0) {
+        return this.selectByStrategy(withinStaleness, partitionId);
+      }
+      if (this.isNodeAlive(distribution.owner)) {
+        return distribution.owner;
+      }
+    }
+    if (this.config.preferLocalReplica && aliveReplicas.includes(this.nodeId)) {
+      return this.nodeId;
+    }
+    return this.selectByStrategy(aliveReplicas, partitionId);
   }
   /**
-*
+   * Select replica using configured load balancing strategy
    */
-
-if (
-
+  selectByStrategy(replicas, partitionId) {
+    if (replicas.length === 0) {
+      throw new Error("No replicas available");
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if (replicas.length === 1) {
+      return replicas[0];
+    }
+    switch (this.config.loadBalancing) {
+      case "round-robin":
+        return this.selectRoundRobin(replicas, partitionId);
+      case "latency-based":
+        return this.selectByLatency(replicas);
+      case "least-connections":
+        return this.selectRoundRobin(replicas, partitionId);
+      default:
+        return replicas[0];
+    }
+  }
+  /**
+   * Round-robin selection
+   */
+  selectRoundRobin(replicas, partitionId) {
+    const counter = this.roundRobinCounters.get(partitionId) ?? 0;
+    const selected = replicas[counter % replicas.length];
+    this.roundRobinCounters.set(partitionId, counter + 1);
+    return selected;
+  }
+  /**
+   * Latency-based selection using lag tracker
+   */
+  selectByLatency(replicas) {
+    if (!this.lagTracker) {
+      return replicas[0];
+    }
+    let bestNode = replicas[0];
+    let bestLatency = Infinity;
+    for (const nodeId of replicas) {
+      const lag = this.lagTracker.getLag(nodeId);
+      if (lag && lag.current < bestLatency) {
+        bestLatency = lag.current;
+        bestNode = nodeId;
+      }
+    }
+    return bestNode;
+  }
+  /**
+   * Get estimated staleness for a node in ms
+   */
+  getNodeStaleness(nodeId) {
+    if (nodeId === this.partitionService.getOwner("")) {
+      return 0;
+    }
+    if (this.lagTracker) {
+      const lag = this.lagTracker.getLag(nodeId);
+      return lag?.current ?? 0;
+    }
+    return 0;
+  }
+  /**
+   * Check if a node is alive in the cluster
+   */
+  isNodeAlive(nodeId) {
+    const members = this.clusterManager.getMembers();
+    return members.includes(nodeId);
+  }
+  /**
+   * Select first alive backup from list
+   */
+  selectAliveBackup(backups) {
+    for (const backup of backups) {
+      if (this.isNodeAlive(backup)) {
+        return backup;
+      }
+    }
+    return null;
+  }
+  /**
+   * Create read response metadata
+   */
+  createReadMetadata(key, options) {
+    const consistency = options?.consistency ?? this.config.defaultConsistency;
+    const isOwner = this.partitionService.isLocalOwner(key);
+    return {
+      source: this.nodeId,
+      isOwner,
+      consistency
+    };
+  }
+  /**
+   * Check if local node should forward read to owner
+   */
+  shouldForwardRead(request) {
+    const consistency = request.options?.consistency ?? this.config.defaultConsistency;
+    if (consistency === import_core11.ConsistencyLevel.STRONG) {
+      return !this.partitionService.isLocalOwner(request.key);
+    }
+    if (!this.partitionService.isRelated(request.key)) {
+      return true;
+    }
+    return false;
+  }
+  /**
+   * Get metrics for monitoring
+   */
+  getMetrics() {
+    return {
+      defaultConsistency: this.config.defaultConsistency,
+      preferLocalReplica: this.config.preferLocalReplica,
+      loadBalancing: this.config.loadBalancing,
+      roundRobinPartitions: this.roundRobinCounters.size
+    };
+  }
+};
+
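The routing rules above in brief: STRONG reads must hit the partition owner (or fail, unless allowStale permits a backup), while weaker reads may go to any live replica, optionally bounded by maxStaleness. A hedged usage sketch — `handler` is an assumed ReadReplicaHandler instance, and ConsistencyLevel.EVENTUAL is an assumption about the non-STRONG member of the @topgunbuild/core enum:

const { ConsistencyLevel } = require("@topgunbuild/core");
// Owner-only read; returns null when the owner is down and allowStale is unset.
const strongTarget = handler.selectReadNode({
  key: "user:42",
  options: { consistency: ConsistencyLevel.STRONG }
});
// Replica read bounded to ~1 s of lag, balanced by the configured strategy
// ("latency-based" by default, per DEFAULT_READ_REPLICA_CONFIG).
const staleTarget = handler.selectReadNode({
  key: "user:42",
  options: { consistency: ConsistencyLevel.EVENTUAL, maxStaleness: 1000 }
});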
+// src/cluster/MerkleTreeManager.ts
+var import_events11 = require("events");
+var import_core12 = require("@topgunbuild/core");
+var DEFAULT_MERKLE_TREE_CONFIG = {
+  treeDepth: 3,
+  autoUpdate: true,
+  lazyInit: true
+};
+var MerkleTreeManager = class extends import_events11.EventEmitter {
+  constructor(nodeId, config = {}) {
+    super();
+    this.trees = /* @__PURE__ */ new Map();
+    this.keyCounts = /* @__PURE__ */ new Map();
+    this.lastUpdated = /* @__PURE__ */ new Map();
+    this.nodeId = nodeId;
+    this.config = { ...DEFAULT_MERKLE_TREE_CONFIG, ...config };
+  }
+  /**
+   * Get or create a Merkle tree for a partition
+   */
+  getTree(partitionId) {
+    let tree = this.trees.get(partitionId);
+    if (!tree) {
+      tree = new import_core12.MerkleTree(/* @__PURE__ */ new Map(), this.config.treeDepth);
+      this.trees.set(partitionId, tree);
+      this.keyCounts.set(partitionId, 0);
+      this.lastUpdated.set(partitionId, Date.now());
+    }
+    return tree;
+  }
+  /**
+   * Build tree for a partition from existing data
+   */
+  buildTree(partitionId, records) {
+    const tree = new import_core12.MerkleTree(records, this.config.treeDepth);
+    this.trees.set(partitionId, tree);
+    this.keyCounts.set(partitionId, records.size);
+    this.lastUpdated.set(partitionId, Date.now());
+    logger.debug({
+      partitionId,
+      keyCount: records.size,
+      rootHash: tree.getRootHash()
+    }, "Built Merkle tree for partition");
+  }
+  /**
+   * Incrementally update tree when a record changes
+   */
+  updateRecord(partitionId, key, record) {
+    if (!this.config.autoUpdate) return;
+    const tree = this.getTree(partitionId);
+    const previousKeyCount = this.keyCounts.get(partitionId) ?? 0;
+    const existingBuckets = tree.getBuckets("");
+    const wasNewKey = Object.keys(existingBuckets).length === 0 || !tree.getKeysInBucket(this.getKeyPath(key)).includes(key);
+    tree.update(key, record);
+    if (wasNewKey) {
+      this.keyCounts.set(partitionId, previousKeyCount + 1);
+    }
+    this.lastUpdated.set(partitionId, Date.now());
+    this.emit("treeUpdated", {
+      partitionId,
+      key,
+      rootHash: tree.getRootHash()
+    });
+  }
+  /**
+   * Remove a key from the tree (e.g., after GC)
+   */
+  removeRecord(partitionId, key) {
+    const tree = this.trees.get(partitionId);
+    if (!tree) return;
+    tree.remove(key);
+    const currentCount = this.keyCounts.get(partitionId) ?? 0;
+    if (currentCount > 0) {
+      this.keyCounts.set(partitionId, currentCount - 1);
+    }
+    this.lastUpdated.set(partitionId, Date.now());
+    this.emit("treeUpdated", {
+      partitionId,
+      key,
+      rootHash: tree.getRootHash()
+    });
+  }
+  /**
+   * Get the path prefix for a key in the Merkle tree
+   */
+  getKeyPath(key) {
+    const hash = (0, import_core12.hashString)(key).toString(16).padStart(8, "0");
+    return hash.slice(0, this.config.treeDepth);
+  }
+  /**
+   * Get root hash for a partition
+   */
+  getRootHash(partitionId) {
+    const tree = this.trees.get(partitionId);
+    return tree?.getRootHash() ?? 0;
+  }
+  /**
+   * Compare local tree with remote root hash
+   */
+  compareWithRemote(partitionId, remoteRoot) {
+    const tree = this.getTree(partitionId);
+    const localRoot = tree.getRootHash();
+    return {
+      partitionId,
+      localRoot,
+      remoteRoot,
+      needsSync: localRoot !== remoteRoot,
+      differingBuckets: localRoot !== remoteRoot ? this.findDifferingBuckets(tree, remoteRoot) : []
+    };
+  }
+  /**
+   * Find buckets that differ between local and remote tree
+   * Note: This is a simplified version - full implementation would
+   * need to exchange bucket hashes with the remote node
+   */
+  findDifferingBuckets(tree, _remoteRoot) {
+    const buckets = [];
+    this.collectLeafBuckets(tree, "", buckets);
+    return buckets;
+  }
+  /**
+   * Recursively collect all leaf bucket paths
+   */
+  collectLeafBuckets(tree, path, result) {
+    if (path.length >= this.config.treeDepth) {
+      const keys = tree.getKeysInBucket(path);
+      if (keys.length > 0) {
+        result.push(path);
+      }
+      return;
+    }
+    const buckets = tree.getBuckets(path);
+    for (const char of Object.keys(buckets)) {
+      this.collectLeafBuckets(tree, path + char, result);
+    }
+  }
+  /**
+   * Get bucket hashes for a partition at a given path
+   */
+  getBuckets(partitionId, path) {
+    const tree = this.trees.get(partitionId);
+    return tree?.getBuckets(path) ?? {};
+  }
+  /**
+   * Get keys in a specific bucket
+   */
+  getKeysInBucket(partitionId, path) {
+    const tree = this.trees.get(partitionId);
+    return tree?.getKeysInBucket(path) ?? [];
+  }
+  /**
+   * Get all keys across all buckets for a partition
+   */
+  getAllKeys(partitionId) {
+    const tree = this.trees.get(partitionId);
+    if (!tree) return [];
+    const keys = [];
+    this.collectAllKeys(tree, "", keys);
+    return keys;
+  }
+  /**
+   * Recursively collect all keys from the tree
+   */
+  collectAllKeys(tree, path, result) {
+    if (path.length >= this.config.treeDepth) {
+      const keys = tree.getKeysInBucket(path);
+      result.push(...keys);
+      return;
+    }
+    const buckets = tree.getBuckets(path);
+    for (const char of Object.keys(buckets)) {
+      this.collectAllKeys(tree, path + char, result);
+    }
+  }
+  /**
+   * Get info about all managed partitions
+   */
+  getPartitionInfos() {
+    const infos = [];
+    for (const [partitionId, tree] of this.trees) {
+      infos.push({
+        partitionId,
+        rootHash: tree.getRootHash(),
+        keyCount: this.keyCounts.get(partitionId) ?? 0,
+        lastUpdated: this.lastUpdated.get(partitionId) ?? 0
+      });
+    }
+    return infos;
+  }
+  /**
+   * Get info for a specific partition
+   */
+  getPartitionInfo(partitionId) {
+    const tree = this.trees.get(partitionId);
+    if (!tree) return null;
+    return {
+      partitionId,
+      rootHash: tree.getRootHash(),
+      keyCount: this.keyCounts.get(partitionId) ?? 0,
+      lastUpdated: this.lastUpdated.get(partitionId) ?? 0
+    };
+  }
+  /**
+   * Clear tree for a partition (e.g., after migration)
+   */
+  clearPartition(partitionId) {
+    this.trees.delete(partitionId);
+    this.keyCounts.delete(partitionId);
+    this.lastUpdated.delete(partitionId);
+  }
+  /**
+   * Clear all trees
+   */
+  clearAll() {
+    this.trees.clear();
+    this.keyCounts.clear();
+    this.lastUpdated.clear();
+  }
+  /**
+   * Get metrics for monitoring
+   */
+  getMetrics() {
+    let totalKeys = 0;
+    for (const count of this.keyCounts.values()) {
+      totalKeys += count;
+    }
+    return {
+      totalPartitions: this.trees.size,
+      totalKeys,
+      averageKeysPerPartition: this.trees.size > 0 ? totalKeys / this.trees.size : 0
+    };
+  }
+  /**
+   * Serialize tree state for network transfer
+   */
+  serializeTree(partitionId) {
+    const tree = this.trees.get(partitionId);
+    if (!tree) return null;
+    const buckets = {};
+    for (let depth = 0; depth < this.config.treeDepth; depth++) {
+      this.collectBucketsAtDepth(tree, "", depth, buckets);
+    }
+    return {
+      rootHash: tree.getRootHash(),
+      buckets
+    };
+  }
+  collectBucketsAtDepth(tree, path, targetDepth, result) {
+    if (path.length === targetDepth) {
+      const buckets2 = tree.getBuckets(path);
+      if (Object.keys(buckets2).length > 0) {
+        result[path] = buckets2;
+      }
+      return;
+    }
+    if (path.length > targetDepth) return;
+    const buckets = tree.getBuckets(path);
+    for (const char of Object.keys(buckets)) {
+      this.collectBucketsAtDepth(tree, path + char, targetDepth, result);
+    }
+  }
+};
+
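With treeDepth 3 the manager buckets keys by the first three hex digits of their hash (see getKeyPath), so two nodes can compare a partition with a single root-hash exchange and then narrow mismatches bucket by bucket. A small sketch — the record values and the peer's root hash are hypothetical placeholders, though the timestamp shape matches what RepairScheduler's conflict resolution below expects:

const mgr = new MerkleTreeManager("node-a", { treeDepth: 3 });
mgr.buildTree(7, new Map([
  ["user:1", { value: "alice", timestamp: { millis: 1700000000000, counter: 0, nodeId: "node-a" } }],
  ["user:2", { value: "bob", timestamp: { millis: 1700000000001, counter: 0, nodeId: "node-a" } }]
]));
const report = mgr.compareWithRemote(7, remoteRootHash); // remoteRootHash obtained from a peer
if (report.needsSync) {
  // report.differingBuckets lists leaf bucket paths whose keys must be reconciled
}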
+// src/cluster/RepairScheduler.ts
+var import_events12 = require("events");
+var import_core13 = require("@topgunbuild/core");
+var DEFAULT_REPAIR_CONFIG = {
+  enabled: true,
+  scanIntervalMs: 36e5,
+  // 1 hour
+  repairBatchSize: 1e3,
+  maxConcurrentRepairs: 2,
+  throttleMs: 100,
+  prioritizeRecent: true,
+  requestTimeoutMs: 5e3
+};
+var RepairScheduler = class extends import_events12.EventEmitter {
+  constructor(merkleManager, clusterManager, partitionService, nodeId, config = {}) {
+    super();
+    this.repairQueue = [];
+    this.activeRepairs = /* @__PURE__ */ new Set();
+    this.started = false;
+    // Pending network requests
+    this.pendingRequests = /* @__PURE__ */ new Map();
+    // Metrics
+    this.metrics = {
+      scansCompleted: 0,
+      repairsExecuted: 0,
+      keysRepaired: 0,
+      errorsEncountered: 0,
+      averageRepairDurationMs: 0
+    };
+    this.merkleManager = merkleManager;
+    this.clusterManager = clusterManager;
+    this.partitionService = partitionService;
+    this.nodeId = nodeId;
+    this.config = { ...DEFAULT_REPAIR_CONFIG, ...config };
+    this.setupNetworkHandlers();
+  }
+  /**
+   * Set data access callbacks
+   */
+  setDataAccessors(getRecord, setRecord) {
+    this.getRecord = getRecord;
+    this.setRecord = setRecord;
+  }
+  /**
+   * Setup network message handlers
+   */
+  setupNetworkHandlers() {
+    this.clusterManager.on("message", (msg) => {
+      this.handleClusterMessage(msg);
+    });
+  }
+  /**
+   * Handle incoming cluster messages
+   */
+  handleClusterMessage(msg) {
+    switch (msg.type) {
+      case "CLUSTER_MERKLE_ROOT_REQ":
+        this.handleMerkleRootReq(msg);
+        break;
+      case "CLUSTER_MERKLE_ROOT_RESP":
+        this.handleResponse(msg);
+        break;
+      case "CLUSTER_MERKLE_BUCKETS_REQ":
+        this.handleMerkleBucketsReq(msg);
+        break;
+      case "CLUSTER_MERKLE_BUCKETS_RESP":
+        this.handleResponse(msg);
+        break;
+      case "CLUSTER_MERKLE_KEYS_REQ":
+        this.handleMerkleKeysReq(msg);
+        break;
+      case "CLUSTER_MERKLE_KEYS_RESP":
+        this.handleResponse(msg);
+        break;
+      case "CLUSTER_REPAIR_DATA_REQ":
+        this.handleRepairDataReq(msg);
+        break;
+      case "CLUSTER_REPAIR_DATA_RESP":
+        this.handleResponse(msg);
+        break;
+    }
+  }
+  // === Request Handlers (Passive) ===
+  handleMerkleRootReq(msg) {
+    const { requestId, partitionId } = msg.payload;
+    const rootHash = this.merkleManager.getRootHash(partitionId);
+    this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_ROOT_RESP", {
+      requestId,
+      partitionId,
+      rootHash
+    });
+  }
+  handleMerkleBucketsReq(msg) {
+    const { requestId, partitionId } = msg.payload;
+    const tree = this.merkleManager.serializeTree(partitionId);
+    this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_BUCKETS_RESP", {
+      requestId,
+      partitionId,
+      buckets: tree?.buckets || {}
+    });
+  }
+  handleMerkleKeysReq(msg) {
+    const { requestId, partitionId, path } = msg.payload;
+    const keys = this.merkleManager.getKeysInBucket(partitionId, path);
+    this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_KEYS_RESP", {
+      requestId,
+      partitionId,
+      path,
+      keys
+    });
+  }
+  handleRepairDataReq(msg) {
+    const { requestId, key } = msg.payload;
+    if (!this.getRecord) return;
+    const record = this.getRecord(key);
+    this.clusterManager.send(msg.senderId, "CLUSTER_REPAIR_DATA_RESP", {
+      requestId,
+      key,
+      record
+    });
+  }
+  handleResponse(msg) {
+    const { requestId } = msg.payload;
+    const pending = this.pendingRequests.get(requestId);
+    if (pending) {
+      clearTimeout(pending.timer);
+      this.pendingRequests.delete(requestId);
+      pending.resolve(msg.payload);
+    }
+  }
+  // === Lifecycle Methods ===
+  /**
+   * Start the repair scheduler
+   */
+  start() {
+    if (this.started || !this.config.enabled) return;
+    this.started = true;
+    logger.info({ config: this.config }, "Starting RepairScheduler");
+    this.scanTimer = setInterval(() => {
+      this.scheduleFullScan();
+    }, this.config.scanIntervalMs);
+    this.processTimer = setInterval(() => {
+      this.processRepairQueue();
+    }, 1e3);
+    setTimeout(() => {
+      this.scheduleFullScan();
+    }, 6e4);
+  }
+  /**
+   * Stop the repair scheduler
+   */
+  stop() {
+    if (!this.started) return;
+    this.started = false;
+    if (this.scanTimer) {
+      clearInterval(this.scanTimer);
+      this.scanTimer = void 0;
+    }
+    if (this.processTimer) {
+      clearInterval(this.processTimer);
+      this.processTimer = void 0;
+    }
+    this.repairQueue = [];
+    this.activeRepairs.clear();
+    for (const [id, req] of this.pendingRequests) {
+      clearTimeout(req.timer);
+      req.reject(new Error("Scheduler stopped"));
+    }
+    this.pendingRequests.clear();
+    logger.info("RepairScheduler stopped");
+  }
+  /**
+   * Schedule a full scan of all owned partitions
+   */
+  scheduleFullScan() {
+    const ownedPartitions = this.getOwnedPartitions();
+    const replicas = this.getReplicaPartitions();
+    const allPartitions = [.../* @__PURE__ */ new Set([...ownedPartitions, ...replicas])];
+    logger.info({
+      ownedCount: ownedPartitions.length,
+      replicaCount: replicas.length,
+      totalPartitions: allPartitions.length
+    }, "Scheduling full anti-entropy scan");
+    for (const partitionId of allPartitions) {
+      this.schedulePartitionRepair(partitionId);
+    }
+    this.metrics.scansCompleted++;
+    this.metrics.lastScanTime = Date.now();
+  }
+  /**
+   * Schedule repair for a specific partition
+   */
+  schedulePartitionRepair(partitionId, priority = "normal") {
+    const backups = this.partitionService.getBackups(partitionId);
+    const owner = this.partitionService.getPartitionOwner(partitionId);
+    const replicas = this.nodeId === owner ? backups : owner ? [owner] : [];
+    for (const replicaNodeId of replicas) {
+      const exists = this.repairQueue.some(
+        (t) => t.partitionId === partitionId && t.replicaNodeId === replicaNodeId
+      );
+      if (exists) continue;
+      this.repairQueue.push({
+        partitionId,
+        replicaNodeId,
+        priority,
+        scheduledAt: Date.now()
+      });
+    }
+    this.sortRepairQueue();
+  }
+  /**
+   * Sort repair queue by priority
+   */
+  sortRepairQueue() {
+    const priorityOrder = { high: 0, normal: 1, low: 2 };
+    this.repairQueue.sort((a, b) => {
+      const priorityDiff = priorityOrder[a.priority] - priorityOrder[b.priority];
+      if (priorityDiff !== 0) return priorityDiff;
+      if (this.config.prioritizeRecent) {
+        const infoA = this.merkleManager.getPartitionInfo(a.partitionId);
+        const infoB = this.merkleManager.getPartitionInfo(b.partitionId);
+        if (infoA && infoB) {
+          return infoB.lastUpdated - infoA.lastUpdated;
+        }
+      }
+      return a.scheduledAt - b.scheduledAt;
+    });
+  }
+  /**
+   * Process the repair queue
+   */
+  async processRepairQueue() {
+    if (this.activeRepairs.size >= this.config.maxConcurrentRepairs) {
+      return;
+    }
+    const task = this.repairQueue.shift();
+    if (!task) return;
+    if (this.activeRepairs.has(task.partitionId)) {
+      return;
+    }
+    if (!this.clusterManager.getMembers().includes(task.replicaNodeId)) {
+      logger.debug({ task }, "Skipping repair - replica not available");
+      return;
+    }
+    this.activeRepairs.add(task.partitionId);
+    try {
+      const result = await this.executeRepair(task);
+      this.emit("repairComplete", result);
+      if (result.success) {
+        this.metrics.repairsExecuted++;
+        this.metrics.keysRepaired += result.keysRepaired;
+        this.updateAverageRepairDuration(result.durationMs);
+      } else {
+        this.metrics.errorsEncountered++;
+      }
+    } catch (error) {
+      logger.error({ task, error }, "Repair failed");
+      this.metrics.errorsEncountered++;
+    } finally {
+      this.activeRepairs.delete(task.partitionId);
+    }
+  }
+  /**
+   * Execute repair for a partition-replica pair
+   */
+  async executeRepair(task) {
+    const startTime = Date.now();
+    let keysScanned = 0;
+    let keysRepaired = 0;
+    try {
+      const localRoot = this.merkleManager.getRootHash(task.partitionId);
+      const remoteRoot = await this.requestRemoteMerkleRoot(task.replicaNodeId, task.partitionId);
+      if (localRoot === remoteRoot) {
+        logger.debug({
+          partitionId: task.partitionId,
+          replicaNodeId: task.replicaNodeId
+        }, "Partition in sync");
+        return {
+          partitionId: task.partitionId,
+          replicaNodeId: task.replicaNodeId,
+          keysScanned: 0,
+          keysRepaired: 0,
+          durationMs: Date.now() - startTime,
+          success: true
+        };
+      }
+      const differences = await this.findDifferences(task.partitionId, task.replicaNodeId);
+      keysScanned = differences.length;
+      for (const key of differences) {
+        const repaired = await this.repairKey(task.partitionId, task.replicaNodeId, key);
+        if (repaired) {
+          keysRepaired++;
+        }
+        if (keysRepaired % this.config.repairBatchSize === 0) {
+          await this.sleep(this.config.throttleMs);
+        }
+      }
+      logger.info({
+        partitionId: task.partitionId,
+        replicaNodeId: task.replicaNodeId,
+        keysScanned,
+        keysRepaired,
+        durationMs: Date.now() - startTime
+      }, "Partition repair completed");
+      return {
+        partitionId: task.partitionId,
+        replicaNodeId: task.replicaNodeId,
+        keysScanned,
+        keysRepaired,
+        durationMs: Date.now() - startTime,
+        success: true
+      };
+    } catch (error) {
+      return {
+        partitionId: task.partitionId,
+        replicaNodeId: task.replicaNodeId,
+        keysScanned,
+        keysRepaired,
+        durationMs: Date.now() - startTime,
+        success: false,
+        error: String(error)
+      };
+    }
+  }
+  /**
+   * Send a request and wait for response
+   */
+  sendRequest(nodeId, type, payload) {
+    return new Promise((resolve, reject) => {
+      const requestId = Math.random().toString(36).substring(7);
+      const timer = setTimeout(() => {
+        this.pendingRequests.delete(requestId);
+        reject(new Error(`Request timeout: ${type} to ${nodeId}`));
+      }, this.config.requestTimeoutMs);
+      this.pendingRequests.set(requestId, { resolve, reject, timer });
+      this.clusterManager.send(nodeId, type, { ...payload, requestId });
+    });
+  }
+  /**
+   * Request Merkle root from remote node
+   */
+  async requestRemoteMerkleRoot(nodeId, partitionId) {
+    const response = await this.sendRequest(
+      nodeId,
+      "CLUSTER_MERKLE_ROOT_REQ",
+      { partitionId }
+    );
+    return response.rootHash;
+  }
+  /**
+   * Find keys that differ between local and remote using bucket exchange
+   */
+  async findDifferences(partitionId, replicaNodeId) {
+    const response = await this.sendRequest(
+      replicaNodeId,
+      "CLUSTER_MERKLE_BUCKETS_REQ",
+      { partitionId }
+    );
+    const remoteBuckets = response.buckets;
+    const localTree = this.merkleManager.getTree(partitionId);
+    if (!localTree) return [];
+    const differingKeys = /* @__PURE__ */ new Set();
+    const queue = [""];
+    const maxDepth = 3;
+    while (queue.length > 0) {
+      const path = queue.shift();
+      const localChildren = localTree.getBuckets(path);
+      const remoteChildren = remoteBuckets[path] || {};
+      const allChars = /* @__PURE__ */ new Set([...Object.keys(localChildren), ...Object.keys(remoteChildren)]);
+      for (const char of allChars) {
+        const localHash = localChildren[char] || 0;
+        const remoteHash = remoteChildren[char] || 0;
+        if (localHash !== remoteHash) {
+          const nextPath = path + char;
+          if (nextPath.length >= maxDepth) {
+            const bucketKeysResp = await this.sendRequest(
+              replicaNodeId,
+              "CLUSTER_MERKLE_KEYS_REQ",
+              { partitionId, path: nextPath }
+            );
+            const localBucketKeys = localTree.getKeysInBucket(nextPath);
+            const remoteBucketKeys = bucketKeysResp.keys;
+            for (const k of localBucketKeys) differingKeys.add(k);
+            for (const k of remoteBucketKeys) differingKeys.add(k);
+          } else {
+            queue.push(nextPath);
+          }
+        }
+      }
+    }
+    return Array.from(differingKeys);
+  }
+  /**
+   * Repair a single key
+   */
+  async repairKey(partitionId, replicaNodeId, key) {
+    if (!this.getRecord || !this.setRecord) {
+      return false;
+    }
+    const localRecord = this.getRecord(key);
+    let remoteRecord;
+    try {
+      const response = await this.sendRequest(
+        replicaNodeId,
+        "CLUSTER_REPAIR_DATA_REQ",
+        { key }
+      );
+      remoteRecord = response.record;
+    } catch (e) {
+      logger.warn({ key, replicaNodeId, err: e }, "Failed to fetch remote record for repair");
+      return false;
+    }
+    const resolved = this.resolveConflict(localRecord, remoteRecord);
+    if (!resolved) return false;
+    if (JSON.stringify(resolved) !== JSON.stringify(localRecord)) {
+      this.setRecord(key, resolved);
+      if (JSON.stringify(resolved) !== JSON.stringify(remoteRecord)) {
+        this.clusterManager.send(replicaNodeId, "CLUSTER_REPAIR_DATA_RESP", {
+          // In future: Use dedicated WRITE/REPAIR message
+          // For now we rely on the fact that repair will eventually run on other node too
+        });
+      }
+      return true;
+    }
+    return false;
+  }
+  /**
+   * Resolve conflict between two records using LWW
+   */
+  resolveConflict(a, b) {
+    if (!a && !b) return null;
+    if (!a) return b;
+    if (!b) return a;
+    if (this.compareTimestamps(a.timestamp, b.timestamp) > 0) {
+      return a;
+    }
+    if (this.compareTimestamps(b.timestamp, a.timestamp) > 0) {
+      return b;
+    }
+    if (a.timestamp.nodeId > b.timestamp.nodeId) {
+      return a;
+    }
+    return b;
+  }
+  /**
+   * Compare two timestamps
+   */
+  compareTimestamps(a, b) {
+    if (a.millis !== b.millis) {
+      return a.millis - b.millis;
+    }
+    return a.counter - b.counter;
+  }
+  /**
+   * Get partitions owned by this node
+   */
+  getOwnedPartitions() {
+    const owned = [];
+    for (let i = 0; i < import_core13.PARTITION_COUNT; i++) {
+      if (this.partitionService.getPartitionOwner(i) === this.nodeId) {
+        owned.push(i);
+      }
+    }
+    return owned;
+  }
+  /**
+   * Get partitions where this node is a backup
+   */
+  getReplicaPartitions() {
+    const replicas = [];
+    for (let i = 0; i < import_core13.PARTITION_COUNT; i++) {
+      const backups = this.partitionService.getBackups(i);
+      if (backups.includes(this.nodeId)) {
+        replicas.push(i);
+      }
+    }
+    return replicas;
+  }
+  /**
+   * Update average repair duration
+   */
+  updateAverageRepairDuration(durationMs) {
+    const count = this.metrics.repairsExecuted;
+    const currentAvg = this.metrics.averageRepairDurationMs;
+    this.metrics.averageRepairDurationMs = (currentAvg * (count - 1) + durationMs) / count;
+  }
+  /**
+   * Get repair metrics
+   */
+  getMetrics() {
+    return { ...this.metrics };
+  }
+  /**
+   * Get repair queue status
+   */
+  getQueueStatus() {
+    return {
+      queueLength: this.repairQueue.length,
+      activeRepairs: this.activeRepairs.size,
+      maxConcurrent: this.config.maxConcurrentRepairs
+    };
+  }
+  /**
+   * Force immediate repair for a partition
+   */
+  forceRepair(partitionId) {
+    this.schedulePartitionRepair(partitionId, "high");
+  }
+  /**
+   * Sleep utility
+   */
+  sleep(ms) {
+    return new Promise((resolve) => setTimeout(resolve, ms));
+  }
+};
+
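Wiring the scheduler is mostly dependency injection; the only integration points a host server must supply are the two storage callbacks. A minimal sketch, assuming a `storage` Map standing in for the server's real record store (constructor order, config keys, and the "repairComplete" payload follow the code above):

const scheduler = new RepairScheduler(merkleManager, clusterManager, partitionService, "node-a", {
  scanIntervalMs: 10 * 60 * 1000 // anti-entropy scan every 10 minutes instead of hourly
});
scheduler.setDataAccessors(
  (key) => storage.get(key),                 // getRecord: read a local record
  (key, record) => storage.set(key, record)  // setRecord: apply the LWW winner
);
scheduler.on("repairComplete", (result) => {
  console.log(`partition ${result.partitionId}: repaired ${result.keysRepaired} of ${result.keysScanned} keys`);
});
scheduler.start(); // the first full scan fires after the 60 s warm-up timeout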
+// src/handlers/CounterHandler.ts
+var import_core14 = require("@topgunbuild/core");
+var CounterHandler = class {
+  // counterName -> Set<clientId>
+  constructor(nodeId = "server") {
+    this.nodeId = nodeId;
+    this.counters = /* @__PURE__ */ new Map();
+    this.subscriptions = /* @__PURE__ */ new Map();
+  }
+  /**
+   * Get or create a counter by name.
+   */
+  getOrCreateCounter(name) {
+    let counter = this.counters.get(name);
+    if (!counter) {
+      counter = new import_core14.PNCounterImpl({ nodeId: this.nodeId });
+      this.counters.set(name, counter);
+      logger.debug({ name }, "Created new counter");
+    }
+    return counter;
+  }
+  /**
+   * Handle COUNTER_REQUEST - client wants initial state.
+   * @returns Response message to send back to client
+   */
+  handleCounterRequest(clientId, name) {
+    const counter = this.getOrCreateCounter(name);
+    this.subscribe(clientId, name);
+    const state = counter.getState();
+    logger.debug({ clientId, name, value: counter.get() }, "Counter request handled");
+    return {
+      type: "COUNTER_RESPONSE",
+      payload: {
+        name,
+        state: this.stateToObject(state)
+      }
+    };
+  }
+  /**
+   * Handle COUNTER_SYNC - client sends their state to merge.
+   * @returns Merged state and list of clients to broadcast to
+   */
+  handleCounterSync(clientId, name, stateObj) {
+    const counter = this.getOrCreateCounter(name);
+    const incomingState = this.objectToState(stateObj);
+    counter.merge(incomingState);
+    const mergedState = counter.getState();
+    const mergedStateObj = this.stateToObject(mergedState);
+    logger.debug(
+      { clientId, name, value: counter.get() },
+      "Counter sync handled"
+    );
+    this.subscribe(clientId, name);
+    const subscribers = this.subscriptions.get(name) || /* @__PURE__ */ new Set();
+    const broadcastTo = Array.from(subscribers).filter((id) => id !== clientId);
+    return {
+      // Response to the sending client
+      response: {
+        type: "COUNTER_UPDATE",
+        payload: {
+          name,
+          state: mergedStateObj
+        }
+      },
+      // Broadcast to other clients
+      broadcastTo,
+      broadcastMessage: {
+        type: "COUNTER_UPDATE",
+        payload: {
+          name,
+          state: mergedStateObj
+        }
+      }
+    };
+  }
+  /**
+   * Subscribe a client to counter updates.
+   */
+  subscribe(clientId, counterName) {
+    if (!this.subscriptions.has(counterName)) {
+      this.subscriptions.set(counterName, /* @__PURE__ */ new Set());
+    }
+    this.subscriptions.get(counterName).add(clientId);
+    logger.debug({ clientId, counterName }, "Client subscribed to counter");
+  }
+  /**
+   * Unsubscribe a client from counter updates.
+   */
+  unsubscribe(clientId, counterName) {
+    const subs = this.subscriptions.get(counterName);
+    if (subs) {
+      subs.delete(clientId);
+      if (subs.size === 0) {
|
|
7984
|
+
this.subscriptions.delete(counterName);
|
|
7985
|
+
}
|
|
7986
|
+
}
|
|
7987
|
+
}
|
|
7988
|
+
/**
|
|
7989
|
+
* Unsubscribe a client from all counters (e.g., on disconnect).
|
|
7990
|
+
*/
|
|
7991
|
+
unsubscribeAll(clientId) {
|
|
7992
|
+
for (const [counterName, subs] of this.subscriptions) {
|
|
7993
|
+
subs.delete(clientId);
|
|
7994
|
+
if (subs.size === 0) {
|
|
7995
|
+
this.subscriptions.delete(counterName);
|
|
7996
|
+
}
|
|
7997
|
+
}
|
|
7998
|
+
logger.debug({ clientId }, "Client unsubscribed from all counters");
|
|
7999
|
+
}
|
|
8000
|
+
/**
|
|
8001
|
+
* Get current counter value (for monitoring/debugging).
|
|
8002
|
+
*/
|
|
8003
|
+
getCounterValue(name) {
|
|
8004
|
+
const counter = this.counters.get(name);
|
|
8005
|
+
return counter ? counter.get() : 0;
|
|
8006
|
+
}
|
|
8007
|
+
/**
|
|
8008
|
+
* Get all counter names.
|
|
8009
|
+
*/
|
|
8010
|
+
getCounterNames() {
|
|
8011
|
+
return Array.from(this.counters.keys());
|
|
8012
|
+
}
|
|
8013
|
+
/**
|
|
8014
|
+
* Get number of subscribers for a counter.
|
|
8015
|
+
*/
|
|
8016
|
+
getSubscriberCount(name) {
|
|
8017
|
+
return this.subscriptions.get(name)?.size || 0;
|
|
8018
|
+
}
|
|
8019
|
+
/**
|
|
8020
|
+
* Convert Map-based state to plain object for serialization.
|
|
8021
|
+
*/
|
|
8022
|
+
stateToObject(state) {
|
|
8023
|
+
return {
|
|
8024
|
+
p: Object.fromEntries(state.positive),
|
|
8025
|
+
n: Object.fromEntries(state.negative)
|
|
8026
|
+
};
|
|
8027
|
+
}
|
|
8028
|
+
/**
|
|
8029
|
+
* Convert plain object to Map-based state.
|
|
8030
|
+
*/
|
|
8031
|
+
objectToState(obj) {
|
|
8032
|
+
return {
|
|
8033
|
+
positive: new Map(Object.entries(obj.p || {})),
|
|
8034
|
+
negative: new Map(Object.entries(obj.n || {}))
|
|
8035
|
+
};
|
|
8036
|
+
}
|
|
8037
|
+
};
|
|
8038
|
+
|
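`stateToObject`/`objectToState` above define the `{p, n}` wire format for PN-Counter state: per-node increment and decrement tallies, serialized as plain objects because `Map` does not survive JSON. A self-contained sketch of the round trip, and of how the counter value falls out of the two tallies (the PN-Counter internals here are assumed, not taken from @topgunbuild/core):

```ts
// Assumed internal shape: one positive and one negative tally per nodeId.
type PNState = { positive: Map<string, number>; negative: Map<string, number> };

function stateToObject(state: PNState) {
  return { p: Object.fromEntries(state.positive), n: Object.fromEntries(state.negative) };
}

function objectToState(obj: { p?: Record<string, number>; n?: Record<string, number> }): PNState {
  return {
    positive: new Map(Object.entries(obj.p ?? {})),
    negative: new Map(Object.entries(obj.n ?? {})),
  };
}

// The counter value is all increments minus all decrements across nodes.
function valueOf(state: PNState): number {
  const sum = (m: Map<string, number>) => [...m.values()].reduce((a, b) => a + b, 0);
  return sum(state.positive) - sum(state.negative);
}
```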
+// src/handlers/EntryProcessorHandler.ts
+var import_core16 = require("@topgunbuild/core");
+
+// src/ProcessorSandbox.ts
+var import_core15 = require("@topgunbuild/core");
+var ivm = null;
+try {
+  ivm = require("isolated-vm");
+} catch {
+  const isProduction = process.env.NODE_ENV === "production";
+  if (isProduction) {
+    logger.error(
+      "SECURITY WARNING: isolated-vm not available in production! Entry processors will run in less secure fallback mode. Install isolated-vm for production environments: pnpm add isolated-vm"
+    );
+  } else {
+    logger.warn("isolated-vm not available, falling back to less secure VM");
+  }
+}
+var DEFAULT_SANDBOX_CONFIG = {
+  memoryLimitMb: 8,
+  timeoutMs: 100,
+  maxCachedIsolates: 100,
+  strictValidation: true
+};
+var ProcessorSandbox = class {
+  constructor(config = {}) {
+    this.isolateCache = /* @__PURE__ */ new Map();
+    this.scriptCache = /* @__PURE__ */ new Map();
+    this.fallbackScriptCache = /* @__PURE__ */ new Map();
+    this.disposed = false;
+    this.config = { ...DEFAULT_SANDBOX_CONFIG, ...config };
+  }
+  /**
+   * Execute an entry processor in the sandbox.
+   *
+   * @param processor The processor definition (name, code, args)
+   * @param value The current value for the key (or undefined)
+   * @param key The key being processed
+   * @returns Result containing success status, result, and new value
+   */
+  async execute(processor, value, key) {
+    if (this.disposed) {
+      return {
+        success: false,
+        error: "Sandbox has been disposed"
+      };
+    }
+    if (this.config.strictValidation) {
+      const validation = (0, import_core15.validateProcessorCode)(processor.code);
+      if (!validation.valid) {
+        return {
+          success: false,
+          error: validation.error
+        };
+      }
+    }
+    if (ivm) {
+      return this.executeInIsolate(processor, value, key);
+    } else {
+      return this.executeInFallback(processor, value, key);
+    }
+  }
+  /**
+   * Execute processor in isolated-vm (secure production mode).
+   */
+  async executeInIsolate(processor, value, key) {
+    if (!ivm) {
+      return { success: false, error: "isolated-vm not available" };
+    }
+    const isolate = this.getOrCreateIsolate(processor.name);
+    try {
+      const context = await isolate.createContext();
+      const jail = context.global;
+      await jail.set("global", jail.derefInto());
+      await context.eval(`
+        var value = ${JSON.stringify(value)};
+        var key = ${JSON.stringify(key)};
+        var args = ${JSON.stringify(processor.args)};
+      `);
+      const wrappedCode = `
+        (function() {
+          ${processor.code}
+        })()
+      `;
       const script = await this.getOrCompileScript(
         processor.name,
         wrappedCode,
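`executeInIsolate` above injects `value`, `key`, and `args` as JSON strings so the untrusted processor code never holds references into the host realm, then runs the code as a wrapped IIFE. A hedged sketch of the same pattern against the public isolated-vm API, with limits mirroring `DEFAULT_SANDBOX_CONFIG` (8 MB, 100 ms); the function name and surrounding plumbing are illustrative, and the per-call isolate here skips the caching the real class does:

```ts
import ivm from "isolated-vm";

// Illustrative helper; the real ProcessorSandbox caches isolates and scripts.
async function runProcessor(code: string, value: unknown, key: string, args: unknown) {
  const isolate = new ivm.Isolate({ memoryLimit: 8 });
  const context = await isolate.createContext();
  const jail = context.global;
  await jail.set("global", jail.derefInto());
  // State crosses the boundary by value (JSON), never by reference.
  await context.eval(`
    var value = ${JSON.stringify(value)};
    var key = ${JSON.stringify(key)};
    var args = ${JSON.stringify(args)};
  `);
  const script = await isolate.compileScript(`(function() { ${code} })()`);
  try {
    // timeout caps CPU time; copy:true transfers the result back by value.
    return await script.run(context, { timeout: 100, copy: true });
  } finally {
    isolate.dispose();
  }
}
```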
@@ -7045,7 +8299,7 @@ var EntryProcessorHandler = class {
    * @returns Result with success status, processor result, and new value
    */
   async executeOnKey(map, key, processorDef) {
-    const parseResult =
+    const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
     if (!parseResult.success) {
       logger.warn(
         { key, error: parseResult.error.message },
@@ -7111,7 +8365,7 @@ var EntryProcessorHandler = class {
   async executeOnKeys(map, keys, processorDef) {
     const results = /* @__PURE__ */ new Map();
     const timestamps = /* @__PURE__ */ new Map();
-    const parseResult =
+    const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
     if (!parseResult.success) {
       const errorResult = {
         success: false,
@@ -7148,7 +8402,7 @@ var EntryProcessorHandler = class {
   async executeOnEntries(map, processorDef, predicateCode) {
     const results = /* @__PURE__ */ new Map();
     const timestamps = /* @__PURE__ */ new Map();
-    const parseResult =
+    const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
     if (!parseResult.success) {
       return { results, timestamps };
     }
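All three `executeOn*` entry points now validate the incoming definition with `EntryProcessorDefSchema.safeParse` before any sandbox work happens. `safeParse` follows the Zod convention of returning a result object instead of throwing; a sketch under the assumption that the schema is Zod-based (the real schema lives in @topgunbuild/core and may validate more fields):

```ts
import { z } from "zod";

// Hypothetical shape; the published EntryProcessorDefSchema may differ.
const EntryProcessorDefSketch = z.object({
  name: z.string(),
  code: z.string(),
  args: z.unknown().optional(),
});

function validateDef(input: unknown) {
  const parsed = EntryProcessorDefSketch.safeParse(input);
  // safeParse never throws: invalid definitions are rejected up front.
  return parsed.success
    ? { ok: true as const, def: parsed.data }
    : { ok: false as const, error: parsed.error.message };
}
```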
@@ -7207,7 +8461,7 @@ var EntryProcessorHandler = class {
 };

 // src/ConflictResolverService.ts
-var
+var import_core17 = require("@topgunbuild/core");
 var DEFAULT_CONFLICT_RESOLVER_CONFIG = {
   maxResolversPerMap: 100,
   enableSandboxedResolvers: true,
@@ -7238,7 +8492,7 @@ var ConflictResolverService = class {
       throw new Error("ConflictResolverService has been disposed");
     }
     if (resolver.code) {
-      const parsed =
+      const parsed = import_core17.ConflictResolverDefSchema.safeParse({
         name: resolver.name,
         code: resolver.code,
         priority: resolver.priority,
@@ -7247,7 +8501,7 @@ var ConflictResolverService = class {
      if (!parsed.success) {
        throw new Error(`Invalid resolver definition: ${parsed.error.message}`);
      }
-      const validation = (0,
+      const validation = (0, import_core17.validateResolverCode)(resolver.code);
      if (!validation.valid) {
        throw new Error(`Invalid resolver code: ${validation.error}`);
      }
@@ -7309,7 +8563,7 @@ var ConflictResolverService = class {
     const entries = this.resolvers.get(context.mapName) ?? [];
     const allEntries = [
       ...entries,
-      { resolver:
+      { resolver: import_core17.BuiltInResolvers.LWW() }
     ];
     for (const entry of allEntries) {
       const { resolver } = entry;
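The resolution loop above always appends `BuiltInResolvers.LWW()` after the registered resolvers, so conflict resolution can never come up empty-handed. A sketch of that fallback ordering with illustrative types (the real resolver interface is defined by this package and may differ; returning `undefined` as "not my conflict" is an assumption):

```ts
interface Resolver<T> {
  // undefined means "this resolver declines", letting the next one try.
  resolve(local: T, remote: T): T | undefined;
}

function resolveWithChain<T>(resolvers: Resolver<T>[], lww: Resolver<T>, local: T, remote: T): T {
  for (const r of [...resolvers, lww]) {
    const result = r.resolve(local, remote);
    if (result !== undefined) return result;
  }
  // Unreachable in practice: the LWW fallback always returns a winner.
  return remote;
}
```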
@@ -7665,9 +8919,9 @@ var ConflictResolverHandler = class {
 };

 // src/EventJournalService.ts
-var
+var import_core18 = require("@topgunbuild/core");
 var DEFAULT_JOURNAL_SERVICE_CONFIG = {
-  ...
+  ...import_core18.DEFAULT_EVENT_JOURNAL_CONFIG,
   tableName: "event_journal",
   persistBatchSize: 100,
   persistIntervalMs: 1e3
@@ -7680,7 +8934,7 @@ function validateTableName(name) {
     );
   }
 }
-var EventJournalService = class extends
+var EventJournalService = class extends import_core18.EventJournalImpl {
   constructor(config) {
     super(config);
     this.pendingPersist = [];
@@ -8027,7 +9281,7 @@ var ServerCoordinator = class {
       this._readyResolve = resolve;
     });
     this._nodeId = config.nodeId;
-    this.hlc = new
+    this.hlc = new import_core19.HLC(config.nodeId);
     this.storage = config.storage;
     const rawSecret = config.jwtSecret || process.env.JWT_SECRET || "topgun-secret-dev";
     this.jwtSecret = rawSecret.replace(/\\n/g, "\n");
@@ -8169,8 +9423,8 @@ var ServerCoordinator = class {
       this.cluster,
       this.partitionService,
       {
-        ...
-        defaultConsistency: config.defaultConsistency ??
+        ...import_core19.DEFAULT_REPLICATION_CONFIG,
+        defaultConsistency: config.defaultConsistency ?? import_core19.ConsistencyLevel.EVENTUAL,
         ...config.replicationConfig
       }
     );
@@ -8212,6 +9466,52 @@ var ServerCoordinator = class {
         logger.error({ err }, "Failed to initialize EventJournalService");
       });
     }
+    this.partitionReassigner = new PartitionReassigner(
+      this.cluster,
+      this.partitionService,
+      { reassignmentDelayMs: 1e3 }
+    );
+    this.partitionReassigner.on("failoverComplete", (event) => {
+      logger.info({
+        failedNodeId: event.failedNodeId,
+        partitionsReassigned: event.partitionsReassigned,
+        durationMs: event.durationMs
+      }, "Partition failover completed");
+      this.broadcastPartitionMap(this.partitionService.getPartitionMap());
+    });
+    logger.info("PartitionReassigner initialized");
+    this.readReplicaHandler = new ReadReplicaHandler(
+      this.partitionService,
+      this.cluster,
+      this._nodeId,
+      void 0,
+      // LagTracker - can be added later
+      {
+        defaultConsistency: config.defaultConsistency ?? import_core19.ConsistencyLevel.STRONG,
+        preferLocalReplica: true,
+        loadBalancing: "latency-based"
+      }
+    );
+    logger.info("ReadReplicaHandler initialized");
+    this.merkleTreeManager = new MerkleTreeManager(this._nodeId);
+    this.repairScheduler = new RepairScheduler(
+      this.merkleTreeManager,
+      this.cluster,
+      this.partitionService,
+      this._nodeId,
+      {
+        enabled: true,
+        scanIntervalMs: 3e5,
+        // 5 minutes
+        maxConcurrentRepairs: 2
+      }
+    );
+    this.repairScheduler.setDataAccessors(
+      (key) => this.getLocalRecord(key) ?? void 0,
+      (key, record) => this.applyRepairRecord(key, record)
+    );
+    this.repairScheduler.start();
+    logger.info("MerkleTreeManager and RepairScheduler initialized");
     this.systemManager = new SystemManager(
       this.cluster,
       this.metricsService,
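Note the `setDataAccessors` wiring above: the coordinator hands `RepairScheduler` two closures rather than a reference to its maps or storage, so the scheduler stays storage-agnostic. A sketch of that inversion with hypothetical types:

```ts
// Illustrative types only; the real RepairScheduler API is defined in this package.
type GetRecord<R> = (key: string) => R | undefined;
type ApplyRecord<R> = (key: string, record: R) => void;

class RepairSchedulerSketch<R> {
  private getRecord?: GetRecord<R>;
  private applyRecord?: ApplyRecord<R>;

  // The scheduler only ever sees whatever these two closures expose,
  // which also makes it trivial to stub in tests.
  setDataAccessors(getRecord: GetRecord<R>, applyRecord: ApplyRecord<R>): void {
    this.getRecord = getRecord;
    this.applyRecord = applyRecord;
  }
}
```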
@@ -8308,8 +9608,84 @@ var ServerCoordinator = class {
   getTaskletScheduler() {
     return this.taskletScheduler;
   }
+  /**
+   * Phase 10.02: Graceful cluster departure
+   *
+   * Notifies the cluster that this node is leaving and allows time for:
+   * 1. Pending replication to complete
+   * 2. Other nodes to detect departure
+   * 3. Partition reassignment to begin
+   */
+  async gracefulClusterDeparture() {
+    if (!this.cluster || this.cluster.getMembers().length <= 1) {
+      return;
+    }
+    const nodeId = this._nodeId;
+    const ownedPartitions = this.partitionService ? this.getOwnedPartitions() : [];
+    logger.info({
+      nodeId,
+      ownedPartitions: ownedPartitions.length,
+      clusterMembers: this.cluster.getMembers().length
+    }, "Initiating graceful cluster departure");
+    const departureMessage = {
+      type: "NODE_LEAVING",
+      nodeId,
+      partitions: ownedPartitions,
+      timestamp: Date.now()
+    };
+    for (const memberId of this.cluster.getMembers()) {
+      if (memberId !== nodeId) {
+        try {
+          this.cluster.send(memberId, "CLUSTER_EVENT", departureMessage);
+        } catch (e) {
+          logger.warn({ memberId, err: e }, "Failed to notify peer of departure");
+        }
+      }
+    }
+    if (this.replicationPipeline) {
+      logger.info("Waiting for pending replication to complete...");
+      try {
+        await this.waitForReplicationFlush(3e3);
+        logger.info("Replication flush complete");
+      } catch (e) {
+        logger.warn({ err: e }, "Replication flush timeout - some data may not be replicated");
+      }
+    }
+    await new Promise((resolve) => setTimeout(resolve, 500));
+    logger.info({ nodeId }, "Graceful cluster departure complete");
+  }
+  /**
+   * Get list of partition IDs owned by this node
+   */
+  getOwnedPartitions() {
+    if (!this.partitionService) return [];
+    const partitionMap = this.partitionService.getPartitionMap();
+    const owned = [];
+    for (const partition of partitionMap.partitions) {
+      if (partition.ownerNodeId === this._nodeId) {
+        owned.push(partition.partitionId);
+      }
+    }
+    return owned;
+  }
+  /**
+   * Wait for replication pipeline to flush pending operations
+   */
+  async waitForReplicationFlush(timeoutMs) {
+    if (!this.replicationPipeline) return;
+    const startTime = Date.now();
+    while (Date.now() - startTime < timeoutMs) {
+      const pendingOps = this.replicationPipeline.getTotalPending();
+      if (pendingOps === 0) {
+        return;
+      }
+      await new Promise((resolve) => setTimeout(resolve, 100));
+    }
+    throw new Error("Replication flush timeout");
+  }
   async shutdown() {
     logger.info("Shutting down Server Coordinator...");
+    await this.gracefulClusterDeparture();
     this.httpServer.close();
     if (this.metricsServer) {
       this.metricsServer.close();
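With `shutdown()` now running the graceful departure first, a typical deployment triggers it from process signals so the node announces itself, flushes replication, and only then drops connections. A hedged usage sketch (the `coordinator` handle is hypothetical):

```ts
// Hypothetical handle; ServerCoordinator.shutdown() is the real entry point above.
declare const coordinator: { shutdown(): Promise<void> };

process.on("SIGTERM", () => {
  coordinator.shutdown()
    .then(() => process.exit(0))
    .catch(() => process.exit(1));
});
```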
@@ -8317,7 +9693,7 @@ var ServerCoordinator = class {
     this.metricsService.destroy();
     this.wss.close();
     logger.info(`Closing ${this.clients.size} client connections...`);
-    const shutdownMsg = (0,
+    const shutdownMsg = (0, import_core19.serialize)({ type: "SHUTDOWN_PENDING", retryAfter: 5e3 });
     for (const client of this.clients.values()) {
       try {
         if (client.socket.readyState === import_ws3.WebSocket.OPEN) {
@@ -8342,6 +9718,14 @@ var ServerCoordinator = class {
     if (this.replicationPipeline) {
       this.replicationPipeline.close();
     }
+    if (this.repairScheduler) {
+      this.repairScheduler.stop();
+      logger.info("RepairScheduler stopped");
+    }
+    if (this.partitionReassigner) {
+      this.partitionReassigner.stop();
+      logger.info("PartitionReassigner stopped");
+    }
     if (this.cluster) {
       this.cluster.stop();
     }
@@ -8442,7 +9826,7 @@ var ServerCoordinator = class {
       buf = Buffer.from(message);
     }
     try {
-      data = (0,
+      data = (0, import_core19.deserialize)(buf);
     } catch (e) {
       try {
         const text = Buffer.isBuffer(buf) ? buf.toString() : new TextDecoder().decode(buf);
@@ -8494,10 +9878,10 @@ var ServerCoordinator = class {
       this.clients.delete(clientId);
       this.metricsService.setConnectedClients(this.clients.size);
     });
-    ws.send((0,
+    ws.send((0, import_core19.serialize)({ type: "AUTH_REQUIRED" }));
   }
   async handleMessage(client, rawMessage) {
-    const parseResult =
+    const parseResult = import_core19.MessageSchema.safeParse(rawMessage);
     if (!parseResult.success) {
       logger.error({ clientId: client.id, error: parseResult.error }, "Invalid message format from client");
       client.writer.write({
@@ -8557,7 +9941,32 @@ var ServerCoordinator = class {
       logger.info({ clientId: client.id, mapName, query }, "Client subscribed");
       this.metricsService.incOp("SUBSCRIBE", mapName);
       const allMembers = this.cluster.getMembers();
-
+      let remoteMembers = allMembers.filter((id) => !this.cluster.isLocal(id));
+      const queryKey = query._id || query.where?._id;
+      if (queryKey && typeof queryKey === "string" && this.readReplicaHandler) {
+        try {
+          const targetNode = this.readReplicaHandler.selectReadNode({
+            mapName,
+            key: queryKey,
+            options: {
+              // Default to EVENTUAL for read scaling unless specified otherwise
+              // In future, we could extract consistency from query options if available
+              consistency: import_core19.ConsistencyLevel.EVENTUAL
+            }
+          });
+          if (targetNode) {
+            if (this.cluster.isLocal(targetNode)) {
+              remoteMembers = [];
+              logger.debug({ clientId: client.id, mapName, key: queryKey }, "Read optimization: Serving locally");
+            } else if (remoteMembers.includes(targetNode)) {
+              remoteMembers = [targetNode];
+              logger.debug({ clientId: client.id, mapName, key: queryKey, targetNode }, "Read optimization: Routing to replica");
+            }
+          }
+        } catch (e) {
+          logger.warn({ err: e }, "Error in ReadReplicaHandler selection");
+        }
+      }
       const requestId = crypto.randomUUID();
       const pending = {
         requestId,
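The added block narrows the subscription fan-out: when the query pins a single key, `selectReadNode` picks one target, and the coordinator either serves locally or forwards to exactly one replica instead of querying every remote member. A sketch of just that branch logic (helper signatures are illustrative):

```ts
// Mirrors the routing branches above; not the package's API.
function narrowFanout(
  allMembers: string[],
  isLocal: (id: string) => boolean,
  target: string | null
): string[] {
  let remote = allMembers.filter((id) => !isLocal(id));
  if (!target) return remote;                   // no routing hint: ask every remote member
  if (isLocal(target)) return [];               // best replica is local: skip remotes entirely
  if (remote.includes(target)) return [target]; // route to the single chosen replica
  return remote;
}
```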
@@ -8737,7 +10146,7 @@ var ServerCoordinator = class {
     this.metricsService.incOp("GET", message.mapName);
     try {
       const mapForSync = await this.getMapAsync(message.mapName);
-      if (mapForSync instanceof
+      if (mapForSync instanceof import_core19.LWWMap) {
         const tree = mapForSync.getMerkleTree();
         const rootHash = tree.getRootHash();
         client.writer.write({
@@ -8775,7 +10184,7 @@ var ServerCoordinator = class {
     const { mapName, path } = message.payload;
     try {
       const mapForBucket = await this.getMapAsync(mapName);
-      if (mapForBucket instanceof
+      if (mapForBucket instanceof import_core19.LWWMap) {
         const treeForBucket = mapForBucket.getMerkleTree();
         const buckets = treeForBucket.getBuckets(path);
         const node = treeForBucket.getNode(path);
@@ -9157,7 +10566,7 @@ var ServerCoordinator = class {
     this.metricsService.incOp("GET", message.mapName);
     try {
       const mapForSync = await this.getMapAsync(message.mapName, "OR");
-      if (mapForSync instanceof
+      if (mapForSync instanceof import_core19.ORMap) {
         const tree = mapForSync.getMerkleTree();
         const rootHash = tree.getRootHash();
         client.writer.write({
@@ -9194,7 +10603,7 @@ var ServerCoordinator = class {
     const { mapName, path } = message.payload;
     try {
       const mapForBucket = await this.getMapAsync(mapName, "OR");
-      if (mapForBucket instanceof
+      if (mapForBucket instanceof import_core19.ORMap) {
         const tree = mapForBucket.getMerkleTree();
         const buckets = tree.getBuckets(path);
         const isLeaf = tree.isLeaf(path);
@@ -9238,7 +10647,7 @@ var ServerCoordinator = class {
     const { mapName: diffMapName, keys } = message.payload;
     try {
       const mapForDiff = await this.getMapAsync(diffMapName, "OR");
-      if (mapForDiff instanceof
+      if (mapForDiff instanceof import_core19.ORMap) {
         const entries = [];
         const allTombstones = mapForDiff.getTombstones();
         for (const key of keys) {
@@ -9270,7 +10679,7 @@ var ServerCoordinator = class {
     const { mapName: pushMapName, entries: pushEntries } = message.payload;
     try {
       const mapForPush = await this.getMapAsync(pushMapName, "OR");
-      if (mapForPush instanceof
+      if (mapForPush instanceof import_core19.ORMap) {
         let totalAdded = 0;
         let totalUpdated = 0;
         for (const entry of pushEntries) {
@@ -9411,7 +10820,7 @@ var ServerCoordinator = class {
     } else if (op.orRecord && op.orRecord.timestamp) {
     } else if (op.orTag) {
       try {
-        ts =
+        ts = import_core19.HLC.parse(op.orTag);
       } catch (e) {
       }
     }
@@ -9435,7 +10844,7 @@ var ServerCoordinator = class {
     };
     let broadcastCount = 0;
     for (const client of this.clients.values()) {
-      if (client.isAuthenticated && client.socket.readyState === import_ws3.WebSocket.OPEN) {
+      if (client.isAuthenticated && client.socket.readyState === import_ws3.WebSocket.OPEN && client.writer) {
        client.writer.write(message);
        broadcastCount++;
      }
@@ -9508,7 +10917,7 @@ var ServerCoordinator = class {
       client.writer.write({ ...message, payload: newPayload });
      }
    } else {
-      const msgData = (0,
+      const msgData = (0, import_core19.serialize)(message);
      for (const [id, client] of this.clients) {
        if (id !== excludeClientId && client.socket.readyState === 1) {
          client.writer.writeRaw(msgData);
@@ -9586,7 +10995,7 @@ var ServerCoordinator = class {
       payload: { events: filteredEvents },
       timestamp: this.hlc.now()
     };
-    const serializedBatch = (0,
+    const serializedBatch = (0, import_core19.serialize)(batchMessage);
     for (const client of clients) {
       try {
         client.writer.writeRaw(serializedBatch);
@@ -9671,7 +11080,7 @@ var ServerCoordinator = class {
       payload: { events: filteredEvents },
       timestamp: this.hlc.now()
     };
-    const serializedBatch = (0,
+    const serializedBatch = (0, import_core19.serialize)(batchMessage);
     for (const client of clients) {
       sendPromises.push(new Promise((resolve, reject) => {
         try {
@@ -9702,7 +11111,14 @@ var ServerCoordinator = class {
     this.cluster.on("message", (msg) => {
       switch (msg.type) {
         case "OP_FORWARD":
+          if (msg.payload._replication || msg.payload._migration) {
+            break;
+          }
           logger.info({ senderId: msg.senderId }, "Received forwarded op");
+          if (!msg.payload.key) {
+            logger.warn({ senderId: msg.senderId }, "OP_FORWARD missing key, dropping");
+            break;
+          }
           if (this.partitionService.isLocalOwner(msg.payload.key)) {
             this.processLocalOp(msg.payload, true, msg.senderId).catch((err) => {
               logger.error({ err, senderId: msg.senderId }, "Forwarded op failed");
@@ -9809,6 +11225,51 @@ var ServerCoordinator = class {
           this.topicManager.publish(topic, data, originalSenderId, true);
           break;
         }
+        // Phase 10.04: Anti-entropy repair messages
+        case "CLUSTER_MERKLE_ROOT_REQ": {
+          const { partitionId, requestId } = msg.payload;
+          const rootHash = this.merkleTreeManager?.getRootHash(partitionId) ?? 0;
+          this.cluster.send(msg.senderId, "CLUSTER_MERKLE_ROOT_RESP", {
+            requestId,
+            partitionId,
+            rootHash
+          });
+          break;
+        }
+        case "CLUSTER_MERKLE_ROOT_RESP": {
+          if (this.repairScheduler) {
+            this.repairScheduler.emit("merkleRootResponse", {
+              nodeId: msg.senderId,
+              ...msg.payload
+            });
+          }
+          break;
+        }
+        case "CLUSTER_REPAIR_DATA_REQ": {
+          const { partitionId, keys, requestId } = msg.payload;
+          const records = {};
+          for (const key of keys) {
+            const record = this.getLocalRecord(key);
+            if (record) {
+              records[key] = record;
+            }
+          }
+          this.cluster.send(msg.senderId, "CLUSTER_REPAIR_DATA_RESP", {
+            requestId,
+            partitionId,
+            records
+          });
+          break;
+        }
+        case "CLUSTER_REPAIR_DATA_RESP": {
+          if (this.repairScheduler) {
+            this.repairScheduler.emit("repairDataResponse", {
+              nodeId: msg.senderId,
+              ...msg.payload
+            });
+          }
+          break;
+        }
       }
     });
   }
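The four `CLUSTER_*` cases above form a two-round anti-entropy exchange: compare Merkle roots first, then pull only the records that may diverge, with responses correlated by `requestId`. A sketch of the initiator's side with hypothetical messaging helpers (`send`, `once`, and the rest are stand-ins, not this package's API):

```ts
import { randomUUID } from "node:crypto";

// Hypothetical helpers standing in for the cluster messaging layer:
declare function send(peer: string, type: string, payload: unknown): void;
declare function once(type: string, requestId: string): Promise<any>;
declare function localRootHash(partitionId: number): number;
declare function divergentKeys(partitionId: number): string[];
declare function applyRepair(key: string, record: unknown): void;

async function antiEntropyRound(peer: string, partitionId: number): Promise<void> {
  const requestId = randomUUID();
  // Round 1: compare Merkle roots; identical roots mean the partitions agree.
  send(peer, "CLUSTER_MERKLE_ROOT_REQ", { partitionId, requestId });
  const { rootHash } = await once("CLUSTER_MERKLE_ROOT_RESP", requestId);
  if (rootHash === localRootHash(partitionId)) return;
  // Round 2: pull only the records for keys suspected to diverge.
  send(peer, "CLUSTER_REPAIR_DATA_REQ", { partitionId, keys: divergentKeys(partitionId), requestId });
  const { records } = await once("CLUSTER_REPAIR_DATA_RESP", requestId);
  for (const [key, record] of Object.entries(records)) applyRepair(key, record);
}
```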
@@ -9817,7 +11278,7 @@ var ServerCoordinator = class {
     const localQuery = { ...query };
     delete localQuery.offset;
     delete localQuery.limit;
-    if (map instanceof
+    if (map instanceof import_core19.IndexedLWWMap) {
       const coreQuery = this.convertToCoreQuery(localQuery);
       if (coreQuery) {
         const entries = map.queryEntries(coreQuery);
@@ -9827,7 +11288,7 @@ var ServerCoordinator = class {
       });
      }
    }
-    if (map instanceof
+    if (map instanceof import_core19.IndexedORMap) {
      const coreQuery = this.convertToCoreQuery(localQuery);
      if (coreQuery) {
        const results = map.query(coreQuery);
@@ -9835,14 +11296,14 @@ var ServerCoordinator = class {
      }
    }
    const records = /* @__PURE__ */ new Map();
-    if (map instanceof
+    if (map instanceof import_core19.LWWMap) {
      for (const key of map.allKeys()) {
        const rec = map.getRecord(key);
        if (rec && rec.value !== null) {
          records.set(key, rec);
        }
      }
-    } else if (map instanceof
+    } else if (map instanceof import_core19.ORMap) {
      const items = map.items;
      for (const key of items.keys()) {
        const values = map.get(key);
@@ -9990,11 +11451,11 @@ var ServerCoordinator = class {
   async applyOpToMap(op, remoteNodeId) {
     const typeHint = op.opType === "OR_ADD" || op.opType === "OR_REMOVE" ? "OR" : "LWW";
     const map = this.getMap(op.mapName, typeHint);
-    if (typeHint === "OR" && map instanceof
+    if (typeHint === "OR" && map instanceof import_core19.LWWMap) {
       logger.error({ mapName: op.mapName }, "Map type mismatch: LWWMap but received OR op");
       throw new Error("Map type mismatch: LWWMap but received OR op");
     }
-    if (typeHint === "LWW" && map instanceof
+    if (typeHint === "LWW" && map instanceof import_core19.ORMap) {
       logger.error({ mapName: op.mapName }, "Map type mismatch: ORMap but received LWW op");
       throw new Error("Map type mismatch: ORMap but received LWW op");
     }
@@ -10005,7 +11466,7 @@ var ServerCoordinator = class {
       mapName: op.mapName,
       key: op.key
     };
-    if (map instanceof
+    if (map instanceof import_core19.LWWMap) {
       oldRecord = map.getRecord(op.key);
       if (this.conflictResolverHandler.hasResolvers(op.mapName)) {
         const mergeResult = await this.conflictResolverHandler.mergeWithResolver(
@@ -10033,7 +11494,7 @@ var ServerCoordinator = class {
         eventPayload.eventType = "UPDATED";
         eventPayload.record = op.record;
       }
-    } else if (map instanceof
+    } else if (map instanceof import_core19.ORMap) {
       oldRecord = map.getRecords(op.key);
       if (op.opType === "OR_ADD") {
         map.apply(op.key, op.orRecord);
@@ -10049,7 +11510,7 @@ var ServerCoordinator = class {
       }
     }
     this.queryRegistry.processChange(op.mapName, map, op.key, op.record || op.orRecord, oldRecord);
-    const mapSize = map instanceof
+    const mapSize = map instanceof import_core19.ORMap ? map.totalRecords : map.size;
     this.metricsService.setMapSize(op.mapName, mapSize);
     if (this.storage) {
       if (recordToStore) {
@@ -10078,6 +11539,10 @@ var ServerCoordinator = class {
         nodeId: this._nodeId
       });
     }
+    if (this.merkleTreeManager && recordToStore && op.key) {
+      const partitionId = this.partitionService.getPartitionId(op.key);
+      this.merkleTreeManager.updateRecord(partitionId, op.key, recordToStore);
+    }
     return { eventPayload, oldRecord };
   }
   /**
@@ -10205,7 +11670,7 @@ var ServerCoordinator = class {
     if (rejected || !eventPayload) {
       return;
     }
-    if (this.replicationPipeline
+    if (this.replicationPipeline) {
      const opId = op.id || `${op.mapName}:${op.key}:${Date.now()}`;
      this.replicationPipeline.replicate(op, opId, op.key).catch((err) => {
        logger.warn({ opId, key: op.key, err }, "Replication failed (non-fatal)");
@@ -10348,12 +11813,16 @@ var ServerCoordinator = class {
   }
   handleClusterEvent(payload) {
     const { mapName, key, eventType } = payload;
+    if (!key) {
+      logger.warn({ mapName, eventType }, "Received cluster event with undefined key, ignoring");
+      return;
+    }
     const map = this.getMap(mapName, eventType === "OR_ADD" || eventType === "OR_REMOVE" ? "OR" : "LWW");
-    const oldRecord = map instanceof
+    const oldRecord = map instanceof import_core19.LWWMap ? map.getRecord(key) : null;
     if (this.partitionService.isRelated(key)) {
-      if (map instanceof
+      if (map instanceof import_core19.LWWMap && payload.record) {
        map.merge(key, payload.record);
-      } else if (map instanceof
+      } else if (map instanceof import_core19.ORMap) {
        if (eventType === "OR_ADD" && payload.orRecord) {
          map.apply(key, payload.orRecord);
        } else if (eventType === "OR_REMOVE" && payload.orTag) {
@@ -10372,9 +11841,9 @@ var ServerCoordinator = class {
     if (!this.maps.has(name)) {
       let map;
       if (typeHint === "OR") {
-        map = new
+        map = new import_core19.ORMap(this.hlc);
       } else {
-        map = new
+        map = new import_core19.LWWMap(this.hlc);
       }
       this.maps.set(name, map);
       if (this.storage) {
@@ -10397,7 +11866,7 @@ var ServerCoordinator = class {
     this.getMap(name, typeHint);
     const loadingPromise = this.mapLoadingPromises.get(name);
     const map = this.maps.get(name);
-    const mapSize = map instanceof
+    const mapSize = map instanceof import_core19.LWWMap ? Array.from(map.entries()).length : map instanceof import_core19.ORMap ? map.size : 0;
     logger.info({
       mapName: name,
       mapExisted,
@@ -10407,11 +11876,56 @@ var ServerCoordinator = class {
     if (loadingPromise) {
       logger.info({ mapName: name }, "[getMapAsync] Waiting for loadMapFromStorage...");
       await loadingPromise;
-      const newMapSize = map instanceof
+      const newMapSize = map instanceof import_core19.LWWMap ? Array.from(map.entries()).length : map instanceof import_core19.ORMap ? map.size : 0;
       logger.info({ mapName: name, mapSizeAfterLoad: newMapSize }, "[getMapAsync] Load completed");
     }
     return this.maps.get(name);
   }
+  /**
+   * Phase 10.04: Get local record for anti-entropy repair
+   * Returns the LWWRecord for a key, used by RepairScheduler
+   */
+  getLocalRecord(key) {
+    const separatorIndex = key.indexOf(":");
+    if (separatorIndex === -1) {
+      return null;
+    }
+    const mapName = key.substring(0, separatorIndex);
+    const actualKey = key.substring(separatorIndex + 1);
+    const map = this.maps.get(mapName);
+    if (!map || !(map instanceof import_core19.LWWMap)) {
+      return null;
+    }
+    return map.getRecord(actualKey) ?? null;
+  }
+  /**
+   * Phase 10.04: Apply repaired record from anti-entropy repair
+   * Used by RepairScheduler to apply resolved conflicts
+   */
+  applyRepairRecord(key, record) {
+    const separatorIndex = key.indexOf(":");
+    if (separatorIndex === -1) {
+      logger.warn({ key }, "Invalid key format for repair");
+      return;
+    }
+    const mapName = key.substring(0, separatorIndex);
+    const actualKey = key.substring(separatorIndex + 1);
+    const map = this.getMap(mapName, "LWW");
+    const existingRecord = map.getRecord(actualKey);
+    if (!existingRecord || record.timestamp.millis > existingRecord.timestamp.millis || record.timestamp.millis === existingRecord.timestamp.millis && record.timestamp.counter > existingRecord.timestamp.counter) {
+      map.merge(actualKey, record);
+      logger.debug({ mapName, key: actualKey }, "Applied repair record");
+      if (this.storage) {
+        this.storage.store(mapName, actualKey, record).catch((err) => {
+          logger.error({ err, mapName, key: actualKey }, "Failed to persist repair record");
+        });
+      }
+      if (this.merkleTreeManager) {
+        const partitionId = this.partitionService.getPartitionId(actualKey);
+        this.merkleTreeManager.updateRecord(partitionId, actualKey, record);
+      }
+    }
+  }
   async loadMapFromStorage(name, typeHint) {
     try {
       const keys = await this.storage.loadAllKeys(name);
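`getLocalRecord` and `applyRepairRecord` above address records through a composite `mapName:key` string; splitting on the first `:` keeps any colons inside the record key intact:

```ts
// Same split-on-first-colon convention as the repair accessors above.
function splitRepairKey(key: string): { mapName: string; actualKey: string } | null {
  const i = key.indexOf(":");
  if (i === -1) return null;
  return { mapName: key.slice(0, i), actualKey: key.slice(i + 1) };
}

// splitRepairKey("users:order:42") -> { mapName: "users", actualKey: "order:42" }
```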
@@ -10433,16 +11947,16 @@ var ServerCoordinator = class {
     const currentMap = this.maps.get(name);
     if (!currentMap) return;
     let targetMap = currentMap;
-    if (isOR && currentMap instanceof
+    if (isOR && currentMap instanceof import_core19.LWWMap) {
       logger.info({ mapName: name }, "Map auto-detected as ORMap. Switching type.");
-      targetMap = new
+      targetMap = new import_core19.ORMap(this.hlc);
       this.maps.set(name, targetMap);
-    } else if (!isOR && currentMap instanceof
+    } else if (!isOR && currentMap instanceof import_core19.ORMap && typeHint !== "OR") {
       logger.info({ mapName: name }, "Map auto-detected as LWWMap. Switching type.");
-      targetMap = new
+      targetMap = new import_core19.LWWMap(this.hlc);
       this.maps.set(name, targetMap);
     }
-    if (targetMap instanceof
+    if (targetMap instanceof import_core19.ORMap) {
       for (const [key, record] of records) {
         if (key === "__tombstones__") {
           const t = record;
@@ -10455,7 +11969,7 @@ var ServerCoordinator = class {
         }
       }
     }
-    } else if (targetMap instanceof
+    } else if (targetMap instanceof import_core19.LWWMap) {
      for (const [key, record] of records) {
        if (!record.type) {
          targetMap.merge(key, record);
@@ -10466,7 +11980,7 @@ var ServerCoordinator = class {
     if (count > 0) {
       logger.info({ mapName: name, count }, "Loaded records for map");
       this.queryRegistry.refreshSubscriptions(name, targetMap);
-      const mapSize = targetMap instanceof
+      const mapSize = targetMap instanceof import_core19.ORMap ? targetMap.totalRecords : targetMap.size;
       this.metricsService.setMapSize(name, mapSize);
     }
   } catch (err) {
@@ -10548,7 +12062,7 @@ var ServerCoordinator = class {
   reportLocalHlc() {
     let minHlc = this.hlc.now();
     for (const client of this.clients.values()) {
-      if (
+      if (import_core19.HLC.compare(client.lastActiveHlc, minHlc) < 0) {
        minHlc = client.lastActiveHlc;
      }
    }
@@ -10569,7 +12083,7 @@ var ServerCoordinator = class {
     let globalSafe = this.hlc.now();
     let initialized = false;
     for (const ts of this.gcReports.values()) {
-      if (!initialized ||
+      if (!initialized || import_core19.HLC.compare(ts, globalSafe) < 0) {
        globalSafe = ts;
        initialized = true;
      }
@@ -10604,7 +12118,7 @@ var ServerCoordinator = class {
     logger.info({ olderThanMillis: olderThan.millis }, "Performing Garbage Collection");
     const now = Date.now();
     for (const [name, map] of this.maps) {
-      if (map instanceof
+      if (map instanceof import_core19.LWWMap) {
        for (const key of map.allKeys()) {
          const record = map.getRecord(key);
          if (record && record.value !== null && record.ttlMs) {
@@ -10656,7 +12170,7 @@ var ServerCoordinator = class {
         });
        }
      }
-      } else if (map instanceof
+      } else if (map instanceof import_core19.ORMap) {
        const items = map.items;
        const tombstonesSet = map.tombstones;
        const tagsToExpire = [];
@@ -10759,17 +12273,17 @@ var ServerCoordinator = class {
   stringToWriteConcern(value) {
     switch (value) {
       case "FIRE_AND_FORGET":
-        return
+        return import_core19.WriteConcern.FIRE_AND_FORGET;
       case "MEMORY":
-        return
+        return import_core19.WriteConcern.MEMORY;
       case "APPLIED":
-        return
+        return import_core19.WriteConcern.APPLIED;
       case "REPLICATED":
-        return
+        return import_core19.WriteConcern.REPLICATED;
       case "PERSISTED":
-        return
+        return import_core19.WriteConcern.PERSISTED;
       default:
-        return
+        return import_core19.WriteConcern.MEMORY;
     }
   }
   /**
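`stringToWriteConcern` maps the client-facing strings onto the `WriteConcern` ladder, and the hunks that follow show `writeAckManager.notifyLevel` firing as an op climbs it. A sketch of the levels (names from the switch above; the per-level comments summarize where `notifyLevel` is called in the surrounding hunks, and the enum shape itself is an assumption):

```ts
enum WriteConcernSketch {
  FIRE_AND_FORGET, // no ack awaited
  MEMORY,          // acked once accepted in memory (also the default branch)
  APPLIED,         // acked after the op is applied to the local map
  REPLICATED,      // acked after replication/broadcast completes
  PERSISTED,       // acked after persistOpSync writes to storage
}
```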
@@ -10826,7 +12340,7 @@ var ServerCoordinator = class {
       }
     });
     if (op.id) {
-      this.writeAckManager.notifyLevel(op.id,
+      this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.REPLICATED);
     }
   }
@@ -10834,7 +12348,7 @@ var ServerCoordinator = class {
     this.broadcastBatch(batchedEvents, clientId);
     for (const op of ops) {
       if (op.id && this.partitionService.isLocalOwner(op.key)) {
-        this.writeAckManager.notifyLevel(op.id,
+        this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.REPLICATED);
       }
     }
   }
@@ -10862,7 +12376,7 @@ var ServerCoordinator = class {
       const owner = this.partitionService.getOwner(op.key);
       await this.forwardOpAndWait(op, owner);
       if (op.id) {
-        this.writeAckManager.notifyLevel(op.id,
+        this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.REPLICATED);
       }
     }
   }
@@ -10870,7 +12384,7 @@ var ServerCoordinator = class {
     await this.broadcastBatchSync(batchedEvents, clientId);
     for (const op of ops) {
       if (op.id && this.partitionService.isLocalOwner(op.key)) {
-        this.writeAckManager.notifyLevel(op.id,
+        this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.REPLICATED);
       }
     }
   }
@@ -10904,7 +12418,7 @@ var ServerCoordinator = class {
       return;
     }
     if (op.id) {
-      this.writeAckManager.notifyLevel(op.id,
+      this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.APPLIED);
     }
     if (eventPayload) {
       batchedEvents.push({
@@ -10918,7 +12432,7 @@ var ServerCoordinator = class {
     try {
       await this.persistOpSync(op);
       if (op.id) {
-        this.writeAckManager.notifyLevel(op.id,
+        this.writeAckManager.notifyLevel(op.id, import_core19.WriteConcern.PERSISTED);
       }
     } catch (err) {
       logger.error({ opId: op.id, err }, "Persistence failed");
@@ -11261,10 +12775,10 @@ var RateLimitInterceptor = class {
 };

 // src/utils/nativeStats.ts
-var
+var import_core20 = require("@topgunbuild/core");
 function getNativeModuleStatus() {
   return {
-    nativeHash: (0,
+    nativeHash: (0, import_core20.isUsingNativeHash)(),
     sharedArrayBuffer: SharedMemoryManager.isAvailable()
   };
 }
@@ -11297,15 +12811,15 @@ function logNativeStatus() {
 }

 // src/cluster/ClusterCoordinator.ts
-var
-var
+var import_events13 = require("events");
+var import_core21 = require("@topgunbuild/core");
 var DEFAULT_CLUSTER_COORDINATOR_CONFIG = {
   gradualRebalancing: true,
-  migration:
-  replication:
+  migration: import_core21.DEFAULT_MIGRATION_CONFIG,
+  replication: import_core21.DEFAULT_REPLICATION_CONFIG,
   replicationEnabled: true
 };
-var ClusterCoordinator = class extends
+var ClusterCoordinator = class extends import_events13.EventEmitter {
   constructor(config) {
     super();
     this.replicationPipeline = null;
@@ -11670,12 +13184,12 @@ var ClusterCoordinator = class extends import_events9.EventEmitter {
 };

 // src/MapWithResolver.ts
-var
+var import_core22 = require("@topgunbuild/core");
 var MapWithResolver = class {
   constructor(config) {
     this.mapName = config.name;
-    this.hlc = new
-    this.map = new
+    this.hlc = new import_core22.HLC(config.nodeId);
+    this.map = new import_core22.LWWMap(this.hlc);
     this.resolverService = config.resolverService;
     this.onRejection = config.onRejection;
   }
@@ -11931,7 +13445,7 @@ function mergeWithDefaults(userConfig) {
 }

 // src/config/MapFactory.ts
-var
+var import_core23 = require("@topgunbuild/core");
 var MapFactory = class {
   /**
    * Create a MapFactory.
@@ -11955,9 +13469,9 @@ var MapFactory = class {
   createLWWMap(mapName, hlc) {
     const mapConfig = this.mapConfigs.get(mapName);
     if (!mapConfig || mapConfig.indexes.length === 0) {
-      return new
+      return new import_core23.LWWMap(hlc);
     }
-    const map = new
+    const map = new import_core23.IndexedLWWMap(hlc);
     for (const indexDef of mapConfig.indexes) {
       this.addIndexToLWWMap(map, indexDef);
     }
@@ -11973,9 +13487,9 @@ var MapFactory = class {
   createORMap(mapName, hlc) {
     const mapConfig = this.mapConfigs.get(mapName);
     if (!mapConfig || mapConfig.indexes.length === 0) {
-      return new
+      return new import_core23.ORMap(hlc);
     }
-    const map = new
+    const map = new import_core23.IndexedORMap(hlc);
     for (const indexDef of mapConfig.indexes) {
       this.addIndexToORMap(map, indexDef);
     }
@@ -12012,7 +13526,7 @@ var MapFactory = class {
    * Supports dot notation for nested paths.
    */
   createAttribute(path) {
-    return (0,
+    return (0, import_core23.simpleAttribute)(path, (record) => {
       return this.getNestedValue(record, path);
     });
   }
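`createLWWMap`/`createORMap` above choose an indexed map only when the map's config declares indexes, and `createAttribute` resolves dot-notation paths into nested values. A sketch of the config shape this implies (field names are inferred from the factory code; the published config types may differ):

```ts
// Inferred, hypothetical config types for illustration only.
interface IndexDef { name: string; path: string }
interface MapConfig { indexes: IndexDef[] }

const mapConfigs = new Map<string, MapConfig>([
  ["users", { indexes: [{ name: "byCity", path: "address.city" }] }], // -> IndexedLWWMap
  ["sessions", { indexes: [] }],                                      // -> plain LWWMap
]);
```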
@@ -12111,12 +13625,18 @@ var MapFactory = class {
   ConnectionRateLimiter,
   DEFAULT_CLUSTER_COORDINATOR_CONFIG,
   DEFAULT_CONFLICT_RESOLVER_CONFIG,
+  DEFAULT_FAILURE_DETECTOR_CONFIG,
   DEFAULT_INDEX_CONFIG,
   DEFAULT_JOURNAL_SERVICE_CONFIG,
   DEFAULT_LAG_TRACKER_CONFIG,
+  DEFAULT_MERKLE_TREE_CONFIG,
+  DEFAULT_READ_REPLICA_CONFIG,
+  DEFAULT_REASSIGNER_CONFIG,
+  DEFAULT_REPAIR_CONFIG,
   DEFAULT_SANDBOX_CONFIG,
   EntryProcessorHandler,
   EventJournalService,
+  FailureDetector,
   FilterTasklet,
   ForEachTasklet,
   IteratorTasklet,
@@ -12126,13 +13646,17 @@ var MapFactory = class {
   MapTasklet,
   MapWithResolver,
   MemoryServerAdapter,
+  MerkleTreeManager,
   MigrationManager,
   ObjectPool,
+  PartitionReassigner,
   PartitionService,
   PostgresAdapter,
   ProcessorSandbox,
   RateLimitInterceptor,
+  ReadReplicaHandler,
   ReduceTasklet,
+  RepairScheduler,
   ReplicationPipeline,
   SecurityManager,
   ServerCoordinator,