@topgunbuild/server 0.6.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +874 -3
- package/dist/index.d.ts +874 -3
- package/dist/index.js +2595 -302
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +2506 -222
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.js  CHANGED
@@ -38,12 +38,18 @@ __export(index_exports, {
   ConnectionRateLimiter: () => ConnectionRateLimiter,
   DEFAULT_CLUSTER_COORDINATOR_CONFIG: () => DEFAULT_CLUSTER_COORDINATOR_CONFIG,
   DEFAULT_CONFLICT_RESOLVER_CONFIG: () => DEFAULT_CONFLICT_RESOLVER_CONFIG,
+  DEFAULT_FAILURE_DETECTOR_CONFIG: () => DEFAULT_FAILURE_DETECTOR_CONFIG,
   DEFAULT_INDEX_CONFIG: () => DEFAULT_INDEX_CONFIG,
   DEFAULT_JOURNAL_SERVICE_CONFIG: () => DEFAULT_JOURNAL_SERVICE_CONFIG,
   DEFAULT_LAG_TRACKER_CONFIG: () => DEFAULT_LAG_TRACKER_CONFIG,
+  DEFAULT_MERKLE_TREE_CONFIG: () => DEFAULT_MERKLE_TREE_CONFIG,
+  DEFAULT_READ_REPLICA_CONFIG: () => DEFAULT_READ_REPLICA_CONFIG,
+  DEFAULT_REASSIGNER_CONFIG: () => DEFAULT_REASSIGNER_CONFIG,
+  DEFAULT_REPAIR_CONFIG: () => DEFAULT_REPAIR_CONFIG,
   DEFAULT_SANDBOX_CONFIG: () => DEFAULT_SANDBOX_CONFIG,
   EntryProcessorHandler: () => EntryProcessorHandler,
   EventJournalService: () => EventJournalService,
+  FailureDetector: () => FailureDetector,
   FilterTasklet: () => FilterTasklet,
   ForEachTasklet: () => ForEachTasklet,
   IteratorTasklet: () => IteratorTasklet,
@@ -53,14 +59,19 @@ __export(index_exports, {
   MapTasklet: () => MapTasklet,
   MapWithResolver: () => MapWithResolver,
   MemoryServerAdapter: () => MemoryServerAdapter,
+  MerkleTreeManager: () => MerkleTreeManager,
   MigrationManager: () => MigrationManager,
   ObjectPool: () => ObjectPool,
+  PartitionReassigner: () => PartitionReassigner,
   PartitionService: () => PartitionService,
   PostgresAdapter: () => PostgresAdapter,
   ProcessorSandbox: () => ProcessorSandbox,
   RateLimitInterceptor: () => RateLimitInterceptor,
+  ReadReplicaHandler: () => ReadReplicaHandler,
   ReduceTasklet: () => ReduceTasklet,
+  RepairScheduler: () => RepairScheduler,
   ReplicationPipeline: () => ReplicationPipeline,
+  SearchCoordinator: () => SearchCoordinator,
   SecurityManager: () => SecurityManager,
   ServerCoordinator: () => ServerCoordinator,
   TaskletScheduler: () => TaskletScheduler,
@@ -95,7 +106,7 @@ var import_http = require("http");
 var import_https = require("https");
 var import_fs2 = require("fs");
 var import_ws3 = require("ws");
-var
+var import_core20 = require("@topgunbuild/core");
 var jwt = __toESM(require("jsonwebtoken"));
 var crypto = __toESM(require("crypto"));
 
@@ -1211,6 +1222,47 @@ var ClusterManager = class extends import_events2.EventEmitter {
   handleHeartbeat(senderId, _payload) {
     this.failureDetector.recordHeartbeat(senderId);
   }
+  /**
+   * Send current member list to a specific node (gossip protocol).
+   * Called when a new node joins to propagate cluster topology.
+   */
+  sendMemberList(targetNodeId) {
+    const members = [];
+    for (const [nodeId, member] of this.members) {
+      members.push({
+        nodeId,
+        host: member.host,
+        port: member.port
+      });
+    }
+    this.send(targetNodeId, "MEMBER_LIST", { members });
+    logger.debug({ targetNodeId, memberCount: members.length }, "Sent member list");
+  }
+  /**
+   * Broadcast member list to all connected nodes.
+   * Called when cluster membership changes.
+   */
+  broadcastMemberList() {
+    for (const [nodeId, member] of this.members) {
+      if (member.isSelf) continue;
+      if (member.socket && member.socket.readyState === import_ws.WebSocket.OPEN) {
+        this.sendMemberList(nodeId);
+      }
+    }
+  }
+  /**
+   * Handle incoming member list from a peer (gossip protocol).
+   * Attempts to connect to unknown members.
+   */
+  handleMemberList(payload) {
+    for (const memberInfo of payload.members) {
+      if (memberInfo.nodeId === this.config.nodeId) continue;
+      if (this.members.has(memberInfo.nodeId)) continue;
+      const peerAddress = `${memberInfo.host}:${memberInfo.port}`;
+      logger.info({ nodeId: memberInfo.nodeId, peerAddress }, "Discovered new member via gossip");
+      this.connectToPeer(peerAddress);
+    }
+  }
   /**
    * Handle confirmed node failure.
    */
@@ -1349,6 +1401,9 @@ var ClusterManager = class extends import_events2.EventEmitter {
       this.failureDetector.startMonitoring(remoteNodeId);
       this.startHeartbeat();
       this.emit("memberJoined", remoteNodeId);
+      this.broadcastMemberList();
+    } else if (msg.type === "MEMBER_LIST") {
+      this.handleMemberList(msg.payload);
     } else if (msg.type === "HEARTBEAT") {
       if (remoteNodeId) {
         this.handleHeartbeat(remoteNodeId, msg.payload);
@@ -6634,239 +6689,1439 @@ var ReplicationPipeline = class extends import_events8.EventEmitter {
|
|
|
6634
6689
|
}
|
|
6635
6690
|
};
|
|
6636
6691
|
|
|
6637
|
-
// src/
|
|
6692
|
+
// src/cluster/PartitionReassigner.ts
|
|
6693
|
+
var import_events9 = require("events");
|
|
6638
6694
|
var import_core10 = require("@topgunbuild/core");
|
|
6639
|
-
var
|
|
6640
|
-
|
|
6641
|
-
|
|
6642
|
-
|
|
6643
|
-
|
|
6644
|
-
|
|
6695
|
+
var DEFAULT_REASSIGNER_CONFIG = {
|
|
6696
|
+
reassignmentDelayMs: 1e3,
|
|
6697
|
+
maxConcurrentTransfers: 10,
|
|
6698
|
+
autoPromoteBackups: true,
|
|
6699
|
+
autoAssignNewBackups: true
|
|
6700
|
+
};
|
|
6701
|
+
var PartitionReassigner = class extends import_events9.EventEmitter {
|
|
6702
|
+
constructor(clusterManager, partitionService, config = {}) {
|
|
6703
|
+
super();
|
|
6704
|
+
this.failoverInProgress = false;
|
|
6705
|
+
this.partitionsReassigned = 0;
|
|
6706
|
+
this.pendingReassignments = /* @__PURE__ */ new Set();
|
|
6707
|
+
this.clusterManager = clusterManager;
|
|
6708
|
+
this.partitionService = partitionService;
|
|
6709
|
+
this.config = { ...DEFAULT_REASSIGNER_CONFIG, ...config };
|
|
6710
|
+
this.setupEventHandlers();
|
|
6711
|
+
}
|
|
6712
|
+
setupEventHandlers() {
|
|
6713
|
+
this.clusterManager.on("nodeConfirmedFailed", (nodeId) => {
|
|
6714
|
+
logger.warn({ nodeId }, "Node failure confirmed, initiating partition reassignment");
|
|
6715
|
+
this.handleNodeFailure(nodeId);
|
|
6716
|
+
});
|
|
6717
|
+
this.clusterManager.on("memberLeft", (nodeId) => {
|
|
6718
|
+
if (this.currentFailedNode !== nodeId) {
|
|
6719
|
+
logger.info({ nodeId }, "Member left cluster, checking partition reassignment");
|
|
6720
|
+
this.handleNodeDeparture(nodeId);
|
|
6721
|
+
}
|
|
6722
|
+
});
|
|
6645
6723
|
}
|
|
6646
6724
|
/**
|
|
6647
|
-
*
|
|
6725
|
+
* Handle a node failure - initiates failover process
|
|
6648
6726
|
*/
|
|
6649
|
-
|
|
6650
|
-
|
|
6651
|
-
|
|
6652
|
-
|
|
6653
|
-
this.counters.set(name, counter);
|
|
6654
|
-
logger.debug({ name }, "Created new counter");
|
|
6727
|
+
handleNodeFailure(failedNodeId) {
|
|
6728
|
+
if (this.failoverInProgress && this.currentFailedNode === failedNodeId) {
|
|
6729
|
+
logger.debug({ failedNodeId }, "Failover already in progress for this node");
|
|
6730
|
+
return;
|
|
6655
6731
|
}
|
|
6656
|
-
|
|
6732
|
+
if (this.reassignmentTimer) {
|
|
6733
|
+
clearTimeout(this.reassignmentTimer);
|
|
6734
|
+
}
|
|
6735
|
+
this.reassignmentTimer = setTimeout(() => {
|
|
6736
|
+
this.executeFailover(failedNodeId);
|
|
6737
|
+
}, this.config.reassignmentDelayMs);
|
|
6657
6738
|
}
|
|
6658
6739
|
/**
|
|
6659
|
-
* Handle
|
|
6660
|
-
* @returns Response message to send back to client
|
|
6740
|
+
* Handle a graceful node departure
|
|
6661
6741
|
*/
|
|
6662
|
-
|
|
6663
|
-
const
|
|
6664
|
-
|
|
6665
|
-
|
|
6666
|
-
|
|
6667
|
-
|
|
6668
|
-
type: "COUNTER_RESPONSE",
|
|
6669
|
-
payload: {
|
|
6670
|
-
name,
|
|
6671
|
-
state: this.stateToObject(state)
|
|
6672
|
-
}
|
|
6673
|
-
};
|
|
6742
|
+
handleNodeDeparture(nodeId) {
|
|
6743
|
+
const orphanedPartitions = this.findOrphanedPartitions(nodeId);
|
|
6744
|
+
if (orphanedPartitions.length > 0) {
|
|
6745
|
+
logger.warn({ nodeId, count: orphanedPartitions.length }, "Found orphaned partitions after departure");
|
|
6746
|
+
this.executeFailover(nodeId);
|
|
6747
|
+
}
|
|
6674
6748
|
}
|
|
6675
6749
|
/**
|
|
6676
|
-
*
|
|
6677
|
-
* @returns Merged state and list of clients to broadcast to
|
|
6750
|
+
* Execute the failover process for a failed node
|
|
6678
6751
|
*/
|
|
6679
|
-
|
|
6680
|
-
|
|
6681
|
-
|
|
6682
|
-
|
|
6683
|
-
|
|
6684
|
-
|
|
6685
|
-
logger.
|
|
6686
|
-
|
|
6687
|
-
|
|
6688
|
-
|
|
6689
|
-
|
|
6690
|
-
|
|
6691
|
-
|
|
6692
|
-
return {
|
|
6693
|
-
// Response to the sending client
|
|
6694
|
-
response: {
|
|
6695
|
-
type: "COUNTER_UPDATE",
|
|
6696
|
-
payload: {
|
|
6697
|
-
name,
|
|
6698
|
-
state: mergedStateObj
|
|
6699
|
-
}
|
|
6700
|
-
},
|
|
6701
|
-
// Broadcast to other clients
|
|
6702
|
-
broadcastTo,
|
|
6703
|
-
broadcastMessage: {
|
|
6704
|
-
type: "COUNTER_UPDATE",
|
|
6705
|
-
payload: {
|
|
6706
|
-
name,
|
|
6707
|
-
state: mergedStateObj
|
|
6708
|
-
}
|
|
6752
|
+
async executeFailover(failedNodeId) {
|
|
6753
|
+
this.failoverInProgress = true;
|
|
6754
|
+
this.currentFailedNode = failedNodeId;
|
|
6755
|
+
this.reassignmentStartTime = Date.now();
|
|
6756
|
+
this.partitionsReassigned = 0;
|
|
6757
|
+
this.pendingReassignments.clear();
|
|
6758
|
+
logger.info({ failedNodeId }, "Starting partition failover");
|
|
6759
|
+
try {
|
|
6760
|
+
const orphanedPartitions = this.findOrphanedPartitions(failedNodeId);
|
|
6761
|
+
if (orphanedPartitions.length === 0) {
|
|
6762
|
+
logger.info({ failedNodeId }, "No partitions to reassign");
|
|
6763
|
+
this.completeFailover();
|
|
6764
|
+
return;
|
|
6709
6765
|
}
|
|
6710
|
-
|
|
6766
|
+
logger.info({
|
|
6767
|
+
failedNodeId,
|
|
6768
|
+
partitionCount: orphanedPartitions.length
|
|
6769
|
+
}, "Reassigning partitions from failed node");
|
|
6770
|
+
for (const partitionId of orphanedPartitions) {
|
|
6771
|
+
this.pendingReassignments.add(partitionId);
|
|
6772
|
+
}
|
|
6773
|
+
const changes = [];
|
|
6774
|
+
for (const partitionId of orphanedPartitions) {
|
|
6775
|
+
const change = await this.reassignPartition(partitionId, failedNodeId);
|
|
6776
|
+
if (change) {
|
|
6777
|
+
changes.push(change);
|
|
6778
|
+
this.partitionsReassigned++;
|
|
6779
|
+
}
|
|
6780
|
+
this.pendingReassignments.delete(partitionId);
|
|
6781
|
+
}
|
|
6782
|
+
if (changes.length > 0) {
|
|
6783
|
+
this.emit("partitionsReassigned", {
|
|
6784
|
+
failedNodeId,
|
|
6785
|
+
changes,
|
|
6786
|
+
partitionMap: this.partitionService.getPartitionMap()
|
|
6787
|
+
});
|
|
6788
|
+
}
|
|
6789
|
+
this.completeFailover();
|
|
6790
|
+
} catch (error) {
|
|
6791
|
+
logger.error({ failedNodeId, error }, "Failover failed");
|
|
6792
|
+
this.emit("failoverError", { failedNodeId, error });
|
|
6793
|
+
this.completeFailover();
|
|
6794
|
+
}
|
|
6711
6795
|
}
|
|
6712
6796
|
/**
|
|
6713
|
-
*
|
|
6797
|
+
* Find all partitions that need reassignment
|
|
6714
6798
|
*/
|
|
6715
|
-
|
|
6716
|
-
|
|
6717
|
-
|
|
6799
|
+
findOrphanedPartitions(failedNodeId) {
|
|
6800
|
+
const orphaned = [];
|
|
6801
|
+
const partitionMap = this.partitionService.getPartitionMap();
|
|
6802
|
+
for (const partition of partitionMap.partitions) {
|
|
6803
|
+
if (partition.ownerNodeId === failedNodeId) {
|
|
6804
|
+
orphaned.push(partition.partitionId);
|
|
6805
|
+
}
|
|
6718
6806
|
}
|
|
6719
|
-
|
|
6720
|
-
logger.debug({ clientId, counterName }, "Client subscribed to counter");
|
|
6807
|
+
return orphaned;
|
|
6721
6808
|
}
|
|
6722
6809
|
/**
|
|
6723
|
-
*
|
|
6810
|
+
* Reassign a single partition
|
|
6724
6811
|
*/
|
|
6725
|
-
|
|
6726
|
-
const
|
|
6727
|
-
|
|
6728
|
-
|
|
6729
|
-
|
|
6730
|
-
|
|
6812
|
+
async reassignPartition(partitionId, failedNodeId) {
|
|
6813
|
+
const currentBackups = this.partitionService.getBackups(partitionId);
|
|
6814
|
+
const aliveMembers = this.clusterManager.getMembers().filter((m) => m !== failedNodeId);
|
|
6815
|
+
if (aliveMembers.length === 0) {
|
|
6816
|
+
logger.error({ partitionId }, "No alive members to reassign partition to");
|
|
6817
|
+
return null;
|
|
6818
|
+
}
|
|
6819
|
+
let newOwner = null;
|
|
6820
|
+
if (this.config.autoPromoteBackups) {
|
|
6821
|
+
for (const backup of currentBackups) {
|
|
6822
|
+
if (aliveMembers.includes(backup)) {
|
|
6823
|
+
newOwner = backup;
|
|
6824
|
+
break;
|
|
6825
|
+
}
|
|
6731
6826
|
}
|
|
6732
6827
|
}
|
|
6828
|
+
if (!newOwner) {
|
|
6829
|
+
const ownerIndex = partitionId % aliveMembers.length;
|
|
6830
|
+
newOwner = aliveMembers.sort()[ownerIndex];
|
|
6831
|
+
}
|
|
6832
|
+
this.partitionService.setOwner(partitionId, newOwner);
|
|
6833
|
+
logger.info({
|
|
6834
|
+
partitionId,
|
|
6835
|
+
previousOwner: failedNodeId,
|
|
6836
|
+
newOwner
|
|
6837
|
+
}, "Partition owner promoted");
|
|
6838
|
+
this.emit("reassignment", {
|
|
6839
|
+
type: "backup-promoted",
|
|
6840
|
+
partitionId,
|
|
6841
|
+
previousOwner: failedNodeId,
|
|
6842
|
+
newOwner
|
|
6843
|
+
});
|
|
6844
|
+
if (this.config.autoAssignNewBackups) {
|
|
6845
|
+
const newBackups = this.selectBackups(partitionId, newOwner, aliveMembers);
|
|
6846
|
+
}
|
|
6847
|
+
return {
|
|
6848
|
+
partitionId,
|
|
6849
|
+
previousOwner: failedNodeId,
|
|
6850
|
+
newOwner,
|
|
6851
|
+
reason: "FAILOVER"
|
|
6852
|
+
};
|
|
6733
6853
|
}
|
|
6734
6854
|
/**
|
|
6735
|
-
*
|
|
6855
|
+
* Select backup nodes for a partition
|
|
6736
6856
|
*/
|
|
6737
|
-
|
|
6738
|
-
|
|
6739
|
-
|
|
6740
|
-
|
|
6741
|
-
|
|
6742
|
-
|
|
6857
|
+
selectBackups(partitionId, owner, aliveMembers) {
|
|
6858
|
+
const backups = [];
|
|
6859
|
+
const sortedMembers = aliveMembers.filter((m) => m !== owner).sort();
|
|
6860
|
+
const startIndex = partitionId % sortedMembers.length;
|
|
6861
|
+
for (let i = 0; i < Math.min(import_core10.DEFAULT_BACKUP_COUNT, sortedMembers.length); i++) {
|
|
6862
|
+
const backupIndex = (startIndex + i) % sortedMembers.length;
|
|
6863
|
+
backups.push(sortedMembers[backupIndex]);
|
|
6743
6864
|
}
|
|
6744
|
-
|
|
6865
|
+
return backups;
|
|
6745
6866
|
}
|
|
6746
6867
|
/**
|
|
6747
|
-
*
|
|
6868
|
+
* Complete the failover process
|
|
6748
6869
|
*/
|
|
6749
|
-
|
|
6750
|
-
const
|
|
6751
|
-
|
|
6870
|
+
completeFailover() {
|
|
6871
|
+
const duration = this.reassignmentStartTime ? Date.now() - this.reassignmentStartTime : 0;
|
|
6872
|
+
logger.info({
|
|
6873
|
+
failedNodeId: this.currentFailedNode,
|
|
6874
|
+
partitionsReassigned: this.partitionsReassigned,
|
|
6875
|
+
durationMs: duration
|
|
6876
|
+
}, "Failover completed");
|
|
6877
|
+
this.emit("failoverComplete", {
|
|
6878
|
+
failedNodeId: this.currentFailedNode,
|
|
6879
|
+
partitionsReassigned: this.partitionsReassigned,
|
|
6880
|
+
durationMs: duration
|
|
6881
|
+
});
|
|
6882
|
+
this.failoverInProgress = false;
|
|
6883
|
+
this.currentFailedNode = void 0;
|
|
6884
|
+
this.reassignmentStartTime = void 0;
|
|
6885
|
+
this.pendingReassignments.clear();
|
|
6752
6886
|
}
|
|
6753
6887
|
/**
|
|
6754
|
-
* Get
|
|
6888
|
+
* Get current failover status
|
|
6755
6889
|
*/
|
|
6756
|
-
|
|
6757
|
-
return
|
|
6890
|
+
getStatus() {
|
|
6891
|
+
return {
|
|
6892
|
+
inProgress: this.failoverInProgress,
|
|
6893
|
+
failedNodeId: this.currentFailedNode,
|
|
6894
|
+
partitionsReassigned: this.partitionsReassigned,
|
|
6895
|
+
partitionsPending: this.pendingReassignments.size,
|
|
6896
|
+
startedAt: this.reassignmentStartTime,
|
|
6897
|
+
completedAt: this.failoverInProgress ? void 0 : Date.now()
|
|
6898
|
+
};
|
|
6758
6899
|
}
|
|
6759
6900
|
/**
|
|
6760
|
-
*
|
|
6901
|
+
* Check if failover is in progress
|
|
6761
6902
|
*/
|
|
6762
|
-
|
|
6763
|
-
return this.
|
|
6903
|
+
isFailoverInProgress() {
|
|
6904
|
+
return this.failoverInProgress;
|
|
6764
6905
|
}
|
|
6765
6906
|
/**
|
|
6766
|
-
*
|
|
6907
|
+
* Force immediate reassignment (for testing/manual intervention)
|
|
6767
6908
|
*/
|
|
6768
|
-
|
|
6769
|
-
|
|
6770
|
-
|
|
6771
|
-
|
|
6772
|
-
|
|
6909
|
+
forceReassignment(failedNodeId) {
|
|
6910
|
+
if (this.reassignmentTimer) {
|
|
6911
|
+
clearTimeout(this.reassignmentTimer);
|
|
6912
|
+
}
|
|
6913
|
+
this.executeFailover(failedNodeId);
|
|
6773
6914
|
}
|
|
6774
6915
|
/**
|
|
6775
|
-
*
|
|
6916
|
+
* Stop any pending reassignment
|
|
6776
6917
|
*/
|
|
6777
|
-
|
|
6778
|
-
|
|
6779
|
-
|
|
6780
|
-
|
|
6781
|
-
}
|
|
6918
|
+
stop() {
|
|
6919
|
+
if (this.reassignmentTimer) {
|
|
6920
|
+
clearTimeout(this.reassignmentTimer);
|
|
6921
|
+
this.reassignmentTimer = void 0;
|
|
6922
|
+
}
|
|
6923
|
+
this.failoverInProgress = false;
|
|
6924
|
+
this.pendingReassignments.clear();
|
|
6782
6925
|
}
|
|
6783
6926
|
};
|
|
6784
6927
|
|
|
6785
|
-
// src/
|
|
6786
|
-
var
|
|
6787
|
-
|
|
6788
|
-
// src/ProcessorSandbox.ts
|
|
6928
|
+
// src/cluster/ReadReplicaHandler.ts
|
|
6929
|
+
var import_events10 = require("events");
|
|
6789
6930
|
var import_core11 = require("@topgunbuild/core");
|
|
6790
|
-
var
|
|
6791
|
-
|
|
6792
|
-
|
|
6793
|
-
|
|
6794
|
-
|
|
6795
|
-
if (isProduction) {
|
|
6796
|
-
logger.error(
|
|
6797
|
-
"SECURITY WARNING: isolated-vm not available in production! Entry processors will run in less secure fallback mode. Install isolated-vm for production environments: pnpm add isolated-vm"
|
|
6798
|
-
);
|
|
6799
|
-
} else {
|
|
6800
|
-
logger.warn("isolated-vm not available, falling back to less secure VM");
|
|
6801
|
-
}
|
|
6802
|
-
}
|
|
6803
|
-
var DEFAULT_SANDBOX_CONFIG = {
|
|
6804
|
-
memoryLimitMb: 8,
|
|
6805
|
-
timeoutMs: 100,
|
|
6806
|
-
maxCachedIsolates: 100,
|
|
6807
|
-
strictValidation: true
|
|
6931
|
+
var DEFAULT_READ_REPLICA_CONFIG = {
|
|
6932
|
+
defaultConsistency: import_core11.ConsistencyLevel.STRONG,
|
|
6933
|
+
maxStalenessMs: 5e3,
|
|
6934
|
+
preferLocalReplica: true,
|
|
6935
|
+
loadBalancing: "latency-based"
|
|
6808
6936
|
};
|
|
6809
|
-
var
|
|
6810
|
-
constructor(config = {}) {
|
|
6811
|
-
|
|
6812
|
-
|
|
6813
|
-
this.
|
|
6814
|
-
this.
|
|
6815
|
-
this.
|
|
6937
|
+
var ReadReplicaHandler = class extends import_events10.EventEmitter {
|
|
6938
|
+
constructor(partitionService, clusterManager, nodeId, lagTracker, config = {}) {
|
|
6939
|
+
super();
|
|
6940
|
+
// Round-robin counters for load balancing
|
|
6941
|
+
this.roundRobinCounters = /* @__PURE__ */ new Map();
|
|
6942
|
+
this.partitionService = partitionService;
|
|
6943
|
+
this.clusterManager = clusterManager;
|
|
6944
|
+
this.nodeId = nodeId;
|
|
6945
|
+
this.lagTracker = lagTracker;
|
|
6946
|
+
this.config = { ...DEFAULT_READ_REPLICA_CONFIG, ...config };
|
|
6816
6947
|
}
|
|
6817
6948
|
/**
|
|
6818
|
-
*
|
|
6819
|
-
*
|
|
6820
|
-
* @param processor The processor definition (name, code, args)
|
|
6821
|
-
* @param value The current value for the key (or undefined)
|
|
6822
|
-
* @param key The key being processed
|
|
6823
|
-
* @returns Result containing success status, result, and new value
|
|
6949
|
+
* Determine if a read request can be served locally
|
|
6824
6950
|
*/
|
|
6825
|
-
|
|
6826
|
-
|
|
6827
|
-
|
|
6828
|
-
|
|
6829
|
-
error: "Sandbox has been disposed"
|
|
6830
|
-
};
|
|
6951
|
+
canServeLocally(request) {
|
|
6952
|
+
const consistency = request.options?.consistency ?? this.config.defaultConsistency;
|
|
6953
|
+
if (consistency === import_core11.ConsistencyLevel.STRONG) {
|
|
6954
|
+
return this.partitionService.isLocalOwner(request.key);
|
|
6831
6955
|
}
|
|
6832
|
-
|
|
6833
|
-
|
|
6834
|
-
|
|
6835
|
-
|
|
6836
|
-
|
|
6837
|
-
|
|
6838
|
-
|
|
6956
|
+
return this.partitionService.isRelated(request.key);
|
|
6957
|
+
}
|
|
6958
|
+
/**
|
|
6959
|
+
* Determine which node should handle the read
|
|
6960
|
+
*/
|
|
6961
|
+
selectReadNode(request) {
|
|
6962
|
+
const key = request.key;
|
|
6963
|
+
const consistency = request.options?.consistency ?? this.config.defaultConsistency;
|
|
6964
|
+
const partitionId = this.partitionService.getPartitionId(key);
|
|
6965
|
+
const distribution = this.partitionService.getDistribution(key);
|
|
6966
|
+
if (consistency === import_core11.ConsistencyLevel.STRONG) {
|
|
6967
|
+
if (!this.isNodeAlive(distribution.owner)) {
|
|
6968
|
+
if (request.options?.allowStale) {
|
|
6969
|
+
return this.selectAliveBackup(distribution.backups);
|
|
6970
|
+
}
|
|
6971
|
+
return null;
|
|
6839
6972
|
}
|
|
6973
|
+
return distribution.owner;
|
|
6840
6974
|
}
|
|
6841
|
-
|
|
6842
|
-
|
|
6843
|
-
|
|
6844
|
-
return
|
|
6975
|
+
const allReplicas = [distribution.owner, ...distribution.backups];
|
|
6976
|
+
const aliveReplicas = allReplicas.filter((n) => this.isNodeAlive(n));
|
|
6977
|
+
if (aliveReplicas.length === 0) {
|
|
6978
|
+
return null;
|
|
6979
|
+
}
|
|
6980
|
+
if (request.options?.maxStaleness) {
|
|
6981
|
+
const withinStaleness = aliveReplicas.filter(
|
|
6982
|
+
(n) => this.getNodeStaleness(n) <= (request.options?.maxStaleness ?? Infinity)
|
|
6983
|
+
);
|
|
6984
|
+
if (withinStaleness.length > 0) {
|
|
6985
|
+
return this.selectByStrategy(withinStaleness, partitionId);
|
|
6986
|
+
}
|
|
6987
|
+
if (this.isNodeAlive(distribution.owner)) {
|
|
6988
|
+
return distribution.owner;
|
|
6989
|
+
}
|
|
6990
|
+
}
|
|
6991
|
+
if (this.config.preferLocalReplica && aliveReplicas.includes(this.nodeId)) {
|
|
6992
|
+
return this.nodeId;
|
|
6845
6993
|
}
|
|
6994
|
+
return this.selectByStrategy(aliveReplicas, partitionId);
|
|
6846
6995
|
}
|
|
6847
6996
|
/**
|
|
6848
|
-
*
|
|
6997
|
+
* Select replica using configured load balancing strategy
|
|
6849
6998
|
*/
|
|
6850
|
-
|
|
6851
|
-
if (
|
|
6852
|
-
|
|
6999
|
+
selectByStrategy(replicas, partitionId) {
|
|
7000
|
+
if (replicas.length === 0) {
|
|
7001
|
+
throw new Error("No replicas available");
|
|
6853
7002
|
}
|
|
6854
|
-
|
|
6855
|
-
|
|
6856
|
-
|
|
6857
|
-
|
|
6858
|
-
|
|
6859
|
-
|
|
6860
|
-
|
|
6861
|
-
|
|
6862
|
-
|
|
6863
|
-
|
|
6864
|
-
|
|
6865
|
-
|
|
6866
|
-
|
|
6867
|
-
|
|
6868
|
-
|
|
6869
|
-
|
|
7003
|
+
if (replicas.length === 1) {
|
|
7004
|
+
return replicas[0];
|
|
7005
|
+
}
|
|
7006
|
+
switch (this.config.loadBalancing) {
|
|
7007
|
+
case "round-robin":
|
|
7008
|
+
return this.selectRoundRobin(replicas, partitionId);
|
|
7009
|
+
case "latency-based":
|
|
7010
|
+
return this.selectByLatency(replicas);
|
|
7011
|
+
case "least-connections":
|
|
7012
|
+
return this.selectRoundRobin(replicas, partitionId);
|
|
7013
|
+
default:
|
|
7014
|
+
return replicas[0];
|
|
7015
|
+
}
|
|
7016
|
+
}
|
|
7017
|
+
/**
|
|
7018
|
+
* Round-robin selection
|
|
7019
|
+
*/
|
|
7020
|
+
selectRoundRobin(replicas, partitionId) {
|
|
7021
|
+
const counter = this.roundRobinCounters.get(partitionId) ?? 0;
|
|
7022
|
+
const selected = replicas[counter % replicas.length];
|
|
7023
|
+
this.roundRobinCounters.set(partitionId, counter + 1);
|
|
7024
|
+
return selected;
|
|
7025
|
+
}
|
|
7026
|
+
/**
|
|
7027
|
+
* Latency-based selection using lag tracker
|
|
7028
|
+
*/
|
|
7029
|
+
selectByLatency(replicas) {
|
|
7030
|
+
if (!this.lagTracker) {
|
|
7031
|
+
return replicas[0];
|
|
7032
|
+
}
|
|
7033
|
+
let bestNode = replicas[0];
|
|
7034
|
+
let bestLatency = Infinity;
|
|
7035
|
+
for (const nodeId of replicas) {
|
|
7036
|
+
const lag = this.lagTracker.getLag(nodeId);
|
|
7037
|
+
if (lag && lag.current < bestLatency) {
|
|
7038
|
+
bestLatency = lag.current;
|
|
7039
|
+
bestNode = nodeId;
|
|
7040
|
+
}
|
|
7041
|
+
}
|
|
7042
|
+
return bestNode;
|
|
7043
|
+
}
|
|
7044
|
+
/**
|
|
7045
|
+
* Get estimated staleness for a node in ms
|
|
7046
|
+
*/
|
|
7047
|
+
getNodeStaleness(nodeId) {
|
|
7048
|
+
if (nodeId === this.partitionService.getOwner("")) {
|
|
7049
|
+
return 0;
|
|
7050
|
+
}
|
|
7051
|
+
if (this.lagTracker) {
|
|
7052
|
+
const lag = this.lagTracker.getLag(nodeId);
|
|
7053
|
+
return lag?.current ?? 0;
|
|
7054
|
+
}
|
|
7055
|
+
return 0;
|
|
7056
|
+
}
|
|
7057
|
+
/**
|
|
7058
|
+
* Check if a node is alive in the cluster
|
|
7059
|
+
*/
|
|
7060
|
+
isNodeAlive(nodeId) {
|
|
7061
|
+
const members = this.clusterManager.getMembers();
|
|
7062
|
+
return members.includes(nodeId);
|
|
7063
|
+
}
|
|
7064
|
+
/**
|
|
7065
|
+
* Select first alive backup from list
|
|
7066
|
+
*/
|
|
7067
|
+
selectAliveBackup(backups) {
|
|
7068
|
+
for (const backup of backups) {
|
|
7069
|
+
if (this.isNodeAlive(backup)) {
|
|
7070
|
+
return backup;
|
|
7071
|
+
}
|
|
7072
|
+
}
|
|
7073
|
+
return null;
|
|
7074
|
+
}
|
|
7075
|
+
/**
|
|
7076
|
+
* Create read response metadata
|
|
7077
|
+
*/
|
|
7078
|
+
createReadMetadata(key, options) {
|
|
7079
|
+
const consistency = options?.consistency ?? this.config.defaultConsistency;
|
|
7080
|
+
const isOwner = this.partitionService.isLocalOwner(key);
|
|
7081
|
+
return {
|
|
7082
|
+
source: this.nodeId,
|
|
7083
|
+
isOwner,
|
|
7084
|
+
consistency
|
|
7085
|
+
};
|
|
7086
|
+
}
|
|
7087
|
+
/**
|
|
7088
|
+
* Check if local node should forward read to owner
|
|
7089
|
+
*/
|
|
7090
|
+
shouldForwardRead(request) {
|
|
7091
|
+
const consistency = request.options?.consistency ?? this.config.defaultConsistency;
|
|
7092
|
+
if (consistency === import_core11.ConsistencyLevel.STRONG) {
|
|
7093
|
+
return !this.partitionService.isLocalOwner(request.key);
|
|
7094
|
+
}
|
|
7095
|
+
if (!this.partitionService.isRelated(request.key)) {
|
|
7096
|
+
return true;
|
|
7097
|
+
}
|
|
7098
|
+
return false;
|
|
7099
|
+
}
|
|
7100
|
+
/**
|
|
7101
|
+
* Get metrics for monitoring
|
|
7102
|
+
*/
|
|
7103
|
+
getMetrics() {
|
|
7104
|
+
return {
|
|
7105
|
+
defaultConsistency: this.config.defaultConsistency,
|
|
7106
|
+
preferLocalReplica: this.config.preferLocalReplica,
|
|
7107
|
+
loadBalancing: this.config.loadBalancing,
|
|
7108
|
+
roundRobinPartitions: this.roundRobinCounters.size
|
|
7109
|
+
};
|
|
7110
|
+
}
|
|
7111
|
+
};
|
|
7112
|
+
|
|
7113
|
+
// src/cluster/MerkleTreeManager.ts
|
|
7114
|
+
var import_events11 = require("events");
|
|
7115
|
+
var import_core12 = require("@topgunbuild/core");
|
|
7116
|
+
var DEFAULT_MERKLE_TREE_CONFIG = {
|
|
7117
|
+
treeDepth: 3,
|
|
7118
|
+
autoUpdate: true,
|
|
7119
|
+
lazyInit: true
|
|
7120
|
+
};
|
|
7121
|
+
var MerkleTreeManager = class extends import_events11.EventEmitter {
|
|
7122
|
+
constructor(nodeId, config = {}) {
|
|
7123
|
+
super();
|
|
7124
|
+
this.trees = /* @__PURE__ */ new Map();
|
|
7125
|
+
this.keyCounts = /* @__PURE__ */ new Map();
|
|
7126
|
+
this.lastUpdated = /* @__PURE__ */ new Map();
|
|
7127
|
+
this.nodeId = nodeId;
|
|
7128
|
+
this.config = { ...DEFAULT_MERKLE_TREE_CONFIG, ...config };
|
|
7129
|
+
}
|
|
7130
|
+
/**
|
|
7131
|
+
* Get or create a Merkle tree for a partition
|
|
7132
|
+
*/
|
|
7133
|
+
getTree(partitionId) {
|
|
7134
|
+
let tree = this.trees.get(partitionId);
|
|
7135
|
+
if (!tree) {
|
|
7136
|
+
tree = new import_core12.MerkleTree(/* @__PURE__ */ new Map(), this.config.treeDepth);
|
|
7137
|
+
this.trees.set(partitionId, tree);
|
|
7138
|
+
this.keyCounts.set(partitionId, 0);
|
|
7139
|
+
this.lastUpdated.set(partitionId, Date.now());
|
|
7140
|
+
}
|
|
7141
|
+
return tree;
|
|
7142
|
+
}
|
|
7143
|
+
/**
|
|
7144
|
+
* Build tree for a partition from existing data
|
|
7145
|
+
*/
|
|
7146
|
+
buildTree(partitionId, records) {
|
|
7147
|
+
const tree = new import_core12.MerkleTree(records, this.config.treeDepth);
|
|
7148
|
+
this.trees.set(partitionId, tree);
|
|
7149
|
+
this.keyCounts.set(partitionId, records.size);
|
|
7150
|
+
this.lastUpdated.set(partitionId, Date.now());
|
|
7151
|
+
logger.debug({
|
|
7152
|
+
partitionId,
|
|
7153
|
+
keyCount: records.size,
|
|
7154
|
+
rootHash: tree.getRootHash()
|
|
7155
|
+
}, "Built Merkle tree for partition");
|
|
7156
|
+
}
|
|
7157
|
+
/**
|
|
7158
|
+
* Incrementally update tree when a record changes
|
|
7159
|
+
*/
|
|
7160
|
+
updateRecord(partitionId, key, record) {
|
|
7161
|
+
if (!this.config.autoUpdate) return;
|
|
7162
|
+
const tree = this.getTree(partitionId);
|
|
7163
|
+
const previousKeyCount = this.keyCounts.get(partitionId) ?? 0;
|
|
7164
|
+
const existingBuckets = tree.getBuckets("");
|
|
7165
|
+
const wasNewKey = Object.keys(existingBuckets).length === 0 || !tree.getKeysInBucket(this.getKeyPath(key)).includes(key);
|
|
7166
|
+
tree.update(key, record);
|
|
7167
|
+
if (wasNewKey) {
|
|
7168
|
+
this.keyCounts.set(partitionId, previousKeyCount + 1);
|
|
7169
|
+
}
|
|
7170
|
+
this.lastUpdated.set(partitionId, Date.now());
|
|
7171
|
+
this.emit("treeUpdated", {
|
|
7172
|
+
partitionId,
|
|
7173
|
+
key,
|
|
7174
|
+
rootHash: tree.getRootHash()
|
|
7175
|
+
});
|
|
7176
|
+
}
|
|
7177
|
+
/**
|
|
7178
|
+
* Remove a key from the tree (e.g., after GC)
|
|
7179
|
+
*/
|
|
7180
|
+
removeRecord(partitionId, key) {
|
|
7181
|
+
const tree = this.trees.get(partitionId);
|
|
7182
|
+
if (!tree) return;
|
|
7183
|
+
tree.remove(key);
|
|
7184
|
+
const currentCount = this.keyCounts.get(partitionId) ?? 0;
|
|
7185
|
+
if (currentCount > 0) {
|
|
7186
|
+
this.keyCounts.set(partitionId, currentCount - 1);
|
|
7187
|
+
}
|
|
7188
|
+
this.lastUpdated.set(partitionId, Date.now());
|
|
7189
|
+
this.emit("treeUpdated", {
|
|
7190
|
+
partitionId,
|
|
7191
|
+
key,
|
|
7192
|
+
rootHash: tree.getRootHash()
|
|
7193
|
+
});
|
|
7194
|
+
}
|
|
7195
|
+
/**
|
|
7196
|
+
* Get the path prefix for a key in the Merkle tree
|
|
7197
|
+
*/
|
|
7198
|
+
getKeyPath(key) {
|
|
7199
|
+
const hash = (0, import_core12.hashString)(key).toString(16).padStart(8, "0");
|
|
7200
|
+
return hash.slice(0, this.config.treeDepth);
|
|
7201
|
+
}
|
|
7202
|
+
/**
|
|
7203
|
+
* Get root hash for a partition
|
|
7204
|
+
*/
|
|
7205
|
+
getRootHash(partitionId) {
|
|
7206
|
+
const tree = this.trees.get(partitionId);
|
|
7207
|
+
return tree?.getRootHash() ?? 0;
|
|
7208
|
+
}
|
|
7209
|
+
/**
|
|
7210
|
+
* Compare local tree with remote root hash
|
|
7211
|
+
*/
|
|
7212
|
+
compareWithRemote(partitionId, remoteRoot) {
|
|
7213
|
+
const tree = this.getTree(partitionId);
|
|
7214
|
+
const localRoot = tree.getRootHash();
|
|
7215
|
+
return {
|
|
7216
|
+
partitionId,
|
|
7217
|
+
localRoot,
|
|
7218
|
+
remoteRoot,
|
|
7219
|
+
needsSync: localRoot !== remoteRoot,
|
|
7220
|
+
differingBuckets: localRoot !== remoteRoot ? this.findDifferingBuckets(tree, remoteRoot) : []
|
|
7221
|
+
};
|
|
7222
|
+
}
|
|
7223
|
+
/**
|
|
7224
|
+
* Find buckets that differ between local and remote tree
|
|
7225
|
+
* Note: This is a simplified version - full implementation would
|
|
7226
|
+
* need to exchange bucket hashes with the remote node
|
|
7227
|
+
*/
|
|
7228
|
+
findDifferingBuckets(tree, _remoteRoot) {
|
|
7229
|
+
const buckets = [];
|
|
7230
|
+
this.collectLeafBuckets(tree, "", buckets);
|
|
7231
|
+
return buckets;
|
|
7232
|
+
}
|
|
7233
|
+
/**
|
|
7234
|
+
* Recursively collect all leaf bucket paths
|
|
7235
|
+
*/
|
|
7236
|
+
collectLeafBuckets(tree, path, result) {
|
|
7237
|
+
if (path.length >= this.config.treeDepth) {
|
|
7238
|
+
const keys = tree.getKeysInBucket(path);
|
|
7239
|
+
if (keys.length > 0) {
|
|
7240
|
+
result.push(path);
|
|
7241
|
+
}
|
|
7242
|
+
return;
|
|
7243
|
+
}
|
|
7244
|
+
const buckets = tree.getBuckets(path);
|
|
7245
|
+
for (const char of Object.keys(buckets)) {
|
|
7246
|
+
this.collectLeafBuckets(tree, path + char, result);
|
|
7247
|
+
}
|
|
7248
|
+
}
|
|
7249
|
+
/**
|
|
7250
|
+
* Get bucket hashes for a partition at a given path
|
|
7251
|
+
*/
|
|
7252
|
+
getBuckets(partitionId, path) {
|
|
7253
|
+
const tree = this.trees.get(partitionId);
|
|
7254
|
+
return tree?.getBuckets(path) ?? {};
|
|
7255
|
+
}
|
|
7256
|
+
/**
|
|
7257
|
+
* Get keys in a specific bucket
|
|
7258
|
+
*/
|
|
7259
|
+
getKeysInBucket(partitionId, path) {
|
|
7260
|
+
const tree = this.trees.get(partitionId);
|
|
7261
|
+
return tree?.getKeysInBucket(path) ?? [];
|
|
7262
|
+
}
|
|
7263
|
+
/**
|
|
7264
|
+
* Get all keys across all buckets for a partition
|
|
7265
|
+
*/
|
|
7266
|
+
getAllKeys(partitionId) {
|
|
7267
|
+
const tree = this.trees.get(partitionId);
|
|
7268
|
+
if (!tree) return [];
|
|
7269
|
+
const keys = [];
|
|
7270
|
+
this.collectAllKeys(tree, "", keys);
|
|
7271
|
+
return keys;
|
|
7272
|
+
}
|
|
7273
|
+
/**
|
|
7274
|
+
* Recursively collect all keys from the tree
|
|
7275
|
+
*/
|
|
7276
|
+
collectAllKeys(tree, path, result) {
|
|
7277
|
+
if (path.length >= this.config.treeDepth) {
|
|
7278
|
+
const keys = tree.getKeysInBucket(path);
|
|
7279
|
+
result.push(...keys);
|
|
7280
|
+
return;
|
|
7281
|
+
}
|
|
7282
|
+
const buckets = tree.getBuckets(path);
|
|
7283
|
+
for (const char of Object.keys(buckets)) {
|
|
7284
|
+
this.collectAllKeys(tree, path + char, result);
|
|
7285
|
+
}
|
|
7286
|
+
}
|
|
7287
|
+
/**
|
|
7288
|
+
* Get info about all managed partitions
|
|
7289
|
+
*/
|
|
7290
|
+
getPartitionInfos() {
|
|
7291
|
+
const infos = [];
|
|
7292
|
+
for (const [partitionId, tree] of this.trees) {
|
|
7293
|
+
infos.push({
|
|
7294
|
+
partitionId,
|
|
7295
|
+
rootHash: tree.getRootHash(),
|
|
7296
|
+
keyCount: this.keyCounts.get(partitionId) ?? 0,
|
|
7297
|
+
lastUpdated: this.lastUpdated.get(partitionId) ?? 0
|
|
7298
|
+
});
|
|
7299
|
+
}
|
|
7300
|
+
return infos;
|
|
7301
|
+
}
|
|
7302
|
+
/**
|
|
7303
|
+
* Get info for a specific partition
|
|
7304
|
+
*/
|
|
7305
|
+
getPartitionInfo(partitionId) {
|
|
7306
|
+
const tree = this.trees.get(partitionId);
|
|
7307
|
+
if (!tree) return null;
|
|
7308
|
+
return {
|
|
7309
|
+
partitionId,
|
|
7310
|
+
rootHash: tree.getRootHash(),
|
|
7311
|
+
keyCount: this.keyCounts.get(partitionId) ?? 0,
|
|
7312
|
+
lastUpdated: this.lastUpdated.get(partitionId) ?? 0
|
|
7313
|
+
};
|
|
7314
|
+
}
|
|
7315
|
+
/**
|
|
7316
|
+
* Clear tree for a partition (e.g., after migration)
|
|
7317
|
+
*/
|
|
7318
|
+
clearPartition(partitionId) {
|
|
7319
|
+
this.trees.delete(partitionId);
|
|
7320
|
+
this.keyCounts.delete(partitionId);
|
|
7321
|
+
this.lastUpdated.delete(partitionId);
|
|
7322
|
+
}
|
|
7323
|
+
/**
|
|
7324
|
+
* Clear all trees
|
|
7325
|
+
*/
|
|
7326
|
+
clearAll() {
|
|
7327
|
+
this.trees.clear();
|
|
7328
|
+
this.keyCounts.clear();
|
|
7329
|
+
this.lastUpdated.clear();
|
|
7330
|
+
}
|
|
7331
|
+
/**
|
|
7332
|
+
* Get metrics for monitoring
|
|
7333
|
+
*/
|
|
7334
|
+
getMetrics() {
|
|
7335
|
+
let totalKeys = 0;
|
|
7336
|
+
for (const count of this.keyCounts.values()) {
|
|
7337
|
+
totalKeys += count;
|
|
7338
|
+
}
|
|
7339
|
+
return {
|
|
7340
|
+
totalPartitions: this.trees.size,
|
|
7341
|
+
totalKeys,
|
|
7342
|
+
averageKeysPerPartition: this.trees.size > 0 ? totalKeys / this.trees.size : 0
|
|
7343
|
+
};
|
|
7344
|
+
}
|
|
7345
|
+
/**
|
|
7346
|
+
* Serialize tree state for network transfer
|
|
7347
|
+
*/
|
|
7348
|
+
serializeTree(partitionId) {
|
|
7349
|
+
const tree = this.trees.get(partitionId);
|
|
7350
|
+
if (!tree) return null;
|
|
7351
|
+
const buckets = {};
|
|
7352
|
+
for (let depth = 0; depth < this.config.treeDepth; depth++) {
|
|
7353
|
+
this.collectBucketsAtDepth(tree, "", depth, buckets);
|
|
7354
|
+
}
|
|
7355
|
+
return {
|
|
7356
|
+
rootHash: tree.getRootHash(),
|
|
7357
|
+
buckets
|
|
7358
|
+
};
|
|
7359
|
+
}
|
|
7360
|
+
collectBucketsAtDepth(tree, path, targetDepth, result) {
|
|
7361
|
+
if (path.length === targetDepth) {
|
|
7362
|
+
const buckets2 = tree.getBuckets(path);
|
|
7363
|
+
if (Object.keys(buckets2).length > 0) {
|
|
7364
|
+
result[path] = buckets2;
|
|
7365
|
+
}
|
|
7366
|
+
return;
|
|
7367
|
+
}
|
|
7368
|
+
if (path.length > targetDepth) return;
|
|
7369
|
+
const buckets = tree.getBuckets(path);
|
|
7370
|
+
for (const char of Object.keys(buckets)) {
|
|
7371
|
+
this.collectBucketsAtDepth(tree, path + char, targetDepth, result);
|
|
7372
|
+
}
|
|
7373
|
+
}
|
|
7374
|
+
};
|
|
7375
|
+
|
|
7376
|
+
// src/cluster/RepairScheduler.ts
|
|
7377
|
+
var import_events12 = require("events");
|
|
7378
|
+
var import_core13 = require("@topgunbuild/core");
|
|
7379
|
+
var DEFAULT_REPAIR_CONFIG = {
|
|
7380
|
+
enabled: true,
|
|
7381
|
+
scanIntervalMs: 36e5,
|
|
7382
|
+
// 1 hour
|
|
7383
|
+
repairBatchSize: 1e3,
|
|
7384
|
+
maxConcurrentRepairs: 2,
|
|
7385
|
+
throttleMs: 100,
|
|
7386
|
+
prioritizeRecent: true,
|
|
7387
|
+
requestTimeoutMs: 5e3
|
|
7388
|
+
};
|
|
7389
|
+
var RepairScheduler = class extends import_events12.EventEmitter {
|
|
7390
|
+
constructor(merkleManager, clusterManager, partitionService, nodeId, config = {}) {
|
|
7391
|
+
super();
|
|
7392
|
+
this.repairQueue = [];
|
|
7393
|
+
this.activeRepairs = /* @__PURE__ */ new Set();
|
|
7394
|
+
this.started = false;
|
|
7395
|
+
// Pending network requests
|
|
7396
|
+
this.pendingRequests = /* @__PURE__ */ new Map();
|
|
7397
|
+
// Metrics
|
|
7398
|
+
this.metrics = {
|
|
7399
|
+
scansCompleted: 0,
|
|
7400
|
+
repairsExecuted: 0,
|
|
7401
|
+
keysRepaired: 0,
|
|
7402
|
+
errorsEncountered: 0,
|
|
7403
|
+
averageRepairDurationMs: 0
|
|
7404
|
+
};
|
|
7405
|
+
this.merkleManager = merkleManager;
|
|
7406
|
+
this.clusterManager = clusterManager;
|
|
7407
|
+
this.partitionService = partitionService;
|
|
7408
|
+
this.nodeId = nodeId;
|
|
7409
|
+
this.config = { ...DEFAULT_REPAIR_CONFIG, ...config };
|
|
7410
|
+
this.setupNetworkHandlers();
|
|
7411
|
+
}
|
|
7412
|
+
/**
|
|
7413
|
+
* Set data access callbacks
|
|
7414
|
+
*/
|
|
7415
|
+
setDataAccessors(getRecord, setRecord) {
|
|
7416
|
+
this.getRecord = getRecord;
|
|
7417
|
+
this.setRecord = setRecord;
|
|
7418
|
+
}
|
|
7419
|
+
/**
|
|
7420
|
+
* Setup network message handlers
|
|
7421
|
+
*/
|
|
7422
|
+
setupNetworkHandlers() {
|
|
7423
|
+
this.clusterManager.on("message", (msg) => {
|
|
7424
|
+
this.handleClusterMessage(msg);
|
|
7425
|
+
});
|
|
7426
|
+
}
|
|
7427
|
+
/**
|
|
7428
|
+
* Handle incoming cluster messages
|
|
7429
|
+
*/
|
|
7430
|
+
handleClusterMessage(msg) {
|
|
7431
|
+
switch (msg.type) {
|
|
7432
|
+
case "CLUSTER_MERKLE_ROOT_REQ":
|
|
7433
|
+
this.handleMerkleRootReq(msg);
|
|
7434
|
+
break;
|
|
7435
|
+
case "CLUSTER_MERKLE_ROOT_RESP":
|
|
7436
|
+
this.handleResponse(msg);
|
|
7437
|
+
break;
|
|
7438
|
+
case "CLUSTER_MERKLE_BUCKETS_REQ":
|
|
7439
|
+
this.handleMerkleBucketsReq(msg);
|
|
7440
|
+
break;
|
|
7441
|
+
case "CLUSTER_MERKLE_BUCKETS_RESP":
|
|
7442
|
+
this.handleResponse(msg);
|
|
7443
|
+
break;
|
|
7444
|
+
case "CLUSTER_MERKLE_KEYS_REQ":
|
|
7445
|
+
this.handleMerkleKeysReq(msg);
|
|
7446
|
+
break;
|
|
7447
|
+
case "CLUSTER_MERKLE_KEYS_RESP":
|
|
7448
|
+
this.handleResponse(msg);
|
|
7449
|
+
break;
|
|
7450
|
+
case "CLUSTER_REPAIR_DATA_REQ":
|
|
7451
|
+
this.handleRepairDataReq(msg);
|
|
7452
|
+
break;
|
|
7453
|
+
case "CLUSTER_REPAIR_DATA_RESP":
|
|
7454
|
+
this.handleResponse(msg);
|
|
7455
|
+
break;
|
|
7456
|
+
}
|
|
7457
|
+
}
|
|
7458
|
+
// === Request Handlers (Passive) ===
|
|
7459
|
+
handleMerkleRootReq(msg) {
|
|
7460
|
+
const { requestId, partitionId } = msg.payload;
|
|
7461
|
+
const rootHash = this.merkleManager.getRootHash(partitionId);
|
|
7462
|
+
this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_ROOT_RESP", {
|
|
7463
|
+
requestId,
|
|
7464
|
+
partitionId,
|
|
7465
|
+
rootHash
|
|
7466
|
+
});
|
|
7467
|
+
}
|
|
7468
|
+
handleMerkleBucketsReq(msg) {
|
|
7469
|
+
const { requestId, partitionId } = msg.payload;
|
|
7470
|
+
const tree = this.merkleManager.serializeTree(partitionId);
|
|
7471
|
+
this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_BUCKETS_RESP", {
|
|
7472
|
+
requestId,
|
|
7473
|
+
partitionId,
|
|
7474
|
+
buckets: tree?.buckets || {}
|
|
7475
|
+
});
|
|
7476
|
+
}
|
|
7477
|
+
handleMerkleKeysReq(msg) {
|
|
7478
|
+
const { requestId, partitionId, path } = msg.payload;
|
|
7479
|
+
const keys = this.merkleManager.getKeysInBucket(partitionId, path);
|
|
7480
|
+
this.clusterManager.send(msg.senderId, "CLUSTER_MERKLE_KEYS_RESP", {
|
|
7481
|
+
requestId,
|
|
7482
|
+
partitionId,
|
|
7483
|
+
path,
|
|
7484
|
+
keys
|
|
7485
|
+
});
|
|
7486
|
+
}
|
|
7487
|
+
handleRepairDataReq(msg) {
|
|
7488
|
+
const { requestId, key } = msg.payload;
|
|
7489
|
+
if (!this.getRecord) return;
|
|
7490
|
+
const record = this.getRecord(key);
|
|
7491
|
+
this.clusterManager.send(msg.senderId, "CLUSTER_REPAIR_DATA_RESP", {
|
|
7492
|
+
requestId,
|
|
7493
|
+
key,
|
|
7494
|
+
record
|
|
7495
|
+
});
|
|
7496
|
+
}
|
|
7497
|
+
handleResponse(msg) {
|
|
7498
|
+
const { requestId } = msg.payload;
|
|
7499
|
+
const pending = this.pendingRequests.get(requestId);
|
|
7500
|
+
if (pending) {
|
|
7501
|
+
clearTimeout(pending.timer);
|
|
7502
|
+
this.pendingRequests.delete(requestId);
|
|
7503
|
+
pending.resolve(msg.payload);
|
|
7504
|
+
}
|
|
7505
|
+
}
|
|
7506
|
+
// === Lifecycle Methods ===
|
|
7507
|
+
/**
|
|
7508
|
+
* Start the repair scheduler
|
|
7509
|
+
*/
|
|
7510
|
+
start() {
|
|
7511
|
+
if (this.started || !this.config.enabled) return;
|
|
7512
|
+
this.started = true;
|
|
7513
|
+
logger.info({ config: this.config }, "Starting RepairScheduler");
|
|
7514
|
+
this.scanTimer = setInterval(() => {
|
|
7515
|
+
this.scheduleFullScan();
|
|
7516
|
+
}, this.config.scanIntervalMs);
|
|
7517
|
+
this.processTimer = setInterval(() => {
|
|
7518
|
+
this.processRepairQueue();
|
|
7519
|
+
}, 1e3);
|
|
7520
|
+
setTimeout(() => {
|
|
7521
|
+
this.scheduleFullScan();
|
|
7522
|
+
}, 6e4);
|
|
7523
|
+
}
|
|
7524
|
+
/**
|
|
7525
|
+
* Stop the repair scheduler
|
|
7526
|
+
*/
|
|
7527
|
+
stop() {
|
|
7528
|
+
if (!this.started) return;
|
|
7529
|
+
this.started = false;
|
|
7530
|
+
if (this.scanTimer) {
|
|
7531
|
+
clearInterval(this.scanTimer);
|
|
7532
|
+
this.scanTimer = void 0;
|
|
7533
|
+
}
|
|
7534
|
+
if (this.processTimer) {
|
|
7535
|
+
clearInterval(this.processTimer);
|
|
7536
|
+
this.processTimer = void 0;
|
|
7537
|
+
}
|
|
7538
|
+
this.repairQueue = [];
|
|
7539
|
+
this.activeRepairs.clear();
|
|
7540
|
+
for (const [id, req] of this.pendingRequests) {
|
|
7541
|
+
clearTimeout(req.timer);
|
|
7542
|
+
req.reject(new Error("Scheduler stopped"));
|
|
7543
|
+
}
|
|
7544
|
+
this.pendingRequests.clear();
|
|
7545
|
+
logger.info("RepairScheduler stopped");
|
|
7546
|
+
}
|
|
7547
|
+
/**
|
|
7548
|
+
* Schedule a full scan of all owned partitions
|
|
7549
|
+
*/
|
|
7550
|
+
scheduleFullScan() {
|
|
7551
|
+
const ownedPartitions = this.getOwnedPartitions();
|
|
7552
|
+
const replicas = this.getReplicaPartitions();
|
|
7553
|
+
const allPartitions = [.../* @__PURE__ */ new Set([...ownedPartitions, ...replicas])];
|
|
7554
|
+
logger.info({
|
|
7555
|
+
ownedCount: ownedPartitions.length,
|
|
7556
|
+
replicaCount: replicas.length,
|
|
7557
|
+
totalPartitions: allPartitions.length
|
|
7558
|
+
}, "Scheduling full anti-entropy scan");
|
|
7559
|
+
for (const partitionId of allPartitions) {
|
|
7560
|
+
this.schedulePartitionRepair(partitionId);
|
|
7561
|
+
}
|
|
7562
|
+
this.metrics.scansCompleted++;
|
|
7563
|
+
this.metrics.lastScanTime = Date.now();
|
|
7564
|
+
}
|
|
7565
|
+
/**
|
|
7566
|
+
* Schedule repair for a specific partition
|
|
7567
|
+
*/
|
|
7568
|
+
schedulePartitionRepair(partitionId, priority = "normal") {
|
|
7569
|
+
const backups = this.partitionService.getBackups(partitionId);
|
|
7570
|
+
const owner = this.partitionService.getPartitionOwner(partitionId);
|
|
7571
|
+
const replicas = this.nodeId === owner ? backups : owner ? [owner] : [];
|
|
7572
|
+
for (const replicaNodeId of replicas) {
|
|
7573
|
+
const exists = this.repairQueue.some(
|
|
7574
|
+
(t) => t.partitionId === partitionId && t.replicaNodeId === replicaNodeId
|
|
7575
|
+
);
|
|
7576
|
+
if (exists) continue;
|
|
7577
|
+
this.repairQueue.push({
|
|
7578
|
+
partitionId,
|
|
7579
|
+
replicaNodeId,
|
|
7580
|
+
priority,
|
|
7581
|
+
scheduledAt: Date.now()
|
|
7582
|
+
});
|
|
7583
|
+
}
|
|
7584
|
+
this.sortRepairQueue();
|
|
7585
|
+
}
|
|
7586
|
+
/**
|
|
7587
|
+
* Sort repair queue by priority
|
|
7588
|
+
*/
|
|
7589
|
+
sortRepairQueue() {
|
|
7590
|
+
const priorityOrder = { high: 0, normal: 1, low: 2 };
|
|
7591
|
+
this.repairQueue.sort((a, b) => {
|
|
7592
|
+
const priorityDiff = priorityOrder[a.priority] - priorityOrder[b.priority];
|
|
7593
|
+
if (priorityDiff !== 0) return priorityDiff;
|
|
7594
|
+
if (this.config.prioritizeRecent) {
|
|
7595
|
+
const infoA = this.merkleManager.getPartitionInfo(a.partitionId);
|
|
7596
|
+
const infoB = this.merkleManager.getPartitionInfo(b.partitionId);
|
|
7597
|
+
if (infoA && infoB) {
|
|
7598
|
+
return infoB.lastUpdated - infoA.lastUpdated;
|
|
7599
|
+
}
|
|
7600
|
+
}
|
|
7601
|
+
return a.scheduledAt - b.scheduledAt;
|
|
7602
|
+
});
|
|
7603
|
+
}
|
|
7604
|
+
/**
|
|
7605
|
+
* Process the repair queue
|
|
7606
|
+
*/
|
|
7607
|
+
async processRepairQueue() {
|
|
7608
|
+
if (this.activeRepairs.size >= this.config.maxConcurrentRepairs) {
|
|
7609
|
+
return;
|
|
7610
|
+
}
|
|
7611
|
+
const task = this.repairQueue.shift();
|
|
7612
|
+
if (!task) return;
|
|
7613
|
+
if (this.activeRepairs.has(task.partitionId)) {
|
|
7614
|
+
return;
|
|
7615
|
+
}
|
|
7616
|
+
if (!this.clusterManager.getMembers().includes(task.replicaNodeId)) {
|
|
7617
|
+
logger.debug({ task }, "Skipping repair - replica not available");
|
|
7618
|
+
return;
|
|
7619
|
+
}
|
|
7620
|
+
this.activeRepairs.add(task.partitionId);
|
|
7621
|
+
try {
|
|
7622
|
+
const result = await this.executeRepair(task);
|
|
7623
|
+
this.emit("repairComplete", result);
|
|
7624
|
+
if (result.success) {
|
|
7625
|
+
this.metrics.repairsExecuted++;
|
|
7626
|
+
this.metrics.keysRepaired += result.keysRepaired;
|
|
7627
|
+
this.updateAverageRepairDuration(result.durationMs);
|
|
7628
|
+
} else {
|
|
7629
|
+
this.metrics.errorsEncountered++;
|
|
7630
|
+
}
|
|
7631
|
+
} catch (error) {
|
|
7632
|
+
logger.error({ task, error }, "Repair failed");
|
|
7633
|
+
this.metrics.errorsEncountered++;
|
|
7634
|
+
} finally {
|
|
7635
|
+
this.activeRepairs.delete(task.partitionId);
|
|
7636
|
+
}
|
|
7637
|
+
}
|
|
7638
|
+
/**
|
|
7639
|
+
* Execute repair for a partition-replica pair
|
|
7640
|
+
*/
|
|
7641
|
+
async executeRepair(task) {
|
|
7642
|
+
const startTime = Date.now();
|
|
7643
|
+
let keysScanned = 0;
|
|
7644
|
+
let keysRepaired = 0;
|
|
7645
|
+
try {
|
|
7646
|
+
const localRoot = this.merkleManager.getRootHash(task.partitionId);
|
|
7647
|
+
const remoteRoot = await this.requestRemoteMerkleRoot(task.replicaNodeId, task.partitionId);
|
|
7648
|
+
if (localRoot === remoteRoot) {
|
|
7649
|
+
logger.debug({
|
|
7650
|
+
partitionId: task.partitionId,
|
|
7651
|
+
replicaNodeId: task.replicaNodeId
|
|
7652
|
+
}, "Partition in sync");
|
|
7653
|
+
return {
|
|
7654
|
+
partitionId: task.partitionId,
|
|
7655
|
+
replicaNodeId: task.replicaNodeId,
|
|
7656
|
+
keysScanned: 0,
|
|
7657
|
+
keysRepaired: 0,
|
|
7658
|
+
durationMs: Date.now() - startTime,
|
|
7659
|
+
success: true
|
|
7660
|
+
};
|
|
7661
|
+
}
|
|
7662
|
+
const differences = await this.findDifferences(task.partitionId, task.replicaNodeId);
|
|
7663
|
+
keysScanned = differences.length;
|
|
7664
|
+
for (const key of differences) {
|
|
7665
|
+
const repaired = await this.repairKey(task.partitionId, task.replicaNodeId, key);
|
|
7666
|
+
if (repaired) {
|
|
7667
|
+
keysRepaired++;
|
|
7668
|
+
}
|
|
7669
|
+
if (keysRepaired % this.config.repairBatchSize === 0) {
|
|
7670
|
+
await this.sleep(this.config.throttleMs);
|
|
7671
|
+
}
|
|
7672
|
+
}
|
|
7673
|
+
logger.info({
|
|
7674
|
+
partitionId: task.partitionId,
|
|
7675
|
+
replicaNodeId: task.replicaNodeId,
|
|
7676
|
+
keysScanned,
|
|
7677
|
+
keysRepaired,
|
|
7678
|
+
durationMs: Date.now() - startTime
|
|
7679
|
+
}, "Partition repair completed");
|
|
7680
|
+
return {
|
|
7681
|
+
partitionId: task.partitionId,
|
|
7682
|
+
replicaNodeId: task.replicaNodeId,
|
|
7683
|
+
keysScanned,
|
|
7684
|
+
keysRepaired,
|
|
7685
|
+
durationMs: Date.now() - startTime,
|
|
7686
|
+
success: true
|
|
7687
|
+
};
|
|
7688
|
+
} catch (error) {
|
|
7689
|
+
return {
|
|
7690
|
+
partitionId: task.partitionId,
|
|
7691
|
+
replicaNodeId: task.replicaNodeId,
|
|
7692
|
+
keysScanned,
|
|
7693
|
+
keysRepaired,
|
|
7694
|
+
durationMs: Date.now() - startTime,
|
|
7695
|
+
success: false,
|
|
7696
|
+
error: String(error)
|
|
7697
|
+
};
|
|
7698
|
+
}
|
|
7699
|
+
}
|
|
7700
|
+
/**
|
|
7701
|
+
* Send a request and wait for response
|
|
7702
|
+
*/
|
|
7703
|
+
sendRequest(nodeId, type, payload) {
|
|
7704
|
+
return new Promise((resolve, reject) => {
|
|
7705
|
+
const requestId = Math.random().toString(36).substring(7);
|
|
7706
|
+
const timer = setTimeout(() => {
|
|
7707
|
+
this.pendingRequests.delete(requestId);
|
|
7708
|
+
reject(new Error(`Request timeout: ${type} to ${nodeId}`));
|
|
7709
|
+
}, this.config.requestTimeoutMs);
|
|
7710
|
+
this.pendingRequests.set(requestId, { resolve, reject, timer });
|
|
7711
|
+
this.clusterManager.send(nodeId, type, { ...payload, requestId });
|
|
7712
|
+
});
|
|
7713
|
+
}
|
|
7714
|
+
/**
|
|
7715
|
+
* Request Merkle root from remote node
|
|
7716
|
+
*/
|
|
7717
|
+
async requestRemoteMerkleRoot(nodeId, partitionId) {
|
|
7718
|
+
const response = await this.sendRequest(
|
|
7719
|
+
nodeId,
|
|
7720
|
+
"CLUSTER_MERKLE_ROOT_REQ",
|
|
7721
|
+
{ partitionId }
|
|
7722
|
+
);
|
|
7723
|
+
return response.rootHash;
|
|
7724
|
+
}
|
|
7725
|
+
/**
|
|
7726
|
+
* Find keys that differ between local and remote using bucket exchange
|
|
7727
|
+
*/
|
|
7728
|
+
async findDifferences(partitionId, replicaNodeId) {
|
|
7729
|
+
const response = await this.sendRequest(
|
|
7730
|
+
replicaNodeId,
|
|
7731
|
+
"CLUSTER_MERKLE_BUCKETS_REQ",
|
|
7732
|
+
{ partitionId }
|
|
7733
|
+
);
|
|
7734
|
+
const remoteBuckets = response.buckets;
|
|
7735
|
+
const localTree = this.merkleManager.getTree(partitionId);
|
|
7736
|
+
if (!localTree) return [];
|
|
7737
|
+
const differingKeys = /* @__PURE__ */ new Set();
|
|
7738
|
+
const queue = [""];
|
|
7739
|
+
const maxDepth = 3;
|
|
7740
|
+
while (queue.length > 0) {
|
|
7741
|
+
const path = queue.shift();
|
|
7742
|
+
const localChildren = localTree.getBuckets(path);
|
|
7743
|
+
const remoteChildren = remoteBuckets[path] || {};
|
|
7744
|
+
const allChars = /* @__PURE__ */ new Set([...Object.keys(localChildren), ...Object.keys(remoteChildren)]);
|
|
7745
|
+
for (const char of allChars) {
|
|
7746
|
+
const localHash = localChildren[char] || 0;
|
|
7747
|
+
const remoteHash = remoteChildren[char] || 0;
|
|
7748
|
+
if (localHash !== remoteHash) {
|
|
7749
|
+
const nextPath = path + char;
|
|
7750
|
+
if (nextPath.length >= maxDepth) {
|
|
7751
|
+
const bucketKeysResp = await this.sendRequest(
|
|
7752
|
+
replicaNodeId,
|
|
7753
|
+
"CLUSTER_MERKLE_KEYS_REQ",
|
|
7754
|
+
{ partitionId, path: nextPath }
|
|
7755
|
+
);
|
|
7756
|
+
const localBucketKeys = localTree.getKeysInBucket(nextPath);
|
|
7757
|
+
const remoteBucketKeys = bucketKeysResp.keys;
|
|
7758
|
+
for (const k of localBucketKeys) differingKeys.add(k);
|
|
7759
|
+
for (const k of remoteBucketKeys) differingKeys.add(k);
|
|
7760
|
+
} else {
|
|
7761
|
+
queue.push(nextPath);
|
|
7762
|
+
}
|
|
7763
|
+
}
|
|
7764
|
+
}
|
|
7765
|
+
}
|
|
7766
|
+
return Array.from(differingKeys);
|
|
7767
|
+
}
|
|
7768
|
+
/**
|
|
7769
|
+
* Repair a single key
|
|
7770
|
+
*/
|
|
7771
|
+
async repairKey(partitionId, replicaNodeId, key) {
|
|
7772
|
+
if (!this.getRecord || !this.setRecord) {
|
|
7773
|
+
return false;
|
|
7774
|
+
}
|
|
7775
|
+
const localRecord = this.getRecord(key);
|
|
7776
|
+
let remoteRecord;
|
|
7777
|
+
try {
|
|
7778
|
+
const response = await this.sendRequest(
|
|
7779
|
+
replicaNodeId,
|
|
7780
|
+
"CLUSTER_REPAIR_DATA_REQ",
|
|
7781
|
+
{ key }
|
|
7782
|
+
);
|
|
7783
|
+
remoteRecord = response.record;
|
|
7784
|
+
} catch (e) {
|
|
7785
|
+
logger.warn({ key, replicaNodeId, err: e }, "Failed to fetch remote record for repair");
|
|
7786
|
+
return false;
|
|
7787
|
+
}
|
|
7788
|
+
const resolved = this.resolveConflict(localRecord, remoteRecord);
|
|
7789
|
+
if (!resolved) return false;
|
|
7790
|
+
if (JSON.stringify(resolved) !== JSON.stringify(localRecord)) {
|
|
7791
|
+
this.setRecord(key, resolved);
|
|
7792
|
+
if (JSON.stringify(resolved) !== JSON.stringify(remoteRecord)) {
|
|
7793
|
+
this.clusterManager.send(replicaNodeId, "CLUSTER_REPAIR_DATA_RESP", {
|
|
7794
|
+
// In future: Use dedicated WRITE/REPAIR message
|
|
7795
|
+
// For now we rely on the fact that repair will eventually run on other node too
|
|
7796
|
+
});
|
|
7797
|
+
}
|
|
7798
|
+
return true;
|
|
7799
|
+
}
|
|
7800
|
+
return false;
|
|
7801
|
+
}
|
|
7802
|
+
/**
|
|
7803
|
+
* Resolve conflict between two records using LWW
|
|
7804
|
+
*/
|
|
7805
|
+
resolveConflict(a, b) {
|
|
7806
|
+
if (!a && !b) return null;
|
|
7807
|
+
if (!a) return b;
|
|
7808
|
+
if (!b) return a;
|
|
7809
|
+
if (this.compareTimestamps(a.timestamp, b.timestamp) > 0) {
|
|
7810
|
+
return a;
|
|
7811
|
+
}
|
|
7812
|
+
if (this.compareTimestamps(b.timestamp, a.timestamp) > 0) {
|
|
7813
|
+
return b;
|
|
7814
|
+
}
|
|
7815
|
+
if (a.timestamp.nodeId > b.timestamp.nodeId) {
|
|
7816
|
+
return a;
|
|
7817
|
+
}
|
|
7818
|
+
return b;
|
|
7819
|
+
}
|
|
7820
|
+
/**
|
|
7821
|
+
* Compare two timestamps
|
|
7822
|
+
*/
|
|
7823
|
+
compareTimestamps(a, b) {
|
|
7824
|
+
if (a.millis !== b.millis) {
|
|
7825
|
+
return a.millis - b.millis;
|
|
7826
|
+
}
|
|
7827
|
+
return a.counter - b.counter;
|
|
7828
|
+
}
|
|
7829
|
+
/**
|
|
7830
|
+
* Get partitions owned by this node
|
|
7831
|
+
*/
|
|
7832
|
+
getOwnedPartitions() {
|
|
7833
|
+
const owned = [];
|
|
7834
|
+
for (let i = 0; i < import_core13.PARTITION_COUNT; i++) {
|
|
7835
|
+
if (this.partitionService.getPartitionOwner(i) === this.nodeId) {
|
|
7836
|
+
owned.push(i);
|
|
7837
|
+
}
|
|
7838
|
+
}
|
|
7839
|
+
return owned;
|
|
7840
|
+
}
|
|
7841
|
+
/**
|
|
7842
|
+
* Get partitions where this node is a backup
|
|
7843
|
+
*/
|
|
7844
|
+
getReplicaPartitions() {
|
|
7845
|
+
const replicas = [];
|
|
7846
|
+
for (let i = 0; i < import_core13.PARTITION_COUNT; i++) {
|
|
7847
|
+
const backups = this.partitionService.getBackups(i);
|
|
7848
|
+
if (backups.includes(this.nodeId)) {
|
|
7849
|
+
replicas.push(i);
|
|
7850
|
+
}
|
|
7851
|
+
}
|
|
7852
|
+
return replicas;
|
|
7853
|
+
}
|
|
7854
|
+
/**
|
|
7855
|
+
* Update average repair duration
|
|
7856
|
+
*/
|
|
7857
|
+
updateAverageRepairDuration(durationMs) {
|
|
7858
|
+
const count = this.metrics.repairsExecuted;
|
|
7859
|
+
const currentAvg = this.metrics.averageRepairDurationMs;
|
|
7860
|
+
this.metrics.averageRepairDurationMs = (currentAvg * (count - 1) + durationMs) / count;
|
|
7861
|
+
}
|
|
7862
|
+
/**
|
|
7863
|
+
* Get repair metrics
|
|
7864
|
+
*/
|
|
7865
|
+
getMetrics() {
|
|
7866
|
+
return { ...this.metrics };
|
|
7867
|
+
}
|
|
7868
|
+
/**
|
|
7869
|
+
* Get repair queue status
|
|
7870
|
+
*/
|
|
7871
|
+
getQueueStatus() {
|
|
7872
|
+
return {
|
|
7873
|
+
queueLength: this.repairQueue.length,
|
|
7874
|
+
activeRepairs: this.activeRepairs.size,
|
|
7875
|
+
maxConcurrent: this.config.maxConcurrentRepairs
|
|
7876
|
+
};
|
|
7877
|
+
}
|
|
7878
|
+
/**
|
|
7879
|
+
* Force immediate repair for a partition
|
|
7880
|
+
*/
|
|
7881
|
+
forceRepair(partitionId) {
|
|
7882
|
+
this.schedulePartitionRepair(partitionId, "high");
|
|
7883
|
+
}
|
|
7884
|
+
/**
|
|
7885
|
+
* Sleep utility
|
|
7886
|
+
*/
|
|
7887
|
+
sleep(ms) {
|
|
7888
|
+
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
7889
|
+
}
|
|
7890
|
+
};
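// Illustrative sketch (not part of the published bundle): the LWW rule applied by
// resolveConflict/compareTimestamps above, restated as a standalone function. The
// { millis, counter, nodeId } timestamp shape mirrors the fields referenced above;
// the record values are assumptions made for the example.
function lwwPick(a, b) {
  if (!a && !b) return null;
  if (!a) return b;
  if (!b) return a;
  if (a.timestamp.millis !== b.timestamp.millis) {
    return a.timestamp.millis > b.timestamp.millis ? a : b;
  }
  if (a.timestamp.counter !== b.timestamp.counter) {
    return a.timestamp.counter > b.timestamp.counter ? a : b;
  }
  // Node id breaks exact ties, matching the final comparison above.
  return a.timestamp.nodeId > b.timestamp.nodeId ? a : b;
}
// lwwPick picks the record with the later hybrid-logical timestamp:
// lwwPick(
//   { value: "old", timestamp: { millis: 100, counter: 0, nodeId: "n1" } },
//   { value: "new", timestamp: { millis: 200, counter: 0, nodeId: "n2" } }
// ).value === "new"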
|
|
7891
|
+
|
|
7892
|
+
// src/handlers/CounterHandler.ts
|
|
7893
|
+
var import_core14 = require("@topgunbuild/core");
|
|
7894
|
+
var CounterHandler = class {
|
|
7895
|
+
// counterName -> Set<clientId>
|
|
7896
|
+
constructor(nodeId = "server") {
|
|
7897
|
+
this.nodeId = nodeId;
|
|
7898
|
+
this.counters = /* @__PURE__ */ new Map();
|
|
7899
|
+
this.subscriptions = /* @__PURE__ */ new Map();
|
|
7900
|
+
}
|
|
7901
|
+
/**
|
|
7902
|
+
* Get or create a counter by name.
|
|
7903
|
+
*/
|
|
7904
|
+
getOrCreateCounter(name) {
|
|
7905
|
+
let counter = this.counters.get(name);
|
|
7906
|
+
if (!counter) {
|
|
7907
|
+
counter = new import_core14.PNCounterImpl({ nodeId: this.nodeId });
|
|
7908
|
+
this.counters.set(name, counter);
|
|
7909
|
+
logger.debug({ name }, "Created new counter");
|
|
7910
|
+
}
|
|
7911
|
+
return counter;
|
|
7912
|
+
}
|
|
7913
|
+
/**
|
|
7914
|
+
* Handle COUNTER_REQUEST - client wants initial state.
|
|
7915
|
+
* @returns Response message to send back to client
|
|
7916
|
+
*/
|
|
7917
|
+
handleCounterRequest(clientId, name) {
|
|
7918
|
+
const counter = this.getOrCreateCounter(name);
|
|
7919
|
+
this.subscribe(clientId, name);
|
|
7920
|
+
const state = counter.getState();
|
|
7921
|
+
logger.debug({ clientId, name, value: counter.get() }, "Counter request handled");
|
|
7922
|
+
return {
|
|
7923
|
+
type: "COUNTER_RESPONSE",
|
|
7924
|
+
payload: {
|
|
7925
|
+
name,
|
|
7926
|
+
state: this.stateToObject(state)
|
|
7927
|
+
}
|
|
7928
|
+
};
|
|
7929
|
+
}
|
|
7930
|
+
/**
|
|
7931
|
+
* Handle COUNTER_SYNC - client sends their state to merge.
|
|
7932
|
+
* @returns Merged state and list of clients to broadcast to
|
|
7933
|
+
*/
|
|
7934
|
+
handleCounterSync(clientId, name, stateObj) {
|
|
7935
|
+
const counter = this.getOrCreateCounter(name);
|
|
7936
|
+
const incomingState = this.objectToState(stateObj);
|
|
7937
|
+
counter.merge(incomingState);
|
|
7938
|
+
const mergedState = counter.getState();
|
|
7939
|
+
const mergedStateObj = this.stateToObject(mergedState);
|
|
7940
|
+
logger.debug(
|
|
7941
|
+
{ clientId, name, value: counter.get() },
|
|
7942
|
+
"Counter sync handled"
|
|
7943
|
+
);
|
|
7944
|
+
this.subscribe(clientId, name);
|
|
7945
|
+
const subscribers = this.subscriptions.get(name) || /* @__PURE__ */ new Set();
|
|
7946
|
+
const broadcastTo = Array.from(subscribers).filter((id) => id !== clientId);
|
|
7947
|
+
return {
|
|
7948
|
+
// Response to the sending client
|
|
7949
|
+
response: {
|
|
7950
|
+
type: "COUNTER_UPDATE",
|
|
7951
|
+
payload: {
|
|
7952
|
+
name,
|
|
7953
|
+
state: mergedStateObj
|
|
7954
|
+
}
|
|
7955
|
+
},
|
|
7956
|
+
// Broadcast to other clients
|
|
7957
|
+
broadcastTo,
|
|
7958
|
+
broadcastMessage: {
|
|
7959
|
+
type: "COUNTER_UPDATE",
|
|
7960
|
+
payload: {
|
|
7961
|
+
name,
|
|
7962
|
+
state: mergedStateObj
|
|
7963
|
+
}
|
|
7964
|
+
}
|
|
7965
|
+
};
|
|
7966
|
+
}
|
|
7967
|
+
/**
|
|
7968
|
+
* Subscribe a client to counter updates.
|
|
7969
|
+
*/
|
|
7970
|
+
subscribe(clientId, counterName) {
|
|
7971
|
+
if (!this.subscriptions.has(counterName)) {
|
|
7972
|
+
this.subscriptions.set(counterName, /* @__PURE__ */ new Set());
|
|
7973
|
+
}
|
|
7974
|
+
this.subscriptions.get(counterName).add(clientId);
|
|
7975
|
+
logger.debug({ clientId, counterName }, "Client subscribed to counter");
|
|
7976
|
+
}
|
|
7977
|
+
/**
|
|
7978
|
+
* Unsubscribe a client from counter updates.
|
|
7979
|
+
*/
|
|
7980
|
+
unsubscribe(clientId, counterName) {
|
|
7981
|
+
const subs = this.subscriptions.get(counterName);
|
|
7982
|
+
if (subs) {
|
|
7983
|
+
subs.delete(clientId);
|
|
7984
|
+
if (subs.size === 0) {
|
|
7985
|
+
this.subscriptions.delete(counterName);
|
|
7986
|
+
}
|
|
7987
|
+
}
|
|
7988
|
+
}
|
|
7989
|
+
/**
|
|
7990
|
+
* Unsubscribe a client from all counters (e.g., on disconnect).
|
|
7991
|
+
*/
|
|
7992
|
+
unsubscribeAll(clientId) {
|
|
7993
|
+
for (const [counterName, subs] of this.subscriptions) {
|
|
7994
|
+
subs.delete(clientId);
|
|
7995
|
+
if (subs.size === 0) {
|
|
7996
|
+
this.subscriptions.delete(counterName);
|
|
7997
|
+
}
|
|
7998
|
+
}
|
|
7999
|
+
logger.debug({ clientId }, "Client unsubscribed from all counters");
|
|
8000
|
+
}
|
|
8001
|
+
/**
|
|
8002
|
+
* Get current counter value (for monitoring/debugging).
|
|
8003
|
+
*/
|
|
8004
|
+
getCounterValue(name) {
|
|
8005
|
+
const counter = this.counters.get(name);
|
|
8006
|
+
return counter ? counter.get() : 0;
|
|
8007
|
+
}
|
|
8008
|
+
/**
|
|
8009
|
+
* Get all counter names.
|
|
8010
|
+
*/
|
|
8011
|
+
getCounterNames() {
|
|
8012
|
+
return Array.from(this.counters.keys());
|
|
8013
|
+
}
|
|
8014
|
+
/**
|
|
8015
|
+
* Get number of subscribers for a counter.
|
|
8016
|
+
*/
|
|
8017
|
+
getSubscriberCount(name) {
|
|
8018
|
+
return this.subscriptions.get(name)?.size || 0;
|
|
8019
|
+
}
|
|
8020
|
+
/**
|
|
8021
|
+
* Convert Map-based state to plain object for serialization.
|
|
8022
|
+
*/
|
|
8023
|
+
stateToObject(state) {
|
|
8024
|
+
return {
|
|
8025
|
+
p: Object.fromEntries(state.positive),
|
|
8026
|
+
n: Object.fromEntries(state.negative)
|
|
8027
|
+
};
|
|
8028
|
+
}
|
|
8029
|
+
/**
|
|
8030
|
+
* Convert plain object to Map-based state.
|
|
8031
|
+
*/
|
|
8032
|
+
objectToState(obj) {
|
|
8033
|
+
return {
|
|
8034
|
+
positive: new Map(Object.entries(obj.p || {})),
|
|
8035
|
+
negative: new Map(Object.entries(obj.n || {}))
|
|
8036
|
+
};
|
|
8037
|
+
}
|
|
8038
|
+
};
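// Illustrative sketch (not part of the published bundle): the { p, n } wire format
// produced by stateToObject above maps node ids to positive and negative totals; a
// PN-counter's value is the positive sum minus the negative sum. The payload below
// is an assumed example.
function counterValueFromWire(obj) {
  const sum = (m) => Object.values(m || {}).reduce((acc, v) => acc + v, 0);
  return sum(obj.p) - sum(obj.n);
}
// Example COUNTER_UPDATE-style state: increments of 5 and 2, one decrement of 1.
// counterValueFromWire({ p: { server: 5, clientA: 2 }, n: { clientA: 1 } }) === 6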
|
|
8039
|
+
|
|
8040
|
+
// src/handlers/EntryProcessorHandler.ts
|
|
8041
|
+
var import_core16 = require("@topgunbuild/core");
|
|
8042
|
+
|
|
8043
|
+
// src/ProcessorSandbox.ts
|
|
8044
|
+
var import_core15 = require("@topgunbuild/core");
|
|
8045
|
+
var ivm = null;
|
|
8046
|
+
try {
|
|
8047
|
+
ivm = require("isolated-vm");
|
|
8048
|
+
} catch {
|
|
8049
|
+
const isProduction = process.env.NODE_ENV === "production";
|
|
8050
|
+
if (isProduction) {
|
|
8051
|
+
logger.error(
|
|
8052
|
+
"SECURITY WARNING: isolated-vm not available in production! Entry processors will run in less secure fallback mode. Install isolated-vm for production environments: pnpm add isolated-vm"
|
|
8053
|
+
);
|
|
8054
|
+
} else {
|
|
8055
|
+
logger.warn("isolated-vm not available, falling back to less secure VM");
|
|
8056
|
+
}
|
|
8057
|
+
}
|
|
8058
|
+
var DEFAULT_SANDBOX_CONFIG = {
|
|
8059
|
+
memoryLimitMb: 8,
|
|
8060
|
+
timeoutMs: 100,
|
|
8061
|
+
maxCachedIsolates: 100,
|
|
8062
|
+
strictValidation: true
|
|
8063
|
+
};
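// Illustrative sketch (not part of the published bundle): the ProcessorSandbox
// constructor below spreads caller overrides over DEFAULT_SANDBOX_CONFIG, so a
// partial config is enough. The override value here is an example.
const effectiveSandboxConfig = { ...DEFAULT_SANDBOX_CONFIG, timeoutMs: 250 };
// -> { memoryLimitMb: 8, timeoutMs: 250, maxCachedIsolates: 100, strictValidation: true }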
|
|
8064
|
+
var ProcessorSandbox = class {
|
|
8065
|
+
constructor(config = {}) {
|
|
8066
|
+
this.isolateCache = /* @__PURE__ */ new Map();
|
|
8067
|
+
this.scriptCache = /* @__PURE__ */ new Map();
|
|
8068
|
+
this.fallbackScriptCache = /* @__PURE__ */ new Map();
|
|
8069
|
+
this.disposed = false;
|
|
8070
|
+
this.config = { ...DEFAULT_SANDBOX_CONFIG, ...config };
|
|
8071
|
+
}
|
|
8072
|
+
/**
|
|
8073
|
+
* Execute an entry processor in the sandbox.
|
|
8074
|
+
*
|
|
8075
|
+
* @param processor The processor definition (name, code, args)
|
|
8076
|
+
* @param value The current value for the key (or undefined)
|
|
8077
|
+
* @param key The key being processed
|
|
8078
|
+
* @returns Result containing success status, result, and new value
|
|
8079
|
+
*/
|
|
8080
|
+
async execute(processor, value, key) {
|
|
8081
|
+
if (this.disposed) {
|
|
8082
|
+
return {
|
|
8083
|
+
success: false,
|
|
8084
|
+
error: "Sandbox has been disposed"
|
|
8085
|
+
};
|
|
8086
|
+
}
|
|
8087
|
+
if (this.config.strictValidation) {
|
|
8088
|
+
const validation = (0, import_core15.validateProcessorCode)(processor.code);
|
|
8089
|
+
if (!validation.valid) {
|
|
8090
|
+
return {
|
|
8091
|
+
success: false,
|
|
8092
|
+
error: validation.error
|
|
8093
|
+
};
|
|
8094
|
+
}
|
|
8095
|
+
}
|
|
8096
|
+
if (ivm) {
|
|
8097
|
+
return this.executeInIsolate(processor, value, key);
|
|
8098
|
+
} else {
|
|
8099
|
+
return this.executeInFallback(processor, value, key);
|
|
8100
|
+
}
|
|
8101
|
+
}
|
|
8102
|
+
/**
|
|
8103
|
+
* Execute processor in isolated-vm (secure production mode).
|
|
8104
|
+
*/
|
|
8105
|
+
async executeInIsolate(processor, value, key) {
|
|
8106
|
+
if (!ivm) {
|
|
8107
|
+
return { success: false, error: "isolated-vm not available" };
|
|
8108
|
+
}
|
|
8109
|
+
const isolate = this.getOrCreateIsolate(processor.name);
|
|
8110
|
+
try {
|
|
8111
|
+
const context = await isolate.createContext();
|
|
8112
|
+
const jail = context.global;
|
|
8113
|
+
await jail.set("global", jail.derefInto());
|
|
8114
|
+
await context.eval(`
|
|
8115
|
+
var value = ${JSON.stringify(value)};
|
|
8116
|
+
var key = ${JSON.stringify(key)};
|
|
8117
|
+
var args = ${JSON.stringify(processor.args)};
|
|
8118
|
+
`);
|
|
8119
|
+
const wrappedCode = `
|
|
8120
|
+
(function() {
|
|
8121
|
+
${processor.code}
|
|
8122
|
+
})()
|
|
8123
|
+
`;
|
|
8124
|
+
const script = await this.getOrCompileScript(
|
|
6870
8125
|
processor.name,
|
|
6871
8126
|
wrappedCode,
|
|
6872
8127
|
isolate
|
|
@@ -7045,7 +8300,7 @@ var EntryProcessorHandler = class {
|
|
|
7045
8300
|
* @returns Result with success status, processor result, and new value
|
|
7046
8301
|
*/
|
|
7047
8302
|
async executeOnKey(map, key, processorDef) {
|
|
7048
|
-
const parseResult =
|
|
8303
|
+
const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
|
|
7049
8304
|
if (!parseResult.success) {
|
|
7050
8305
|
logger.warn(
|
|
7051
8306
|
{ key, error: parseResult.error.message },
|
|
@@ -7111,7 +8366,7 @@ var EntryProcessorHandler = class {
|
|
|
7111
8366
|
async executeOnKeys(map, keys, processorDef) {
|
|
7112
8367
|
const results = /* @__PURE__ */ new Map();
|
|
7113
8368
|
const timestamps = /* @__PURE__ */ new Map();
|
|
7114
|
-
const parseResult =
|
|
8369
|
+
const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
|
|
7115
8370
|
if (!parseResult.success) {
|
|
7116
8371
|
const errorResult = {
|
|
7117
8372
|
success: false,
|
|
@@ -7148,7 +8403,7 @@ var EntryProcessorHandler = class {
|
|
|
7148
8403
|
async executeOnEntries(map, processorDef, predicateCode) {
|
|
7149
8404
|
const results = /* @__PURE__ */ new Map();
|
|
7150
8405
|
const timestamps = /* @__PURE__ */ new Map();
|
|
7151
|
-
const parseResult =
|
|
8406
|
+
const parseResult = import_core16.EntryProcessorDefSchema.safeParse(processorDef);
|
|
7152
8407
|
if (!parseResult.success) {
|
|
7153
8408
|
return { results, timestamps };
|
|
7154
8409
|
}
|
|
@@ -7207,7 +8462,7 @@ var EntryProcessorHandler = class {
|
|
|
7207
8462
|
};
|
|
7208
8463
|
|
|
7209
8464
|
// src/ConflictResolverService.ts
|
|
7210
|
-
var
|
|
8465
|
+
var import_core17 = require("@topgunbuild/core");
|
|
7211
8466
|
var DEFAULT_CONFLICT_RESOLVER_CONFIG = {
|
|
7212
8467
|
maxResolversPerMap: 100,
|
|
7213
8468
|
enableSandboxedResolvers: true,
|
|
@@ -7238,7 +8493,7 @@ var ConflictResolverService = class {
|
|
|
7238
8493
|
throw new Error("ConflictResolverService has been disposed");
|
|
7239
8494
|
}
|
|
7240
8495
|
if (resolver.code) {
|
|
7241
|
-
const parsed =
|
|
8496
|
+
const parsed = import_core17.ConflictResolverDefSchema.safeParse({
|
|
7242
8497
|
name: resolver.name,
|
|
7243
8498
|
code: resolver.code,
|
|
7244
8499
|
priority: resolver.priority,
|
|
@@ -7247,7 +8502,7 @@ var ConflictResolverService = class {
|
|
|
7247
8502
|
if (!parsed.success) {
|
|
7248
8503
|
throw new Error(`Invalid resolver definition: ${parsed.error.message}`);
|
|
7249
8504
|
}
|
|
7250
|
-
const validation = (0,
|
|
8505
|
+
const validation = (0, import_core17.validateResolverCode)(resolver.code);
|
|
7251
8506
|
if (!validation.valid) {
|
|
7252
8507
|
throw new Error(`Invalid resolver code: ${validation.error}`);
|
|
7253
8508
|
}
|
|
@@ -7309,7 +8564,7 @@ var ConflictResolverService = class {
|
|
|
7309
8564
|
const entries = this.resolvers.get(context.mapName) ?? [];
|
|
7310
8565
|
const allEntries = [
|
|
7311
8566
|
...entries,
|
|
7312
|
-
{ resolver:
|
|
8567
|
+
{ resolver: import_core17.BuiltInResolvers.LWW() }
|
|
7313
8568
|
];
|
|
7314
8569
|
for (const entry of allEntries) {
|
|
7315
8570
|
const { resolver } = entry;
|
|
@@ -7665,9 +8920,9 @@ var ConflictResolverHandler = class {
|
|
|
7665
8920
|
};
|
|
7666
8921
|
|
|
7667
8922
|
// src/EventJournalService.ts
|
|
7668
|
-
var
|
|
8923
|
+
var import_core18 = require("@topgunbuild/core");
|
|
7669
8924
|
var DEFAULT_JOURNAL_SERVICE_CONFIG = {
|
|
7670
|
-
...
|
|
8925
|
+
...import_core18.DEFAULT_EVENT_JOURNAL_CONFIG,
|
|
7671
8926
|
tableName: "event_journal",
|
|
7672
8927
|
persistBatchSize: 100,
|
|
7673
8928
|
persistIntervalMs: 1e3
|
|
@@ -7680,7 +8935,7 @@ function validateTableName(name) {
|
|
|
7680
8935
|
);
|
|
7681
8936
|
}
|
|
7682
8937
|
}
|
|
7683
|
-
var EventJournalService = class extends
|
|
8938
|
+
var EventJournalService = class extends import_core18.EventJournalImpl {
|
|
7684
8939
|
constructor(config) {
|
|
7685
8940
|
super(config);
|
|
7686
8941
|
this.pendingPersist = [];
|
|
@@ -7945,59 +9200,607 @@ var EventJournalService = class extends import_core14.EventJournalImpl {
|
|
|
7945
9200
|
return parseInt(result.rows[0].count, 10);
|
|
7946
9201
|
}
|
|
7947
9202
|
/**
|
|
7948
|
-
* Cleanup old events based on retention policy.
|
|
7949
|
-
*/
|
|
7950
|
-
async cleanupOldEvents(retentionDays) {
|
|
7951
|
-
const result = await this.pool.query(
|
|
7952
|
-
`DELETE FROM ${this.tableName}
|
|
7953
|
-
WHERE created_at < NOW() - ($1 || ' days')::INTERVAL
|
|
7954
|
-
RETURNING sequence`,
|
|
7955
|
-
[retentionDays]
|
|
7956
|
-
);
|
|
7957
|
-
const count = result.rowCount ?? 0;
|
|
7958
|
-
if (count > 0) {
|
|
7959
|
-
logger.info({ deletedCount: count, retentionDays }, "Cleaned up old journal events");
|
|
9203
|
+
* Cleanup old events based on retention policy.
|
|
9204
|
+
*/
|
|
9205
|
+
async cleanupOldEvents(retentionDays) {
|
|
9206
|
+
const result = await this.pool.query(
|
|
9207
|
+
`DELETE FROM ${this.tableName}
|
|
9208
|
+
WHERE created_at < NOW() - ($1 || ' days')::INTERVAL
|
|
9209
|
+
RETURNING sequence`,
|
|
9210
|
+
[retentionDays]
|
|
9211
|
+
);
|
|
9212
|
+
const count = result.rowCount ?? 0;
|
|
9213
|
+
if (count > 0) {
|
|
9214
|
+
logger.info({ deletedCount: count, retentionDays }, "Cleaned up old journal events");
|
|
9215
|
+
}
|
|
9216
|
+
return count;
|
|
9217
|
+
}
|
|
9218
|
+
/**
|
|
9219
|
+
* Start the periodic persistence timer.
|
|
9220
|
+
*/
|
|
9221
|
+
startPersistTimer() {
|
|
9222
|
+
this.persistTimer = setInterval(() => {
|
|
9223
|
+
if (this.pendingPersist.length > 0) {
|
|
9224
|
+
this.persistToStorage().catch((err) => {
|
|
9225
|
+
logger.error({ err }, "Periodic persist failed");
|
|
9226
|
+
});
|
|
9227
|
+
}
|
|
9228
|
+
}, this.persistIntervalMs);
|
|
9229
|
+
}
|
|
9230
|
+
/**
|
|
9231
|
+
* Stop the periodic persistence timer.
|
|
9232
|
+
*/
|
|
9233
|
+
stopPersistTimer() {
|
|
9234
|
+
if (this.persistTimer) {
|
|
9235
|
+
clearInterval(this.persistTimer);
|
|
9236
|
+
this.persistTimer = void 0;
|
|
9237
|
+
}
|
|
9238
|
+
}
|
|
9239
|
+
/**
|
|
9240
|
+
* Dispose resources and persist remaining events.
|
|
9241
|
+
*/
|
|
9242
|
+
dispose() {
|
|
9243
|
+
this.stopPersistTimer();
|
|
9244
|
+
if (this.pendingPersist.length > 0) {
|
|
9245
|
+
this.persistToStorage().catch((err) => {
|
|
9246
|
+
logger.error({ err }, "Final persist failed on dispose");
|
|
9247
|
+
});
|
|
9248
|
+
}
|
|
9249
|
+
super.dispose();
|
|
9250
|
+
}
|
|
9251
|
+
/**
|
|
9252
|
+
* Get pending persist count (for monitoring).
|
|
9253
|
+
*/
|
|
9254
|
+
getPendingPersistCount() {
|
|
9255
|
+
return this.pendingPersist.length;
|
|
9256
|
+
}
|
|
9257
|
+
};
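// Illustrative sketch (not part of the published bundle): the buffer-and-flush
// pattern used by startPersistTimer above, restated standalone. The flush callback
// and interval are assumptions for the example; the service itself persists pending
// events to the configured Postgres table.
function createBatchedFlusher(flush, intervalMs = 1000) {
  const pending = [];
  const timer = setInterval(() => {
    if (pending.length === 0) return;
    const batch = pending.splice(0, pending.length);
    flush(batch).catch((err) => console.error("periodic flush failed", err));
  }, intervalMs);
  return {
    add: (item) => pending.push(item),
    stop: () => clearInterval(timer)
  };
}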
|
|
9258
|
+
|
|
9259
|
+
// src/search/SearchCoordinator.ts
|
|
9260
|
+
var import_core19 = require("@topgunbuild/core");
|
|
9261
|
+
var SearchCoordinator = class {
|
|
9262
|
+
constructor() {
|
|
9263
|
+
/** Map name → FullTextIndex */
|
|
9264
|
+
this.indexes = /* @__PURE__ */ new Map();
|
|
9265
|
+
/** Map name → FullTextIndexConfig (for reference) */
|
|
9266
|
+
this.configs = /* @__PURE__ */ new Map();
|
|
9267
|
+
// ============================================
|
|
9268
|
+
// Phase 11.1b: Live Search Subscription tracking
|
|
9269
|
+
// ============================================
|
|
9270
|
+
/** Subscription ID → SearchSubscription */
|
|
9271
|
+
this.subscriptions = /* @__PURE__ */ new Map();
|
|
9272
|
+
/** Map name → Set of subscription IDs */
|
|
9273
|
+
this.subscriptionsByMap = /* @__PURE__ */ new Map();
|
|
9274
|
+
/** Client ID → Set of subscription IDs */
|
|
9275
|
+
this.subscriptionsByClient = /* @__PURE__ */ new Map();
|
|
9276
|
+
// ============================================
|
|
9277
|
+
// Phase 11.2: Notification Batching
|
|
9278
|
+
// ============================================
|
|
9279
|
+
/** Queue of pending notifications per map */
|
|
9280
|
+
this.pendingNotifications = /* @__PURE__ */ new Map();
|
|
9281
|
+
/** Timer for batching notifications */
|
|
9282
|
+
this.notificationTimer = null;
|
|
9283
|
+
/** Batch interval in milliseconds (~1 frame at 60fps) */
|
|
9284
|
+
this.BATCH_INTERVAL = 16;
|
|
9285
|
+
logger.debug("SearchCoordinator initialized");
|
|
9286
|
+
}
|
|
9287
|
+
/**
|
|
9288
|
+
* Set the callback for sending updates to clients.
|
|
9289
|
+
* Called by ServerCoordinator during initialization.
|
|
9290
|
+
*/
|
|
9291
|
+
setSendUpdateCallback(callback) {
|
|
9292
|
+
this.sendUpdate = callback;
|
|
9293
|
+
}
|
|
9294
|
+
/**
|
|
9295
|
+
* Set the callback for sending batched updates to clients.
|
|
9296
|
+
* When set, notifications are batched within the BATCH_INTERVAL (16ms) window.
|
|
9297
|
+
* Called by ServerCoordinator during initialization.
|
|
9298
|
+
*
|
|
9299
|
+
* @param callback - Function to call with batched updates
|
|
9300
|
+
*/
|
|
9301
|
+
setSendBatchUpdateCallback(callback) {
|
|
9302
|
+
this.sendBatchUpdate = callback;
|
|
9303
|
+
}
|
|
9304
|
+
/**
|
|
9305
|
+
* Set the callback for retrieving document values.
|
|
9306
|
+
* Called by ServerCoordinator during initialization.
|
|
9307
|
+
*/
|
|
9308
|
+
setDocumentValueGetter(getter) {
|
|
9309
|
+
this.getDocumentValue = getter;
|
|
9310
|
+
}
|
|
9311
|
+
/**
|
|
9312
|
+
* Enable full-text search for a map.
|
|
9313
|
+
*
|
|
9314
|
+
* @param mapName - Name of the map to enable FTS for
|
|
9315
|
+
* @param config - FTS configuration (fields, tokenizer, bm25 options)
|
|
9316
|
+
*/
|
|
9317
|
+
enableSearch(mapName, config) {
|
|
9318
|
+
if (this.indexes.has(mapName)) {
|
|
9319
|
+
logger.warn({ mapName }, "FTS already enabled for map, replacing index");
|
|
9320
|
+
this.indexes.delete(mapName);
|
|
9321
|
+
}
|
|
9322
|
+
const index = new import_core19.FullTextIndex(config);
|
|
9323
|
+
this.indexes.set(mapName, index);
|
|
9324
|
+
this.configs.set(mapName, config);
|
|
9325
|
+
logger.info({ mapName, fields: config.fields }, "FTS enabled for map");
|
|
9326
|
+
}
|
|
9327
|
+
/**
|
|
9328
|
+
* Disable full-text search for a map.
|
|
9329
|
+
*
|
|
9330
|
+
* @param mapName - Name of the map to disable FTS for
|
|
9331
|
+
*/
|
|
9332
|
+
disableSearch(mapName) {
|
|
9333
|
+
if (!this.indexes.has(mapName)) {
|
|
9334
|
+
logger.warn({ mapName }, "FTS not enabled for map, nothing to disable");
|
|
9335
|
+
return;
|
|
9336
|
+
}
|
|
9337
|
+
this.indexes.delete(mapName);
|
|
9338
|
+
this.configs.delete(mapName);
|
|
9339
|
+
logger.info({ mapName }, "FTS disabled for map");
|
|
9340
|
+
}
|
|
9341
|
+
/**
|
|
9342
|
+
* Check if FTS is enabled for a map.
|
|
9343
|
+
*/
|
|
9344
|
+
isSearchEnabled(mapName) {
|
|
9345
|
+
return this.indexes.has(mapName);
|
|
9346
|
+
}
|
|
9347
|
+
/**
|
|
9348
|
+
* Get enabled map names.
|
|
9349
|
+
*/
|
|
9350
|
+
getEnabledMaps() {
|
|
9351
|
+
return Array.from(this.indexes.keys());
|
|
9352
|
+
}
|
|
9353
|
+
/**
|
|
9354
|
+
* Execute a one-shot search query.
|
|
9355
|
+
*
|
|
9356
|
+
* @param mapName - Name of the map to search
|
|
9357
|
+
* @param query - Search query text
|
|
9358
|
+
* @param options - Search options (limit, minScore, boost)
|
|
9359
|
+
* @returns Search response payload
|
|
9360
|
+
*/
|
|
9361
|
+
search(mapName, query, options) {
|
|
9362
|
+
const index = this.indexes.get(mapName);
|
|
9363
|
+
if (!index) {
|
|
9364
|
+
logger.warn({ mapName }, "Search requested for map without FTS enabled");
|
|
9365
|
+
return {
|
|
9366
|
+
requestId: "",
|
|
9367
|
+
results: [],
|
|
9368
|
+
totalCount: 0,
|
|
9369
|
+
error: `Full-text search not enabled for map: ${mapName}`
|
|
9370
|
+
};
|
|
9371
|
+
}
|
|
9372
|
+
try {
|
|
9373
|
+
const searchResults = index.search(query, options);
|
|
9374
|
+
const results = searchResults.map((result) => {
|
|
9375
|
+
const value = this.getDocumentValue ? this.getDocumentValue(mapName, result.docId) : void 0;
|
|
9376
|
+
return {
|
|
9377
|
+
key: result.docId,
|
|
9378
|
+
value,
|
|
9379
|
+
score: result.score,
|
|
9380
|
+
matchedTerms: result.matchedTerms || []
|
|
9381
|
+
};
|
|
9382
|
+
});
|
|
9383
|
+
logger.debug(
|
|
9384
|
+
{ mapName, query, resultCount: results.length },
|
|
9385
|
+
"Search executed"
|
|
9386
|
+
);
|
|
9387
|
+
return {
|
|
9388
|
+
requestId: "",
|
|
9389
|
+
results,
|
|
9390
|
+
totalCount: searchResults.length
|
|
9391
|
+
};
|
|
9392
|
+
} catch (err) {
|
|
9393
|
+
logger.error({ mapName, query, err }, "Search failed");
|
|
9394
|
+
return {
|
|
9395
|
+
requestId: "",
|
|
9396
|
+
results: [],
|
|
9397
|
+
totalCount: 0,
|
|
9398
|
+
error: `Search failed: ${err.message}`
|
|
9399
|
+
};
|
|
9400
|
+
}
|
|
9401
|
+
}
|
|
9402
|
+
/**
|
|
9403
|
+
* Handle document set/update.
|
|
9404
|
+
* Called by ServerCoordinator when data changes.
|
|
9405
|
+
*
|
|
9406
|
+
* @param mapName - Name of the map
|
|
9407
|
+
* @param key - Document key
|
|
9408
|
+
* @param value - Document value
|
|
9409
|
+
*/
|
|
9410
|
+
onDataChange(mapName, key, value, changeType) {
|
|
9411
|
+
const index = this.indexes.get(mapName);
|
|
9412
|
+
if (!index) {
|
|
9413
|
+
return;
|
|
9414
|
+
}
|
|
9415
|
+
if (changeType === "remove" || value === null || value === void 0) {
|
|
9416
|
+
index.onRemove(key);
|
|
9417
|
+
} else {
|
|
9418
|
+
index.onSet(key, value);
|
|
9419
|
+
}
|
|
9420
|
+
this.notifySubscribers(mapName, key, value ?? null, changeType);
|
|
9421
|
+
}
|
|
9422
|
+
/**
|
|
9423
|
+
* Build index from existing map entries.
|
|
9424
|
+
* Called when FTS is enabled for a map that already has data.
|
|
9425
|
+
*
|
|
9426
|
+
* @param mapName - Name of the map
|
|
9427
|
+
* @param entries - Iterator of [key, value] tuples
|
|
9428
|
+
*/
|
|
9429
|
+
buildIndexFromEntries(mapName, entries) {
|
|
9430
|
+
const index = this.indexes.get(mapName);
|
|
9431
|
+
if (!index) {
|
|
9432
|
+
logger.warn({ mapName }, "Cannot build index: FTS not enabled for map");
|
|
9433
|
+
return;
|
|
9434
|
+
}
|
|
9435
|
+
let count = 0;
|
|
9436
|
+
for (const [key, value] of entries) {
|
|
9437
|
+
if (value !== null) {
|
|
9438
|
+
index.onSet(key, value);
|
|
9439
|
+
count++;
|
|
9440
|
+
}
|
|
9441
|
+
}
|
|
9442
|
+
logger.info({ mapName, documentCount: count }, "Index built from entries");
|
|
9443
|
+
}
|
|
9444
|
+
/**
|
|
9445
|
+
* Get index statistics for monitoring.
|
|
9446
|
+
*/
|
|
9447
|
+
getIndexStats(mapName) {
|
|
9448
|
+
const index = this.indexes.get(mapName);
|
|
9449
|
+
const config = this.configs.get(mapName);
|
|
9450
|
+
if (!index || !config) {
|
|
9451
|
+
return null;
|
|
9452
|
+
}
|
|
9453
|
+
return {
|
|
9454
|
+
documentCount: index.getSize(),
|
|
9455
|
+
fields: config.fields
|
|
9456
|
+
};
|
|
9457
|
+
}
|
|
9458
|
+
/**
|
|
9459
|
+
* Clear all indexes (for testing or shutdown).
|
|
9460
|
+
*/
|
|
9461
|
+
clear() {
|
|
9462
|
+
for (const index of this.indexes.values()) {
|
|
9463
|
+
index.clear();
|
|
9464
|
+
}
|
|
9465
|
+
this.indexes.clear();
|
|
9466
|
+
this.configs.clear();
|
|
9467
|
+
this.subscriptions.clear();
|
|
9468
|
+
this.subscriptionsByMap.clear();
|
|
9469
|
+
this.subscriptionsByClient.clear();
|
|
9470
|
+
this.pendingNotifications.clear();
|
|
9471
|
+
if (this.notificationTimer) {
|
|
9472
|
+
clearTimeout(this.notificationTimer);
|
|
9473
|
+
this.notificationTimer = null;
|
|
9474
|
+
}
|
|
9475
|
+
logger.debug("SearchCoordinator cleared");
|
|
9476
|
+
}
|
|
9477
|
+
// ============================================
|
|
9478
|
+
// Phase 11.1b: Live Search Subscription Methods
|
|
9479
|
+
// ============================================
|
|
9480
|
+
/**
|
|
9481
|
+
* Subscribe to live search results.
|
|
9482
|
+
* Returns initial results and tracks the subscription for delta updates.
|
|
9483
|
+
*
|
|
9484
|
+
* @param clientId - ID of the subscribing client
|
|
9485
|
+
* @param subscriptionId - Unique subscription identifier
|
|
9486
|
+
* @param mapName - Name of the map to search
|
|
9487
|
+
* @param query - Search query text
|
|
9488
|
+
* @param options - Search options (limit, minScore, boost)
|
|
9489
|
+
* @returns Initial search results
|
|
9490
|
+
*/
|
|
9491
|
+
subscribe(clientId, subscriptionId, mapName, query, options) {
|
|
9492
|
+
const index = this.indexes.get(mapName);
|
|
9493
|
+
if (!index) {
|
|
9494
|
+
logger.warn({ mapName }, "Subscribe requested for map without FTS enabled");
|
|
9495
|
+
return [];
|
|
9496
|
+
}
|
|
9497
|
+
const queryTerms = index.tokenizeQuery(query);
|
|
9498
|
+
const searchResults = index.search(query, options);
|
|
9499
|
+
const currentResults = /* @__PURE__ */ new Map();
|
|
9500
|
+
const results = [];
|
|
9501
|
+
for (const result of searchResults) {
|
|
9502
|
+
const value = this.getDocumentValue ? this.getDocumentValue(mapName, result.docId) : void 0;
|
|
9503
|
+
currentResults.set(result.docId, {
|
|
9504
|
+
score: result.score,
|
|
9505
|
+
matchedTerms: result.matchedTerms || []
|
|
9506
|
+
});
|
|
9507
|
+
results.push({
|
|
9508
|
+
key: result.docId,
|
|
9509
|
+
value,
|
|
9510
|
+
score: result.score,
|
|
9511
|
+
matchedTerms: result.matchedTerms || []
|
|
9512
|
+
});
|
|
9513
|
+
}
|
|
9514
|
+
const subscription = {
|
|
9515
|
+
id: subscriptionId,
|
|
9516
|
+
clientId,
|
|
9517
|
+
mapName,
|
|
9518
|
+
query,
|
|
9519
|
+
queryTerms,
|
|
9520
|
+
options: options || {},
|
|
9521
|
+
currentResults
|
|
9522
|
+
};
|
|
9523
|
+
this.subscriptions.set(subscriptionId, subscription);
|
|
9524
|
+
if (!this.subscriptionsByMap.has(mapName)) {
|
|
9525
|
+
this.subscriptionsByMap.set(mapName, /* @__PURE__ */ new Set());
|
|
9526
|
+
}
|
|
9527
|
+
this.subscriptionsByMap.get(mapName).add(subscriptionId);
|
|
9528
|
+
if (!this.subscriptionsByClient.has(clientId)) {
|
|
9529
|
+
this.subscriptionsByClient.set(clientId, /* @__PURE__ */ new Set());
|
|
9530
|
+
}
|
|
9531
|
+
this.subscriptionsByClient.get(clientId).add(subscriptionId);
|
|
9532
|
+
logger.debug(
|
|
9533
|
+
{ subscriptionId, clientId, mapName, query, resultCount: results.length },
|
|
9534
|
+
"Search subscription created"
|
|
9535
|
+
);
|
|
9536
|
+
return results;
|
|
9537
|
+
}
|
|
9538
|
+
/**
|
|
9539
|
+
* Unsubscribe from a live search.
|
|
9540
|
+
*
|
|
9541
|
+
* @param subscriptionId - Subscription to remove
|
|
9542
|
+
*/
|
|
9543
|
+
unsubscribe(subscriptionId) {
|
|
9544
|
+
const subscription = this.subscriptions.get(subscriptionId);
|
|
9545
|
+
if (!subscription) {
|
|
9546
|
+
return;
|
|
9547
|
+
}
|
|
9548
|
+
this.subscriptions.delete(subscriptionId);
|
|
9549
|
+
const mapSubs = this.subscriptionsByMap.get(subscription.mapName);
|
|
9550
|
+
if (mapSubs) {
|
|
9551
|
+
mapSubs.delete(subscriptionId);
|
|
9552
|
+
if (mapSubs.size === 0) {
|
|
9553
|
+
this.subscriptionsByMap.delete(subscription.mapName);
|
|
9554
|
+
}
|
|
9555
|
+
}
|
|
9556
|
+
const clientSubs = this.subscriptionsByClient.get(subscription.clientId);
|
|
9557
|
+
if (clientSubs) {
|
|
9558
|
+
clientSubs.delete(subscriptionId);
|
|
9559
|
+
if (clientSubs.size === 0) {
|
|
9560
|
+
this.subscriptionsByClient.delete(subscription.clientId);
|
|
9561
|
+
}
|
|
9562
|
+
}
|
|
9563
|
+
logger.debug({ subscriptionId }, "Search subscription removed");
|
|
9564
|
+
}
|
|
9565
|
+
/**
|
|
9566
|
+
* Unsubscribe all subscriptions for a client.
|
|
9567
|
+
* Called when a client disconnects.
|
|
9568
|
+
*
|
|
9569
|
+
* @param clientId - ID of the disconnected client
|
|
9570
|
+
*/
|
|
9571
|
+
unsubscribeClient(clientId) {
|
|
9572
|
+
const clientSubs = this.subscriptionsByClient.get(clientId);
|
|
9573
|
+
if (!clientSubs) {
|
|
9574
|
+
return;
|
|
9575
|
+
}
|
|
9576
|
+
const subscriptionIds = Array.from(clientSubs);
|
|
9577
|
+
for (const subscriptionId of subscriptionIds) {
|
|
9578
|
+
this.unsubscribe(subscriptionId);
|
|
9579
|
+
}
|
|
9580
|
+
logger.debug({ clientId, count: subscriptionIds.length }, "Client subscriptions cleared");
|
|
9581
|
+
}
|
|
9582
|
+
/**
|
|
9583
|
+
* Get the number of active subscriptions.
|
|
9584
|
+
*/
|
|
9585
|
+
getSubscriptionCount() {
|
|
9586
|
+
return this.subscriptions.size;
|
|
9587
|
+
}
|
|
9588
|
+
/**
|
|
9589
|
+
* Notify subscribers about a document change.
|
|
9590
|
+
* Computes delta (ENTER/UPDATE/LEAVE) for each affected subscription.
|
|
9591
|
+
*
|
|
9592
|
+
* @param mapName - Name of the map that changed
|
|
9593
|
+
* @param key - Document key that changed
|
|
9594
|
+
* @param value - New document value (null if removed)
|
|
9595
|
+
* @param changeType - Type of change
|
|
9596
|
+
*/
|
|
9597
|
+
notifySubscribers(mapName, key, value, changeType) {
|
|
9598
|
+
if (!this.sendUpdate) {
|
|
9599
|
+
return;
|
|
9600
|
+
}
|
|
9601
|
+
const subscriptionIds = this.subscriptionsByMap.get(mapName);
|
|
9602
|
+
if (!subscriptionIds || subscriptionIds.size === 0) {
|
|
9603
|
+
return;
|
|
9604
|
+
}
|
|
9605
|
+
const index = this.indexes.get(mapName);
|
|
9606
|
+
if (!index) {
|
|
9607
|
+
return;
|
|
9608
|
+
}
|
|
9609
|
+
for (const subId of subscriptionIds) {
|
|
9610
|
+
const sub = this.subscriptions.get(subId);
|
|
9611
|
+
if (!sub) continue;
|
|
9612
|
+
const wasInResults = sub.currentResults.has(key);
|
|
9613
|
+
let isInResults = false;
|
|
9614
|
+
let newScore = 0;
|
|
9615
|
+
let matchedTerms = [];
|
|
9616
|
+
logger.debug({ subId, key, wasInResults, changeType }, "Processing subscription update");
|
|
9617
|
+
if (changeType !== "remove" && value !== null) {
|
|
9618
|
+
const result = this.scoreDocument(sub, key, value, index);
|
|
9619
|
+
if (result && result.score >= (sub.options.minScore ?? 0)) {
|
|
9620
|
+
isInResults = true;
|
|
9621
|
+
newScore = result.score;
|
|
9622
|
+
matchedTerms = result.matchedTerms;
|
|
9623
|
+
}
|
|
9624
|
+
}
|
|
9625
|
+
let updateType = null;
|
|
9626
|
+
if (!wasInResults && isInResults) {
|
|
9627
|
+
updateType = "ENTER";
|
|
9628
|
+
sub.currentResults.set(key, { score: newScore, matchedTerms });
|
|
9629
|
+
} else if (wasInResults && !isInResults) {
|
|
9630
|
+
updateType = "LEAVE";
|
|
9631
|
+
sub.currentResults.delete(key);
|
|
9632
|
+
} else if (wasInResults && isInResults) {
|
|
9633
|
+
const old = sub.currentResults.get(key);
|
|
9634
|
+
if (Math.abs(old.score - newScore) > 1e-4 || changeType === "update") {
|
|
9635
|
+
updateType = "UPDATE";
|
|
9636
|
+
sub.currentResults.set(key, { score: newScore, matchedTerms });
|
|
9637
|
+
}
|
|
9638
|
+
}
|
|
9639
|
+
logger.debug({ subId, key, wasInResults, isInResults, updateType, newScore }, "Update decision");
|
|
9640
|
+
if (updateType) {
|
|
9641
|
+
this.sendUpdate(
|
|
9642
|
+
sub.clientId,
|
|
9643
|
+
subId,
|
|
9644
|
+
key,
|
|
9645
|
+
value,
|
|
9646
|
+
newScore,
|
|
9647
|
+
matchedTerms,
|
|
9648
|
+
updateType
|
|
9649
|
+
);
|
|
9650
|
+
}
|
|
9651
|
+
}
|
|
9652
|
+
}
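// Illustrative sketch (not part of the published bundle): the ENTER/UPDATE/LEAVE
// decision applied above and again in computeSubscriptionUpdate below, restated as
// a pure function over assumed boolean inputs. Note the handler above also treats
// an explicit "update" change as an UPDATE even when the score is unchanged.
function classifyDelta(wasInResults, isInResults, scoreChanged) {
  if (!wasInResults && isInResults) return "ENTER";
  if (wasInResults && !isInResults) return "LEAVE";
  if (wasInResults && isInResults && scoreChanged) return "UPDATE";
  return null; // no notification needed
}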
|
|
9653
|
+
/**
|
|
9654
|
+
* Score a single document against a subscription's query.
|
|
9655
|
+
*
|
|
9656
|
+
* OPTIMIZED: O(Q × D) complexity instead of O(N) full index scan.
|
|
9657
|
+
* Uses pre-tokenized queryTerms and FullTextIndex.scoreSingleDocument().
|
|
9658
|
+
*
|
|
9659
|
+
* @param subscription - The subscription containing query and cached queryTerms
|
|
9660
|
+
* @param key - Document key
|
|
9661
|
+
* @param value - Document value
|
|
9662
|
+
* @param index - The FullTextIndex for this map
|
|
9663
|
+
* @returns Scored result or null if document doesn't match
|
|
9664
|
+
*/
|
|
9665
|
+
scoreDocument(subscription, key, value, index) {
|
|
9666
|
+
const result = index.scoreSingleDocument(key, subscription.queryTerms, value);
|
|
9667
|
+
if (!result) {
|
|
9668
|
+
return null;
|
|
9669
|
+
}
|
|
9670
|
+
return {
|
|
9671
|
+
score: result.score,
|
|
9672
|
+
matchedTerms: result.matchedTerms || []
|
|
9673
|
+
};
|
|
9674
|
+
}
|
|
9675
|
+
// ============================================
|
|
9676
|
+
// Phase 11.2: Notification Batching Methods
|
|
9677
|
+
// ============================================
|
|
9678
|
+
/**
|
|
9679
|
+
* Queue a notification for batched processing.
|
|
9680
|
+
* Notifications are collected and processed together after BATCH_INTERVAL.
|
|
9681
|
+
*
|
|
9682
|
+
* @param mapName - Name of the map that changed
|
|
9683
|
+
* @param key - Document key that changed
|
|
9684
|
+
* @param value - New document value (null if removed)
|
|
9685
|
+
* @param changeType - Type of change
|
|
9686
|
+
*/
|
|
9687
|
+
queueNotification(mapName, key, value, changeType) {
|
|
9688
|
+
if (!this.sendBatchUpdate) {
|
|
9689
|
+
this.notifySubscribers(mapName, key, value, changeType);
|
|
9690
|
+
return;
|
|
7960
9691
|
}
|
|
7961
|
-
|
|
9692
|
+
const notification = { key, value, changeType };
|
|
9693
|
+
if (!this.pendingNotifications.has(mapName)) {
|
|
9694
|
+
this.pendingNotifications.set(mapName, []);
|
|
9695
|
+
}
|
|
9696
|
+
this.pendingNotifications.get(mapName).push(notification);
|
|
9697
|
+
this.scheduleNotificationFlush();
|
|
7962
9698
|
}
|
|
7963
9699
|
/**
|
|
7964
|
-
*
|
|
9700
|
+
* Schedule a flush of pending notifications.
|
|
9701
|
+
* Uses setTimeout to batch notifications within BATCH_INTERVAL window.
|
|
7965
9702
|
*/
|
|
7966
|
-
|
|
7967
|
-
this.
|
|
7968
|
-
|
|
7969
|
-
|
|
7970
|
-
|
|
7971
|
-
|
|
7972
|
-
|
|
7973
|
-
}, this.
|
|
9703
|
+
scheduleNotificationFlush() {
|
|
9704
|
+
if (this.notificationTimer) {
|
|
9705
|
+
return;
|
|
9706
|
+
}
|
|
9707
|
+
this.notificationTimer = setTimeout(() => {
|
|
9708
|
+
this.flushNotifications();
|
|
9709
|
+
this.notificationTimer = null;
|
|
9710
|
+
}, this.BATCH_INTERVAL);
|
|
7974
9711
|
}
|
|
7975
9712
|
/**
|
|
7976
|
-
*
|
|
9713
|
+
* Flush all pending notifications.
|
|
9714
|
+
* Processes each map's notifications and sends batched updates.
|
|
7977
9715
|
*/
|
|
7978
|
-
|
|
7979
|
-
if (this.
|
|
7980
|
-
|
|
7981
|
-
this.persistTimer = void 0;
|
|
9716
|
+
flushNotifications() {
|
|
9717
|
+
if (this.pendingNotifications.size === 0) {
|
|
9718
|
+
return;
|
|
7982
9719
|
}
|
|
9720
|
+
for (const [mapName, notifications] of this.pendingNotifications) {
|
|
9721
|
+
this.processBatchedNotifications(mapName, notifications);
|
|
9722
|
+
}
|
|
9723
|
+
this.pendingNotifications.clear();
|
|
7983
9724
|
}
|
|
7984
9725
|
/**
|
|
7985
|
-
*
|
|
9726
|
+
* Process batched notifications for a single map.
|
|
9727
|
+
* Computes updates for each subscription and sends as a batch.
|
|
9728
|
+
*
|
|
9729
|
+
* @param mapName - Name of the map
|
|
9730
|
+
* @param notifications - Array of pending notifications
|
|
7986
9731
|
*/
|
|
7987
|
-
|
|
7988
|
-
this.
|
|
7989
|
-
if (
|
|
7990
|
-
|
|
7991
|
-
|
|
7992
|
-
|
|
9732
|
+
processBatchedNotifications(mapName, notifications) {
|
|
9733
|
+
const subscriptionIds = this.subscriptionsByMap.get(mapName);
|
|
9734
|
+
if (!subscriptionIds || subscriptionIds.size === 0) {
|
|
9735
|
+
return;
|
|
9736
|
+
}
|
|
9737
|
+
const index = this.indexes.get(mapName);
|
|
9738
|
+
if (!index) {
|
|
9739
|
+
return;
|
|
9740
|
+
}
|
|
9741
|
+
for (const subId of subscriptionIds) {
|
|
9742
|
+
const sub = this.subscriptions.get(subId);
|
|
9743
|
+
if (!sub) continue;
|
|
9744
|
+
const updates = [];
|
|
9745
|
+
for (const { key, value, changeType } of notifications) {
|
|
9746
|
+
const update = this.computeSubscriptionUpdate(sub, key, value, changeType, index);
|
|
9747
|
+
if (update) {
|
|
9748
|
+
updates.push(update);
|
|
9749
|
+
}
|
|
9750
|
+
}
|
|
9751
|
+
if (updates.length > 0 && this.sendBatchUpdate) {
|
|
9752
|
+
this.sendBatchUpdate(sub.clientId, subId, updates);
|
|
9753
|
+
}
|
|
7993
9754
|
}
|
|
7994
|
-
super.dispose();
|
|
7995
9755
|
}
|
|
7996
9756
|
/**
|
|
7997
|
-
*
|
|
7998
|
-
|
|
7999
|
-
|
|
8000
|
-
|
|
9757
|
+
* Compute the update for a single document change against a subscription.
|
|
9758
|
+
* Returns null if no update is needed.
|
|
9759
|
+
*
|
|
9760
|
+
* @param subscription - The subscription to check
|
|
9761
|
+
* @param key - Document key
|
|
9762
|
+
* @param value - Document value (null if removed)
|
|
9763
|
+
* @param changeType - Type of change
|
|
9764
|
+
* @param index - The FullTextIndex for this map
|
|
9765
|
+
* @returns BatchedUpdate or null
|
|
9766
|
+
*/
|
|
9767
|
+
computeSubscriptionUpdate(subscription, key, value, changeType, index) {
|
|
9768
|
+
const wasInResults = subscription.currentResults.has(key);
|
|
9769
|
+
let isInResults = false;
|
|
9770
|
+
let newScore = 0;
|
|
9771
|
+
let matchedTerms = [];
|
|
9772
|
+
if (changeType !== "remove" && value !== null) {
|
|
9773
|
+
const result = this.scoreDocument(subscription, key, value, index);
|
|
9774
|
+
if (result && result.score >= (subscription.options.minScore ?? 0)) {
|
|
9775
|
+
isInResults = true;
|
|
9776
|
+
newScore = result.score;
|
|
9777
|
+
matchedTerms = result.matchedTerms;
|
|
9778
|
+
}
|
|
9779
|
+
}
|
|
9780
|
+
let updateType = null;
|
|
9781
|
+
if (!wasInResults && isInResults) {
|
|
9782
|
+
updateType = "ENTER";
|
|
9783
|
+
subscription.currentResults.set(key, { score: newScore, matchedTerms });
|
|
9784
|
+
} else if (wasInResults && !isInResults) {
|
|
9785
|
+
updateType = "LEAVE";
|
|
9786
|
+
subscription.currentResults.delete(key);
|
|
9787
|
+
} else if (wasInResults && isInResults) {
|
|
9788
|
+
const old = subscription.currentResults.get(key);
|
|
9789
|
+
if (Math.abs(old.score - newScore) > 1e-4 || changeType === "update") {
|
|
9790
|
+
updateType = "UPDATE";
|
|
9791
|
+
subscription.currentResults.set(key, { score: newScore, matchedTerms });
|
|
9792
|
+
}
|
|
9793
|
+
}
|
|
9794
|
+
if (!updateType) {
|
|
9795
|
+
return null;
|
|
9796
|
+
}
|
|
9797
|
+
return {
|
|
9798
|
+
key,
|
|
9799
|
+
value,
|
|
9800
|
+
score: newScore,
|
|
9801
|
+
matchedTerms,
|
|
9802
|
+
type: updateType
|
|
9803
|
+
};
|
|
8001
9804
|
}
|
|
8002
9805
|
};
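// Illustrative usage sketch (not part of the published bundle) mirroring how
// ServerCoordinator wires SearchCoordinator below. The map name, document, and
// config shape beyond `fields` are assumptions for the example.
const searchSketch = new SearchCoordinator();
searchSketch.setDocumentValueGetter((mapName, key) => undefined); // real wiring reads from the map
searchSketch.enableSearch("articles", { fields: ["title", "body"] });
searchSketch.onDataChange("articles", "a1", { title: "Hello", body: "distributed search" }, "set");
const sketchResponse = searchSketch.search("articles", "hello", { limit: 10 });
// sketchResponse.results carries key, value, score, and matchedTerms per entry,
// exactly as assembled in search() above.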
|
|
8003
9806
|
|
|
@@ -8027,7 +9830,7 @@ var ServerCoordinator = class {
|
|
|
8027
9830
|
this._readyResolve = resolve;
|
|
8028
9831
|
});
|
|
8029
9832
|
this._nodeId = config.nodeId;
|
|
8030
|
-
this.hlc = new
|
|
9833
|
+
this.hlc = new import_core20.HLC(config.nodeId);
|
|
8031
9834
|
this.storage = config.storage;
|
|
8032
9835
|
const rawSecret = config.jwtSecret || process.env.JWT_SECRET || "topgun-secret-dev";
|
|
8033
9836
|
this.jwtSecret = rawSecret.replace(/\\n/g, "\n");
|
|
@@ -8169,8 +9972,8 @@ var ServerCoordinator = class {
|
|
|
8169
9972
|
this.cluster,
|
|
8170
9973
|
this.partitionService,
|
|
8171
9974
|
{
|
|
8172
|
-
...
|
|
8173
|
-
defaultConsistency: config.defaultConsistency ??
|
|
9975
|
+
...import_core20.DEFAULT_REPLICATION_CONFIG,
|
|
9976
|
+
defaultConsistency: config.defaultConsistency ?? import_core20.ConsistencyLevel.EVENTUAL,
|
|
8174
9977
|
...config.replicationConfig
|
|
8175
9978
|
}
|
|
8176
9979
|
);
|
|
@@ -8212,6 +10015,80 @@ var ServerCoordinator = class {
|
|
|
8212
10015
|
logger.error({ err }, "Failed to initialize EventJournalService");
|
|
8213
10016
|
});
|
|
8214
10017
|
}
|
|
10018
|
+
this.partitionReassigner = new PartitionReassigner(
|
|
10019
|
+
this.cluster,
|
|
10020
|
+
this.partitionService,
|
|
10021
|
+
{ reassignmentDelayMs: 1e3 }
|
|
10022
|
+
);
|
|
10023
|
+
this.partitionReassigner.on("failoverComplete", (event) => {
|
|
10024
|
+
logger.info({
|
|
10025
|
+
failedNodeId: event.failedNodeId,
|
|
10026
|
+
partitionsReassigned: event.partitionsReassigned,
|
|
10027
|
+
durationMs: event.durationMs
|
|
10028
|
+
}, "Partition failover completed");
|
|
10029
|
+
this.broadcastPartitionMap(this.partitionService.getPartitionMap());
|
|
10030
|
+
});
|
|
10031
|
+
logger.info("PartitionReassigner initialized");
|
|
10032
|
+
this.readReplicaHandler = new ReadReplicaHandler(
|
|
10033
|
+
this.partitionService,
|
|
10034
|
+
this.cluster,
|
|
10035
|
+
this._nodeId,
|
|
10036
|
+
void 0,
|
|
10037
|
+
// LagTracker - can be added later
|
|
10038
|
+
{
|
|
10039
|
+
defaultConsistency: config.defaultConsistency ?? import_core20.ConsistencyLevel.STRONG,
|
|
10040
|
+
preferLocalReplica: true,
|
|
10041
|
+
loadBalancing: "latency-based"
|
|
10042
|
+
}
|
|
10043
|
+
);
|
|
10044
|
+
logger.info("ReadReplicaHandler initialized");
|
|
10045
|
+
this.merkleTreeManager = new MerkleTreeManager(this._nodeId);
|
|
10046
|
+
this.repairScheduler = new RepairScheduler(
|
|
10047
|
+
this.merkleTreeManager,
|
|
10048
|
+
this.cluster,
|
|
10049
|
+
this.partitionService,
|
|
10050
|
+
this._nodeId,
|
|
10051
|
+
{
|
|
10052
|
+
enabled: true,
|
|
10053
|
+
scanIntervalMs: 3e5,
|
|
10054
|
+
// 5 minutes
|
|
10055
|
+
maxConcurrentRepairs: 2
|
|
10056
|
+
}
|
|
10057
|
+
);
|
|
10058
|
+
this.repairScheduler.setDataAccessors(
|
|
10059
|
+
(key) => this.getLocalRecord(key) ?? void 0,
|
|
10060
|
+
(key, record) => this.applyRepairRecord(key, record)
|
|
10061
|
+
);
|
|
10062
|
+
this.repairScheduler.start();
|
|
10063
|
+
logger.info("MerkleTreeManager and RepairScheduler initialized");
|
|
10064
|
+
this.searchCoordinator = new SearchCoordinator();
|
|
10065
|
+
this.searchCoordinator.setDocumentValueGetter((mapName, key) => {
|
|
10066
|
+
const map = this.maps.get(mapName);
|
|
10067
|
+
if (!map) return void 0;
|
|
10068
|
+
return map.get(key);
|
|
10069
|
+
});
|
|
10070
|
+
this.searchCoordinator.setSendUpdateCallback((clientId, subscriptionId, key, value, score, matchedTerms, type) => {
|
|
10071
|
+
const client = this.clients.get(clientId);
|
|
10072
|
+
if (client) {
|
|
10073
|
+
client.writer.write({
|
|
10074
|
+
type: "SEARCH_UPDATE",
|
|
10075
|
+
payload: {
|
|
10076
|
+
subscriptionId,
|
|
10077
|
+
key,
|
|
10078
|
+
value,
|
|
10079
|
+
score,
|
|
10080
|
+
matchedTerms,
|
|
10081
|
+
type
|
|
10082
|
+
}
|
|
10083
|
+
});
|
|
10084
|
+
}
|
|
10085
|
+
});
|
|
10086
|
+
if (config.fullTextSearch) {
|
|
10087
|
+
for (const [mapName, ftsConfig] of Object.entries(config.fullTextSearch)) {
|
|
10088
|
+
this.searchCoordinator.enableSearch(mapName, ftsConfig);
|
|
10089
|
+
logger.info({ mapName, fields: ftsConfig.fields }, "FTS enabled for map");
|
|
10090
|
+
}
|
|
10091
|
+
}
|
|
8215
10092
|
this.systemManager = new SystemManager(
|
|
8216
10093
|
this.cluster,
|
|
8217
10094
|
this.metricsService,
|
|
@@ -8235,6 +10112,7 @@ var ServerCoordinator = class {
|
|
|
8235
10112
|
if (this.storage) {
|
|
8236
10113
|
this.storage.initialize().then(() => {
|
|
8237
10114
|
logger.info("Storage adapter initialized");
|
|
10115
|
+
this.backfillSearchIndexes();
|
|
8238
10116
|
}).catch((err) => {
|
|
8239
10117
|
logger.error({ err }, "Failed to initialize storage");
|
|
8240
10118
|
});
|
|
@@ -8242,6 +10120,36 @@ var ServerCoordinator = class {
|
|
|
8242
10120
|
this.startGarbageCollection();
|
|
8243
10121
|
this.startHeartbeatCheck();
|
|
8244
10122
|
}
|
|
10123
|
+
/**
|
|
10124
|
+
* Populate FTS indexes from existing map data.
|
|
10125
|
+
* Called after storage initialization.
|
|
10126
|
+
*/
|
|
10127
|
+
async backfillSearchIndexes() {
|
|
10128
|
+
const enabledMaps = this.searchCoordinator.getEnabledMaps();
|
|
10129
|
+
const promises2 = enabledMaps.map(async (mapName) => {
|
|
10130
|
+
try {
|
|
10131
|
+
await this.getMapAsync(mapName);
|
|
10132
|
+
const map = this.maps.get(mapName);
|
|
10133
|
+
if (!map) return;
|
|
10134
|
+
if (map instanceof import_core20.LWWMap) {
|
|
10135
|
+
const entries = Array.from(map.entries());
|
|
10136
|
+
if (entries.length > 0) {
|
|
10137
|
+
logger.info({ mapName, count: entries.length }, "Backfilling FTS index");
|
|
10138
|
+
this.searchCoordinator.buildIndexFromEntries(
|
|
10139
|
+
mapName,
|
|
10140
|
+
map.entries()
|
|
10141
|
+
);
|
|
10142
|
+
}
|
|
10143
|
+
} else {
|
|
10144
|
+
logger.warn({ mapName }, "FTS backfill skipped: Map type not supported (only LWWMap)");
|
|
10145
|
+
}
|
|
10146
|
+
} catch (err) {
|
|
10147
|
+
logger.error({ mapName, err }, "Failed to backfill FTS index");
|
|
10148
|
+
}
|
|
10149
|
+
});
|
|
10150
|
+
await Promise.all(promises2);
|
|
10151
|
+
logger.info("FTS backfill completed");
|
|
10152
|
+
}
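// Illustrative config fragment (not part of the published bundle): the shape of the
// fullTextSearch option consumed in the constructor above; keys are map names, values
// are per-map FTS configs. Options beyond `fields` are assumptions.
const serverConfigSketch = {
  fullTextSearch: {
    articles: { fields: ["title", "body"] }
  }
};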
|
|
8245
10153
|
/** Wait for server to be fully ready (ports assigned) */
|
|
8246
10154
|
ready() {
|
|
8247
10155
|
return this._readyPromise;
|
|
@@ -8308,8 +10216,137 @@ var ServerCoordinator = class {
|
|
|
8308
10216
|
getTaskletScheduler() {
|
|
8309
10217
|
return this.taskletScheduler;
|
|
8310
10218
|
}
|
|
10219
|
+
// === Phase 11.1: Full-Text Search Public API ===
|
|
10220
|
+
/**
|
|
10221
|
+
* Enable full-text search for a map.
|
|
10222
|
+
* Can be called at runtime to enable FTS dynamically.
|
|
10223
|
+
*
|
|
10224
|
+
* @param mapName - Name of the map to enable FTS for
|
|
10225
|
+
* @param config - FTS configuration (fields, tokenizer, bm25 options)
|
|
10226
|
+
*/
|
|
10227
|
+
enableFullTextSearch(mapName, config) {
|
|
10228
|
+
this.searchCoordinator.enableSearch(mapName, config);
|
|
10229
|
+
const map = this.maps.get(mapName);
|
|
10230
|
+
if (map) {
|
|
10231
|
+
const entries = [];
|
|
10232
|
+
if (map instanceof import_core20.LWWMap) {
|
|
10233
|
+
for (const [key, value] of map.entries()) {
|
|
10234
|
+
entries.push([key, value]);
|
|
10235
|
+
}
|
|
10236
|
+
} else if (map instanceof import_core20.ORMap) {
|
|
10237
|
+
for (const key of map.allKeys()) {
|
|
10238
|
+
const values = map.get(key);
|
|
10239
|
+
const value = values.length > 0 ? values[0] : null;
|
|
10240
|
+
entries.push([key, value]);
|
|
10241
|
+
}
|
|
10242
|
+
}
|
|
10243
|
+
this.searchCoordinator.buildIndexFromEntries(mapName, entries);
|
|
10244
|
+
}
|
|
10245
|
+
}
|
|
10246
|
+
/**
|
|
10247
|
+
* Disable full-text search for a map.
|
|
10248
|
+
*
|
|
10249
|
+
* @param mapName - Name of the map to disable FTS for
|
|
10250
|
+
*/
|
|
10251
|
+
disableFullTextSearch(mapName) {
|
|
10252
|
+
this.searchCoordinator.disableSearch(mapName);
|
|
10253
|
+
}
|
|
10254
|
+
/**
|
|
10255
|
+
* Check if full-text search is enabled for a map.
|
|
10256
|
+
*
|
|
10257
|
+
* @param mapName - Name of the map to check
|
|
10258
|
+
* @returns True if FTS is enabled
|
|
10259
|
+
*/
|
|
10260
|
+
isFullTextSearchEnabled(mapName) {
|
|
10261
|
+
return this.searchCoordinator.isSearchEnabled(mapName);
|
|
10262
|
+
}
|
|
10263
|
+
/**
|
|
10264
|
+
* Get FTS index statistics for a map.
|
|
10265
|
+
*
|
|
10266
|
+
* @param mapName - Name of the map
|
|
10267
|
+
* @returns Index stats or null if FTS not enabled
|
|
10268
|
+
*/
|
|
10269
|
+
getFullTextSearchStats(mapName) {
|
|
10270
|
+
return this.searchCoordinator.getIndexStats(mapName);
|
|
10271
|
+
}
|
|
10272
|
+
/**
|
|
10273
|
+
* Phase 10.02: Graceful cluster departure
|
|
10274
|
+
*
|
|
10275
|
+
* Notifies the cluster that this node is leaving and allows time for:
|
|
10276
|
+
* 1. Pending replication to complete
|
|
10277
|
+
* 2. Other nodes to detect departure
|
|
10278
|
+
* 3. Partition reassignment to begin
|
|
10279
|
+
*/
|
|
10280
|
+
async gracefulClusterDeparture() {
|
|
10281
|
+
if (!this.cluster || this.cluster.getMembers().length <= 1) {
|
|
10282
|
+
return;
|
|
10283
|
+
}
|
|
10284
|
+
const nodeId = this._nodeId;
|
|
10285
|
+
const ownedPartitions = this.partitionService ? this.getOwnedPartitions() : [];
|
|
10286
|
+
logger.info({
|
|
10287
|
+
nodeId,
|
|
10288
|
+
ownedPartitions: ownedPartitions.length,
|
|
10289
|
+
clusterMembers: this.cluster.getMembers().length
|
|
10290
|
+
}, "Initiating graceful cluster departure");
|
|
10291
|
+
const departureMessage = {
|
|
10292
|
+
type: "NODE_LEAVING",
|
|
10293
|
+
nodeId,
|
|
10294
|
+
partitions: ownedPartitions,
|
|
10295
|
+
timestamp: Date.now()
|
|
10296
|
+
};
|
|
10297
|
+
for (const memberId of this.cluster.getMembers()) {
|
|
10298
|
+
if (memberId !== nodeId) {
|
|
10299
|
+
try {
|
|
10300
|
+
this.cluster.send(memberId, "CLUSTER_EVENT", departureMessage);
|
|
10301
|
+
} catch (e) {
|
|
10302
|
+
logger.warn({ memberId, err: e }, "Failed to notify peer of departure");
|
|
10303
|
+
}
|
|
10304
|
+
}
|
|
10305
|
+
}
|
|
10306
|
+
if (this.replicationPipeline) {
|
|
10307
|
+
logger.info("Waiting for pending replication to complete...");
|
|
10308
|
+
try {
|
|
10309
|
+
await this.waitForReplicationFlush(3e3);
|
|
10310
|
+
logger.info("Replication flush complete");
|
|
10311
|
+
} catch (e) {
|
|
10312
|
+
logger.warn({ err: e }, "Replication flush timeout - some data may not be replicated");
|
|
10313
|
+
}
|
|
10314
|
+
}
|
|
10315
|
+
await new Promise((resolve) => setTimeout(resolve, 500));
|
|
10316
|
+
logger.info({ nodeId }, "Graceful cluster departure complete");
|
|
10317
|
+
}
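// Illustrative sketch (not part of the published bundle): the NODE_LEAVING notice
// broadcast by gracefulClusterDeparture above; the node id and partition ids are
// example values.
const departureNoticeSketch = {
  type: "NODE_LEAVING",
  nodeId: "node-a",
  partitions: [3, 17, 42], // partitions owned by the departing node
  timestamp: Date.now()
};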
|
|
10318
|
+
/**
|
|
10319
|
+
* Get list of partition IDs owned by this node
|
|
10320
|
+
*/
|
|
10321
|
+
getOwnedPartitions() {
|
|
10322
|
+
if (!this.partitionService) return [];
|
|
10323
|
+
const partitionMap = this.partitionService.getPartitionMap();
|
|
10324
|
+
const owned = [];
|
|
10325
|
+
for (const partition of partitionMap.partitions) {
|
|
10326
|
+
if (partition.ownerNodeId === this._nodeId) {
|
|
10327
|
+
owned.push(partition.partitionId);
|
|
10328
|
+
}
|
|
10329
|
+
}
|
|
10330
|
+
return owned;
|
|
10331
|
+
}
|
|
10332
|
+
/**
|
|
10333
|
+
* Wait for replication pipeline to flush pending operations
|
|
10334
|
+
*/
|
|
10335
|
+
async waitForReplicationFlush(timeoutMs) {
|
|
10336
|
+
if (!this.replicationPipeline) return;
|
|
10337
|
+
const startTime = Date.now();
|
|
10338
|
+
while (Date.now() - startTime < timeoutMs) {
|
|
10339
|
+
const pendingOps = this.replicationPipeline.getTotalPending();
|
|
10340
|
+
if (pendingOps === 0) {
|
|
10341
|
+
return;
|
|
10342
|
+
}
|
|
10343
|
+
await new Promise((resolve) => setTimeout(resolve, 100));
|
|
10344
|
+
}
|
|
10345
|
+
throw new Error("Replication flush timeout");
|
|
10346
|
+
}
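// Illustrative sketch (not part of the published bundle): the poll-until-drained loop
// used by waitForReplicationFlush above, generalized; getPending stands in for
// replicationPipeline.getTotalPending() and is an assumption of the example.
async function waitUntilDrained(getPending, timeoutMs, pollMs = 100) {
  const start = Date.now();
  while (Date.now() - start < timeoutMs) {
    if (getPending() === 0) return;
    await new Promise((resolve) => setTimeout(resolve, pollMs));
  }
  throw new Error("Flush timeout");
}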
|
|
8311
10347
|
async shutdown() {
|
|
8312
10348
|
logger.info("Shutting down Server Coordinator...");
|
|
10349
|
+
await this.gracefulClusterDeparture();
|
|
8313
10350
|
this.httpServer.close();
|
|
8314
10351
|
if (this.metricsServer) {
|
|
8315
10352
|
this.metricsServer.close();
|
|
@@ -8317,7 +10354,7 @@ var ServerCoordinator = class {
|
|
|
8317
10354
|
this.metricsService.destroy();
|
|
8318
10355
|
this.wss.close();
|
|
8319
10356
|
logger.info(`Closing ${this.clients.size} client connections...`);
|
|
8320
|
-
const shutdownMsg = (0,
|
|
10357
|
+
const shutdownMsg = (0, import_core20.serialize)({ type: "SHUTDOWN_PENDING", retryAfter: 5e3 });
|
|
8321
10358
|
for (const client of this.clients.values()) {
|
|
8322
10359
|
try {
|
|
8323
10360
|
if (client.socket.readyState === import_ws3.WebSocket.OPEN) {
|
|
@@ -8342,6 +10379,14 @@ var ServerCoordinator = class {
|
|
|
8342
10379
|
if (this.replicationPipeline) {
|
|
8343
10380
|
this.replicationPipeline.close();
|
|
8344
10381
|
}
|
|
10382
|
+
if (this.repairScheduler) {
|
|
10383
|
+
this.repairScheduler.stop();
|
|
10384
|
+
logger.info("RepairScheduler stopped");
|
|
10385
|
+
}
|
|
10386
|
+
if (this.partitionReassigner) {
|
|
10387
|
+
this.partitionReassigner.stop();
|
|
10388
|
+
logger.info("PartitionReassigner stopped");
|
|
10389
|
+
}
|
|
8345
10390
|
if (this.cluster) {
|
|
8346
10391
|
this.cluster.stop();
|
|
8347
10392
|
}
|
|
@@ -8442,7 +10487,7 @@ var ServerCoordinator = class {
|
|
|
8442
10487
|
buf = Buffer.from(message);
|
|
8443
10488
|
}
|
|
8444
10489
|
try {
|
|
8445
|
-
data = (0,
|
|
10490
|
+
data = (0, import_core20.deserialize)(buf);
|
|
8446
10491
|
} catch (e) {
|
|
8447
10492
|
try {
|
|
8448
10493
|
const text = Buffer.isBuffer(buf) ? buf.toString() : new TextDecoder().decode(buf);
|
|
@@ -8482,6 +10527,7 @@ var ServerCoordinator = class {
|
|
|
8482
10527
|
this.lockManager.handleClientDisconnect(clientId);
|
|
8483
10528
|
this.topicManager.unsubscribeAll(clientId);
|
|
8484
10529
|
this.counterHandler.unsubscribeAll(clientId);
|
|
10530
|
+
this.searchCoordinator.unsubscribeClient(clientId);
|
|
8485
10531
|
const members = this.cluster.getMembers();
|
|
8486
10532
|
for (const memberId of members) {
|
|
8487
10533
|
if (!this.cluster.isLocal(memberId)) {
|
|
@@ -8494,10 +10540,10 @@ var ServerCoordinator = class {
|
|
|
8494
10540
|
this.clients.delete(clientId);
|
|
8495
10541
|
this.metricsService.setConnectedClients(this.clients.size);
|
|
8496
10542
|
});
|
|
8497
|
-
ws.send((0,
|
|
10543
|
+
ws.send((0, import_core20.serialize)({ type: "AUTH_REQUIRED" }));
|
|
8498
10544
|
}
|
|
8499
10545
|
async handleMessage(client, rawMessage) {
|
|
8500
|
-
const parseResult =
|
|
10546
|
+
const parseResult = import_core20.MessageSchema.safeParse(rawMessage);
|
|
8501
10547
|
if (!parseResult.success) {
|
|
8502
10548
|
logger.error({ clientId: client.id, error: parseResult.error }, "Invalid message format from client");
|
|
8503
10549
|
client.writer.write({
|
|
@@ -8557,7 +10603,32 @@ var ServerCoordinator = class {
|
|
|
8557
10603
|
logger.info({ clientId: client.id, mapName, query }, "Client subscribed");
|
|
8558
10604
|
this.metricsService.incOp("SUBSCRIBE", mapName);
|
|
8559
10605
|
const allMembers = this.cluster.getMembers();
|
|
8560
|
-
|
|
10606
|
+
let remoteMembers = allMembers.filter((id) => !this.cluster.isLocal(id));
|
|
10607
|
+
const queryKey = query._id || query.where?._id;
|
|
10608
|
+
if (queryKey && typeof queryKey === "string" && this.readReplicaHandler) {
|
|
10609
|
+
try {
|
|
10610
|
+
const targetNode = this.readReplicaHandler.selectReadNode({
|
|
10611
|
+
mapName,
|
|
10612
|
+
key: queryKey,
|
|
10613
|
+
options: {
|
|
10614
|
+
// Default to EVENTUAL for read scaling unless specified otherwise
|
|
10615
|
+
// In future, we could extract consistency from query options if available
|
|
10616
|
+
consistency: import_core20.ConsistencyLevel.EVENTUAL
|
|
10617
|
+
}
|
|
10618
|
+
});
|
|
10619
|
+
if (targetNode) {
|
|
10620
|
+
if (this.cluster.isLocal(targetNode)) {
|
|
10621
|
+
remoteMembers = [];
|
|
10622
|
+
logger.debug({ clientId: client.id, mapName, key: queryKey }, "Read optimization: Serving locally");
|
|
10623
|
+
} else if (remoteMembers.includes(targetNode)) {
|
|
10624
|
+
remoteMembers = [targetNode];
|
|
10625
|
+
logger.debug({ clientId: client.id, mapName, key: queryKey, targetNode }, "Read optimization: Routing to replica");
|
|
10626
|
+
}
|
|
10627
|
+
}
|
|
10628
|
+
} catch (e) {
|
|
10629
|
+
logger.warn({ err: e }, "Error in ReadReplicaHandler selection");
|
|
10630
|
+
}
|
|
10631
|
+
}
|
|
8561
10632
|
const requestId = crypto.randomUUID();
|
|
8562
10633
|
const pending = {
|
|
8563
10634
|
requestId,
|
|
@@ -8737,7 +10808,7 @@ var ServerCoordinator = class {
|
|
|
8737
10808
|
this.metricsService.incOp("GET", message.mapName);
|
|
8738
10809
|
try {
|
|
8739
10810
|
const mapForSync = await this.getMapAsync(message.mapName);
|
|
8740
|
-
if (mapForSync instanceof
|
|
10811
|
+
if (mapForSync instanceof import_core20.LWWMap) {
|
|
8741
10812
|
const tree = mapForSync.getMerkleTree();
|
|
8742
10813
|
const rootHash = tree.getRootHash();
|
|
8743
10814
|
client.writer.write({
|
|
@@ -8775,7 +10846,7 @@ var ServerCoordinator = class {
|
|
|
8775
10846
|
const { mapName, path } = message.payload;
|
|
8776
10847
|
try {
|
|
8777
10848
|
const mapForBucket = await this.getMapAsync(mapName);
|
|
8778
|
-
if (mapForBucket instanceof
|
|
10849
|
+
if (mapForBucket instanceof import_core20.LWWMap) {
|
|
8779
10850
|
const treeForBucket = mapForBucket.getMerkleTree();
|
|
8780
10851
|
const buckets = treeForBucket.getBuckets(path);
|
|
8781
10852
|
const node = treeForBucket.getNode(path);
|
|
@@ -9157,7 +11228,7 @@ var ServerCoordinator = class {
|
|
|
9157
11228
|
this.metricsService.incOp("GET", message.mapName);
|
|
9158
11229
|
try {
|
|
9159
11230
|
const mapForSync = await this.getMapAsync(message.mapName, "OR");
|
|
9160
|
-
if (mapForSync instanceof
|
|
11231
|
+
if (mapForSync instanceof import_core20.ORMap) {
|
|
9161
11232
|
const tree = mapForSync.getMerkleTree();
|
|
9162
11233
|
const rootHash = tree.getRootHash();
|
|
9163
11234
|
client.writer.write({
|
|
@@ -9194,7 +11265,7 @@ var ServerCoordinator = class {
|
|
|
9194
11265
|
const { mapName, path } = message.payload;
|
|
9195
11266
|
try {
|
|
9196
11267
|
const mapForBucket = await this.getMapAsync(mapName, "OR");
|
|
9197
|
-
if (mapForBucket instanceof
|
|
11268
|
+
if (mapForBucket instanceof import_core20.ORMap) {
|
|
9198
11269
|
const tree = mapForBucket.getMerkleTree();
|
|
9199
11270
|
const buckets = tree.getBuckets(path);
|
|
9200
11271
|
const isLeaf = tree.isLeaf(path);
|
|
@@ -9238,7 +11309,7 @@ var ServerCoordinator = class {
|
|
|
9238
11309
|
const { mapName: diffMapName, keys } = message.payload;
|
|
9239
11310
|
try {
|
|
9240
11311
|
const mapForDiff = await this.getMapAsync(diffMapName, "OR");
|
|
9241
|
-
if (mapForDiff instanceof
|
|
11312
|
+
if (mapForDiff instanceof import_core20.ORMap) {
|
|
9242
11313
|
const entries = [];
|
|
9243
11314
|
const allTombstones = mapForDiff.getTombstones();
|
|
9244
11315
|
for (const key of keys) {
|
|
@@ -9270,7 +11341,7 @@ var ServerCoordinator = class {
|
|
|
9270
11341
|
const { mapName: pushMapName, entries: pushEntries } = message.payload;
|
|
9271
11342
|
try {
|
|
9272
11343
|
const mapForPush = await this.getMapAsync(pushMapName, "OR");
|
|
9273
|
-
if (mapForPush instanceof
|
|
11344
|
+
if (mapForPush instanceof import_core20.ORMap) {
|
|
9274
11345
|
let totalAdded = 0;
|
|
9275
11346
|
let totalUpdated = 0;
|
|
9276
11347
|
for (const entry of pushEntries) {
|
|
@@ -9398,6 +11469,106 @@ var ServerCoordinator = class {
|
|
|
9398
11469
|
});
|
|
9399
11470
|
break;
|
|
9400
11471
|
}
|
|
11472
|
+
// Phase 11.1: Full-Text Search
|
|
11473
|
+
case "SEARCH": {
|
|
11474
|
+
const { requestId: searchReqId, mapName: searchMapName, query: searchQuery, options: searchOptions } = message.payload;
|
|
11475
|
+
if (!this.securityManager.checkPermission(client.principal, searchMapName, "READ")) {
|
|
11476
|
+
logger.warn({ clientId: client.id, mapName: searchMapName }, "Access Denied: SEARCH");
|
|
11477
|
+
client.writer.write({
|
|
11478
|
+
type: "SEARCH_RESP",
|
|
11479
|
+
payload: {
|
|
11480
|
+
requestId: searchReqId,
|
|
11481
|
+
results: [],
|
|
11482
|
+
totalCount: 0,
|
|
11483
|
+
error: `Access denied for map: ${searchMapName}`
|
|
11484
|
+
}
|
|
11485
|
+
});
|
|
11486
|
+
break;
|
|
11487
|
+
}
|
|
11488
|
+
if (!this.searchCoordinator.isSearchEnabled(searchMapName)) {
|
|
11489
|
+
client.writer.write({
|
|
11490
|
+
type: "SEARCH_RESP",
|
|
11491
|
+
payload: {
|
|
11492
|
+
requestId: searchReqId,
|
|
11493
|
+
results: [],
|
|
11494
|
+
totalCount: 0,
|
|
11495
|
+
error: `Full-text search not enabled for map: ${searchMapName}`
|
|
11496
|
+
}
|
|
11497
|
+
});
|
|
11498
|
+
break;
|
|
11499
|
+
}
|
|
11500
|
+
const searchResult = this.searchCoordinator.search(searchMapName, searchQuery, searchOptions);
|
|
11501
|
+
searchResult.requestId = searchReqId;
|
|
11502
|
+
logger.debug({
|
|
11503
|
+
clientId: client.id,
|
|
11504
|
+
mapName: searchMapName,
|
|
11505
|
+
query: searchQuery,
|
|
11506
|
+
resultCount: searchResult.results.length
|
|
11507
|
+
}, "Search executed");
|
|
11508
|
+
client.writer.write({
|
|
11509
|
+
type: "SEARCH_RESP",
|
|
11510
|
+
payload: searchResult
|
|
11511
|
+
});
|
|
11512
|
+
break;
|
|
11513
|
+
}
|
|
11514
|
+
// Phase 11.1b: Live Search Subscriptions
|
|
11515
|
+
case "SEARCH_SUB": {
|
|
11516
|
+
const { subscriptionId, mapName: subMapName, query: subQuery, options: subOptions } = message.payload;
|
|
11517
|
+
if (!this.securityManager.checkPermission(client.principal, subMapName, "READ")) {
|
|
11518
|
+
logger.warn({ clientId: client.id, mapName: subMapName }, "Access Denied: SEARCH_SUB");
|
|
11519
|
+
client.writer.write({
|
|
11520
|
+
type: "SEARCH_RESP",
|
|
11521
|
+
payload: {
|
|
11522
|
+
requestId: subscriptionId,
|
|
11523
|
+
results: [],
|
|
11524
|
+
totalCount: 0,
|
|
11525
|
+
error: `Access denied for map: ${subMapName}`
|
|
11526
|
+
}
|
|
11527
|
+
});
|
|
11528
|
+
break;
|
|
11529
|
+
}
|
|
11530
|
+
if (!this.searchCoordinator.isSearchEnabled(subMapName)) {
|
|
11531
|
+
client.writer.write({
|
|
11532
|
+
type: "SEARCH_RESP",
|
|
11533
|
+
payload: {
|
|
11534
|
+
requestId: subscriptionId,
|
|
11535
|
+
results: [],
|
|
11536
|
+
totalCount: 0,
|
|
11537
|
+
error: `Full-text search not enabled for map: ${subMapName}`
|
|
11538
|
+
}
|
|
11539
|
+
});
|
|
11540
|
+
break;
|
|
11541
|
+
}
|
|
11542
|
+
const initialResults = this.searchCoordinator.subscribe(
|
|
11543
|
+
client.id,
|
|
11544
|
+
subscriptionId,
|
|
11545
|
+
subMapName,
|
|
11546
|
+
subQuery,
|
|
11547
|
+
subOptions
|
|
11548
|
+
);
|
|
11549
|
+
logger.debug({
|
|
11550
|
+
clientId: client.id,
|
|
11551
|
+
subscriptionId,
|
|
11552
|
+
mapName: subMapName,
|
|
11553
|
+
query: subQuery,
|
|
11554
|
+
resultCount: initialResults.length
|
|
11555
|
+
}, "Search subscription created");
|
|
11556
|
+
client.writer.write({
|
|
11557
|
+
type: "SEARCH_RESP",
|
|
11558
|
+
payload: {
|
|
11559
|
+
requestId: subscriptionId,
|
|
11560
|
+
results: initialResults,
|
|
11561
|
+
totalCount: initialResults.length
|
|
11562
|
+
}
|
|
11563
|
+
});
|
|
11564
|
+
break;
|
|
11565
|
+
}
|
|
11566
|
+
case "SEARCH_UNSUB": {
|
|
11567
|
+
const { subscriptionId: unsubId } = message.payload;
|
|
11568
|
+
this.searchCoordinator.unsubscribe(unsubId);
|
|
11569
|
+
logger.debug({ clientId: client.id, subscriptionId: unsubId }, "Search unsubscription");
|
|
11570
|
+
break;
|
|
11571
|
+
}
|
|
9401
11572
|
default:
|
|
9402
11573
|
logger.warn({ type: message.type }, "Unknown message type");
|
|
9403
11574
|
}
|
|
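Judging only by the fields the SEARCH, SEARCH_SUB and SEARCH_UNSUB cases above read from message.payload, the new full-text search messages have roughly the shapes sketched below; the concrete values are invented for illustration, and options is passed through to the SearchCoordinator unchanged:

// Client -> server
const searchMsg = { type: "SEARCH", payload: { requestId: "req-1", mapName: "articles", query: "crdt", options: {} } };
const searchSubMsg = { type: "SEARCH_SUB", payload: { subscriptionId: "sub-1", mapName: "articles", query: "crdt", options: {} } };
const searchUnsubMsg = { type: "SEARCH_UNSUB", payload: { subscriptionId: "sub-1" } };
// Server -> client: SEARCH and SEARCH_SUB answer (or report errors) with SEARCH_RESP; SEARCH_UNSUB sends no reply.
const searchResp = { type: "SEARCH_RESP", payload: { requestId: "req-1", results: [], totalCount: 0 } };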
@@ -9411,7 +11582,7 @@ var ServerCoordinator = class {
|
|
|
9411
11582
|
} else if (op.orRecord && op.orRecord.timestamp) {
|
|
9412
11583
|
} else if (op.orTag) {
|
|
9413
11584
|
try {
|
|
9414
|
-
ts =
|
|
11585
|
+
ts = import_core20.HLC.parse(op.orTag);
|
|
9415
11586
|
} catch (e) {
|
|
9416
11587
|
}
|
|
9417
11588
|
}
|
|
@@ -9435,7 +11606,7 @@ var ServerCoordinator = class {
|
|
|
9435
11606
|
};
|
|
9436
11607
|
let broadcastCount = 0;
|
|
9437
11608
|
for (const client of this.clients.values()) {
|
|
9438
|
-
if (client.isAuthenticated && client.socket.readyState === import_ws3.WebSocket.OPEN) {
|
|
11609
|
+
if (client.isAuthenticated && client.socket.readyState === import_ws3.WebSocket.OPEN && client.writer) {
|
|
9439
11610
|
client.writer.write(message);
|
|
9440
11611
|
broadcastCount++;
|
|
9441
11612
|
}
|
|
@@ -9508,7 +11679,7 @@ var ServerCoordinator = class {
|
|
|
9508
11679
|
client.writer.write({ ...message, payload: newPayload });
|
|
9509
11680
|
}
|
|
9510
11681
|
} else {
|
|
9511
|
-
const msgData = (0,
|
|
11682
|
+
const msgData = (0, import_core20.serialize)(message);
|
|
9512
11683
|
for (const [id, client] of this.clients) {
|
|
9513
11684
|
if (id !== excludeClientId && client.socket.readyState === 1) {
|
|
9514
11685
|
client.writer.writeRaw(msgData);
|
|
@@ -9586,7 +11757,7 @@ var ServerCoordinator = class {
|
|
|
9586
11757
|
payload: { events: filteredEvents },
|
|
9587
11758
|
timestamp: this.hlc.now()
|
|
9588
11759
|
};
|
|
9589
|
-
const serializedBatch = (0,
|
|
11760
|
+
const serializedBatch = (0, import_core20.serialize)(batchMessage);
|
|
9590
11761
|
for (const client of clients) {
|
|
9591
11762
|
try {
|
|
9592
11763
|
client.writer.writeRaw(serializedBatch);
|
|
@@ -9671,7 +11842,7 @@ var ServerCoordinator = class {
|
|
|
9671
11842
|
payload: { events: filteredEvents },
|
|
9672
11843
|
timestamp: this.hlc.now()
|
|
9673
11844
|
};
|
|
9674
|
-
const serializedBatch = (0,
|
|
11845
|
+
const serializedBatch = (0, import_core20.serialize)(batchMessage);
|
|
9675
11846
|
for (const client of clients) {
|
|
9676
11847
|
sendPromises.push(new Promise((resolve, reject) => {
|
|
9677
11848
|
try {
|
|
@@ -9702,7 +11873,14 @@ var ServerCoordinator = class {
|
|
|
9702
11873
|
this.cluster.on("message", (msg) => {
|
|
9703
11874
|
switch (msg.type) {
|
|
9704
11875
|
case "OP_FORWARD":
|
|
11876
|
+
if (msg.payload._replication || msg.payload._migration) {
|
|
11877
|
+
break;
|
|
11878
|
+
}
|
|
9705
11879
|
logger.info({ senderId: msg.senderId }, "Received forwarded op");
|
|
11880
|
+
if (!msg.payload.key) {
|
|
11881
|
+
logger.warn({ senderId: msg.senderId }, "OP_FORWARD missing key, dropping");
|
|
11882
|
+
break;
|
|
11883
|
+
}
|
|
9706
11884
|
if (this.partitionService.isLocalOwner(msg.payload.key)) {
|
|
9707
11885
|
this.processLocalOp(msg.payload, true, msg.senderId).catch((err) => {
|
|
9708
11886
|
logger.error({ err, senderId: msg.senderId }, "Forwarded op failed");
|
|
@@ -9809,6 +11987,51 @@ var ServerCoordinator = class {
|
|
|
9809
11987
|
this.topicManager.publish(topic, data, originalSenderId, true);
|
|
9810
11988
|
break;
|
|
9811
11989
|
}
|
|
11990
|
+
// Phase 10.04: Anti-entropy repair messages
|
|
11991
|
+
case "CLUSTER_MERKLE_ROOT_REQ": {
|
|
11992
|
+
const { partitionId, requestId } = msg.payload;
|
|
11993
|
+
const rootHash = this.merkleTreeManager?.getRootHash(partitionId) ?? 0;
|
|
11994
|
+
this.cluster.send(msg.senderId, "CLUSTER_MERKLE_ROOT_RESP", {
|
|
11995
|
+
requestId,
|
|
11996
|
+
partitionId,
|
|
11997
|
+
rootHash
|
|
11998
|
+
});
|
|
11999
|
+
break;
|
|
12000
|
+
}
|
|
12001
|
+
case "CLUSTER_MERKLE_ROOT_RESP": {
|
|
12002
|
+
if (this.repairScheduler) {
|
|
12003
|
+
this.repairScheduler.emit("merkleRootResponse", {
|
|
12004
|
+
nodeId: msg.senderId,
|
|
12005
|
+
...msg.payload
|
|
12006
|
+
});
|
|
12007
|
+
}
|
|
12008
|
+
break;
|
|
12009
|
+
}
|
|
12010
|
+
case "CLUSTER_REPAIR_DATA_REQ": {
|
|
12011
|
+
const { partitionId, keys, requestId } = msg.payload;
|
|
12012
|
+
const records = {};
|
|
12013
|
+
for (const key of keys) {
|
|
12014
|
+
const record = this.getLocalRecord(key);
|
|
12015
|
+
if (record) {
|
|
12016
|
+
records[key] = record;
|
|
12017
|
+
}
|
|
12018
|
+
}
|
|
12019
|
+
this.cluster.send(msg.senderId, "CLUSTER_REPAIR_DATA_RESP", {
|
|
12020
|
+
requestId,
|
|
12021
|
+
partitionId,
|
|
12022
|
+
records
|
|
12023
|
+
});
|
|
12024
|
+
break;
|
|
12025
|
+
}
|
|
12026
|
+
case "CLUSTER_REPAIR_DATA_RESP": {
|
|
12027
|
+
if (this.repairScheduler) {
|
|
12028
|
+
this.repairScheduler.emit("repairDataResponse", {
|
|
12029
|
+
nodeId: msg.senderId,
|
|
12030
|
+
...msg.payload
|
|
12031
|
+
});
|
|
12032
|
+
}
|
|
12033
|
+
break;
|
|
12034
|
+
}
|
|
9812
12035
|
}
|
|
9813
12036
|
});
|
|
9814
12037
|
}
|
|
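The four CLUSTER_* cases above form two request/response pairs consumed by the RepairScheduler. A rough sketch of one repair round between two nodes, using only the payload fields visible in these handlers; the step ordering is inferred rather than taken from the RepairScheduler source, and the example values (partitionId, requestIds, keys, peerId) are invented:

// 1. Ask a peer for the Merkle root of a partition (cluster is the node's ClusterManager, peerId another member's id).
cluster.send(peerId, "CLUSTER_MERKLE_ROOT_REQ", { partitionId: 7, requestId: "rep-1" });
// 2. The peer answers CLUSTER_MERKLE_ROOT_RESP with { requestId, partitionId, rootHash };
//    rootHash is 0 when it has no tree for that partition.
// 3. If the roots differ, request the suspect records by composite key.
cluster.send(peerId, "CLUSTER_REPAIR_DATA_REQ", {
  partitionId: 7,
  keys: ["users:alice", "users:bob"],   // "<mapName>:<key>" format, see getLocalRecord() further down
  requestId: "rep-2"
});
// 4. The peer replies CLUSTER_REPAIR_DATA_RESP with { requestId, partitionId, records }, and the
//    scheduler merges them via applyRepairRecord(), which keeps the record with the newer HLC timestamp.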
@@ -9817,7 +12040,7 @@ var ServerCoordinator = class {
|
|
|
9817
12040
|
const localQuery = { ...query };
|
|
9818
12041
|
delete localQuery.offset;
|
|
9819
12042
|
delete localQuery.limit;
|
|
9820
|
-
if (map instanceof
|
|
12043
|
+
if (map instanceof import_core20.IndexedLWWMap) {
|
|
9821
12044
|
const coreQuery = this.convertToCoreQuery(localQuery);
|
|
9822
12045
|
if (coreQuery) {
|
|
9823
12046
|
const entries = map.queryEntries(coreQuery);
|
|
@@ -9827,7 +12050,7 @@ var ServerCoordinator = class {
|
|
|
9827
12050
|
});
|
|
9828
12051
|
}
|
|
9829
12052
|
}
|
|
9830
|
-
if (map instanceof
|
|
12053
|
+
if (map instanceof import_core20.IndexedORMap) {
|
|
9831
12054
|
const coreQuery = this.convertToCoreQuery(localQuery);
|
|
9832
12055
|
if (coreQuery) {
|
|
9833
12056
|
const results = map.query(coreQuery);
|
|
@@ -9835,14 +12058,14 @@ var ServerCoordinator = class {
|
|
|
9835
12058
|
}
|
|
9836
12059
|
}
|
|
9837
12060
|
const records = /* @__PURE__ */ new Map();
|
|
9838
|
-
if (map instanceof
|
|
12061
|
+
if (map instanceof import_core20.LWWMap) {
|
|
9839
12062
|
for (const key of map.allKeys()) {
|
|
9840
12063
|
const rec = map.getRecord(key);
|
|
9841
12064
|
if (rec && rec.value !== null) {
|
|
9842
12065
|
records.set(key, rec);
|
|
9843
12066
|
}
|
|
9844
12067
|
}
|
|
9845
|
-
} else if (map instanceof
|
|
12068
|
+
} else if (map instanceof import_core20.ORMap) {
|
|
9846
12069
|
const items = map.items;
|
|
9847
12070
|
for (const key of items.keys()) {
|
|
9848
12071
|
const values = map.get(key);
|
|
@@ -9990,11 +12213,11 @@ var ServerCoordinator = class {
|
|
|
9990
12213
|
async applyOpToMap(op, remoteNodeId) {
|
|
9991
12214
|
const typeHint = op.opType === "OR_ADD" || op.opType === "OR_REMOVE" ? "OR" : "LWW";
|
|
9992
12215
|
const map = this.getMap(op.mapName, typeHint);
|
|
9993
|
-
if (typeHint === "OR" && map instanceof
|
|
12216
|
+
if (typeHint === "OR" && map instanceof import_core20.LWWMap) {
|
|
9994
12217
|
logger.error({ mapName: op.mapName }, "Map type mismatch: LWWMap but received OR op");
|
|
9995
12218
|
throw new Error("Map type mismatch: LWWMap but received OR op");
|
|
9996
12219
|
}
|
|
9997
|
-
if (typeHint === "LWW" && map instanceof
|
|
12220
|
+
if (typeHint === "LWW" && map instanceof import_core20.ORMap) {
|
|
9998
12221
|
logger.error({ mapName: op.mapName }, "Map type mismatch: ORMap but received LWW op");
|
|
9999
12222
|
throw new Error("Map type mismatch: ORMap but received LWW op");
|
|
10000
12223
|
}
|
|
@@ -10005,7 +12228,7 @@ var ServerCoordinator = class {
|
|
|
10005
12228
|
mapName: op.mapName,
|
|
10006
12229
|
key: op.key
|
|
10007
12230
|
};
|
|
10008
|
-
if (map instanceof
|
|
12231
|
+
if (map instanceof import_core20.LWWMap) {
|
|
10009
12232
|
oldRecord = map.getRecord(op.key);
|
|
10010
12233
|
if (this.conflictResolverHandler.hasResolvers(op.mapName)) {
|
|
10011
12234
|
const mergeResult = await this.conflictResolverHandler.mergeWithResolver(
|
|
@@ -10033,7 +12256,7 @@ var ServerCoordinator = class {
|
|
|
10033
12256
|
eventPayload.eventType = "UPDATED";
|
|
10034
12257
|
eventPayload.record = op.record;
|
|
10035
12258
|
}
|
|
10036
|
-
} else if (map instanceof
|
|
12259
|
+
} else if (map instanceof import_core20.ORMap) {
|
|
10037
12260
|
oldRecord = map.getRecords(op.key);
|
|
10038
12261
|
if (op.opType === "OR_ADD") {
|
|
10039
12262
|
map.apply(op.key, op.orRecord);
|
|
@@ -10049,7 +12272,7 @@ var ServerCoordinator = class {
|
|
|
10049
12272
|
}
|
|
10050
12273
|
}
|
|
10051
12274
|
this.queryRegistry.processChange(op.mapName, map, op.key, op.record || op.orRecord, oldRecord);
|
|
10052
|
-
const mapSize = map instanceof
|
|
12275
|
+
const mapSize = map instanceof import_core20.ORMap ? map.totalRecords : map.size;
|
|
10053
12276
|
this.metricsService.setMapSize(op.mapName, mapSize);
|
|
10054
12277
|
if (this.storage) {
|
|
10055
12278
|
if (recordToStore) {
|
|
@@ -10078,6 +12301,16 @@ var ServerCoordinator = class {
|
|
|
10078
12301
|
nodeId: this._nodeId
|
|
10079
12302
|
});
|
|
10080
12303
|
}
|
|
12304
|
+
if (this.merkleTreeManager && recordToStore && op.key) {
|
|
12305
|
+
const partitionId = this.partitionService.getPartitionId(op.key);
|
|
12306
|
+
this.merkleTreeManager.updateRecord(partitionId, op.key, recordToStore);
|
|
12307
|
+
}
|
|
12308
|
+
if (this.searchCoordinator.isSearchEnabled(op.mapName)) {
|
|
12309
|
+
const isRemove = op.opType === "REMOVE" || op.record && op.record.value === null;
|
|
12310
|
+
const value = isRemove ? null : op.record?.value ?? op.orRecord?.value;
|
|
12311
|
+
const changeType = isRemove ? "remove" : oldRecord ? "update" : "add";
|
|
12312
|
+
this.searchCoordinator.onDataChange(op.mapName, op.key, value, changeType);
|
|
12313
|
+
}
|
|
10081
12314
|
return { eventPayload, oldRecord };
|
|
10082
12315
|
}
|
|
10083
12316
|
/**
|
|
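The tail of applyOpToMap above now also feeds the MerkleTreeManager and, for search-enabled maps, derives a change type for the SearchCoordinator. A compact restatement of that classification; the classifySearchChange helper is illustrative only:

// Mirror of the branch above: a removal is a REMOVE op or an LWW record whose value is null;
// otherwise the change is an update when an old record existed, and an add when it did not.
function classifySearchChange(op, oldRecord) {
  const isRemove = op.opType === "REMOVE" || (op.record && op.record.value === null);
  const value = isRemove ? null : op.record?.value ?? op.orRecord?.value;
  const changeType = isRemove ? "remove" : oldRecord ? "update" : "add";
  return { value, changeType };
}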
@@ -10205,7 +12438,7 @@ var ServerCoordinator = class {
|
|
|
10205
12438
|
if (rejected || !eventPayload) {
|
|
10206
12439
|
return;
|
|
10207
12440
|
}
|
|
10208
|
-
if (this.replicationPipeline
|
|
12441
|
+
if (this.replicationPipeline) {
|
|
10209
12442
|
const opId = op.id || `${op.mapName}:${op.key}:${Date.now()}`;
|
|
10210
12443
|
this.replicationPipeline.replicate(op, opId, op.key).catch((err) => {
|
|
10211
12444
|
logger.warn({ opId, key: op.key, err }, "Replication failed (non-fatal)");
|
|
@@ -10348,12 +12581,16 @@ var ServerCoordinator = class {
|
|
|
10348
12581
|
}
|
|
10349
12582
|
handleClusterEvent(payload) {
|
|
10350
12583
|
const { mapName, key, eventType } = payload;
|
|
12584
|
+
if (!key) {
|
|
12585
|
+
logger.warn({ mapName, eventType }, "Received cluster event with undefined key, ignoring");
|
|
12586
|
+
return;
|
|
12587
|
+
}
|
|
10351
12588
|
const map = this.getMap(mapName, eventType === "OR_ADD" || eventType === "OR_REMOVE" ? "OR" : "LWW");
|
|
10352
|
-
const oldRecord = map instanceof
|
|
12589
|
+
const oldRecord = map instanceof import_core20.LWWMap ? map.getRecord(key) : null;
|
|
10353
12590
|
if (this.partitionService.isRelated(key)) {
|
|
10354
|
-
if (map instanceof
|
|
12591
|
+
if (map instanceof import_core20.LWWMap && payload.record) {
|
|
10355
12592
|
map.merge(key, payload.record);
|
|
10356
|
-
} else if (map instanceof
|
|
12593
|
+
} else if (map instanceof import_core20.ORMap) {
|
|
10357
12594
|
if (eventType === "OR_ADD" && payload.orRecord) {
|
|
10358
12595
|
map.apply(key, payload.orRecord);
|
|
10359
12596
|
} else if (eventType === "OR_REMOVE" && payload.orTag) {
|
|
@@ -10372,9 +12609,9 @@ var ServerCoordinator = class {
|
|
|
10372
12609
|
if (!this.maps.has(name)) {
|
|
10373
12610
|
let map;
|
|
10374
12611
|
if (typeHint === "OR") {
|
|
10375
|
-
map = new
|
|
12612
|
+
map = new import_core20.ORMap(this.hlc);
|
|
10376
12613
|
} else {
|
|
10377
|
-
map = new
|
|
12614
|
+
map = new import_core20.LWWMap(this.hlc);
|
|
10378
12615
|
}
|
|
10379
12616
|
this.maps.set(name, map);
|
|
10380
12617
|
if (this.storage) {
|
|
@@ -10397,7 +12634,7 @@ var ServerCoordinator = class {
|
|
|
10397
12634
|
this.getMap(name, typeHint);
|
|
10398
12635
|
const loadingPromise = this.mapLoadingPromises.get(name);
|
|
10399
12636
|
const map = this.maps.get(name);
|
|
10400
|
-
const mapSize = map instanceof
|
|
12637
|
+
const mapSize = map instanceof import_core20.LWWMap ? Array.from(map.entries()).length : map instanceof import_core20.ORMap ? map.size : 0;
|
|
10401
12638
|
logger.info({
|
|
10402
12639
|
mapName: name,
|
|
10403
12640
|
mapExisted,
|
|
@@ -10407,11 +12644,56 @@ var ServerCoordinator = class {
|
|
|
10407
12644
|
if (loadingPromise) {
|
|
10408
12645
|
logger.info({ mapName: name }, "[getMapAsync] Waiting for loadMapFromStorage...");
|
|
10409
12646
|
await loadingPromise;
|
|
10410
|
-
const newMapSize = map instanceof
|
|
12647
|
+
const newMapSize = map instanceof import_core20.LWWMap ? Array.from(map.entries()).length : map instanceof import_core20.ORMap ? map.size : 0;
|
|
10411
12648
|
logger.info({ mapName: name, mapSizeAfterLoad: newMapSize }, "[getMapAsync] Load completed");
|
|
10412
12649
|
}
|
|
10413
12650
|
return this.maps.get(name);
|
|
10414
12651
|
}
|
|
12652
|
+
/**
|
|
12653
|
+
* Phase 10.04: Get local record for anti-entropy repair
|
|
12654
|
+
* Returns the LWWRecord for a key, used by RepairScheduler
|
|
12655
|
+
*/
|
|
12656
|
+
getLocalRecord(key) {
|
|
12657
|
+
const separatorIndex = key.indexOf(":");
|
|
12658
|
+
if (separatorIndex === -1) {
|
|
12659
|
+
return null;
|
|
12660
|
+
}
|
|
12661
|
+
const mapName = key.substring(0, separatorIndex);
|
|
12662
|
+
const actualKey = key.substring(separatorIndex + 1);
|
|
12663
|
+
const map = this.maps.get(mapName);
|
|
12664
|
+
if (!map || !(map instanceof import_core20.LWWMap)) {
|
|
12665
|
+
return null;
|
|
12666
|
+
}
|
|
12667
|
+
return map.getRecord(actualKey) ?? null;
|
|
12668
|
+
}
|
|
12669
|
+
/**
|
|
12670
|
+
* Phase 10.04: Apply repaired record from anti-entropy repair
|
|
12671
|
+
* Used by RepairScheduler to apply resolved conflicts
|
|
12672
|
+
*/
|
|
12673
|
+
applyRepairRecord(key, record) {
|
|
12674
|
+
const separatorIndex = key.indexOf(":");
|
|
12675
|
+
if (separatorIndex === -1) {
|
|
12676
|
+
logger.warn({ key }, "Invalid key format for repair");
|
|
12677
|
+
return;
|
|
12678
|
+
}
|
|
12679
|
+
const mapName = key.substring(0, separatorIndex);
|
|
12680
|
+
const actualKey = key.substring(separatorIndex + 1);
|
|
12681
|
+
const map = this.getMap(mapName, "LWW");
|
|
12682
|
+
const existingRecord = map.getRecord(actualKey);
|
|
12683
|
+
if (!existingRecord || record.timestamp.millis > existingRecord.timestamp.millis || record.timestamp.millis === existingRecord.timestamp.millis && record.timestamp.counter > existingRecord.timestamp.counter) {
|
|
12684
|
+
map.merge(actualKey, record);
|
|
12685
|
+
logger.debug({ mapName, key: actualKey }, "Applied repair record");
|
|
12686
|
+
if (this.storage) {
|
|
12687
|
+
this.storage.store(mapName, actualKey, record).catch((err) => {
|
|
12688
|
+
logger.error({ err, mapName, key: actualKey }, "Failed to persist repair record");
|
|
12689
|
+
});
|
|
12690
|
+
}
|
|
12691
|
+
if (this.merkleTreeManager) {
|
|
12692
|
+
const partitionId = this.partitionService.getPartitionId(actualKey);
|
|
12693
|
+
this.merkleTreeManager.updateRecord(partitionId, actualKey, record);
|
|
12694
|
+
}
|
|
12695
|
+
}
|
|
12696
|
+
}
|
|
10415
12697
|
async loadMapFromStorage(name, typeHint) {
|
|
10416
12698
|
try {
|
|
10417
12699
|
const keys = await this.storage.loadAllKeys(name);
|
|
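Both repair helpers above (getLocalRecord and applyRepairRecord) address records through a composite "mapName:key" string and split it on the first colon only, so everything after that first colon, including further colons, belongs to the key. For example (values invented):

const key = "users:session:42";
const i = key.indexOf(":");
const mapName = key.substring(0, i);       // "users"
const actualKey = key.substring(i + 1);    // "session:42"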
@@ -10433,16 +12715,16 @@ var ServerCoordinator = class {
|
|
|
10433
12715
|
const currentMap = this.maps.get(name);
|
|
10434
12716
|
if (!currentMap) return;
|
|
10435
12717
|
let targetMap = currentMap;
|
|
10436
|
-
if (isOR && currentMap instanceof
|
|
12718
|
+
if (isOR && currentMap instanceof import_core20.LWWMap) {
|
|
10437
12719
|
logger.info({ mapName: name }, "Map auto-detected as ORMap. Switching type.");
|
|
10438
|
-
targetMap = new
|
|
12720
|
+
targetMap = new import_core20.ORMap(this.hlc);
|
|
10439
12721
|
this.maps.set(name, targetMap);
|
|
10440
|
-
} else if (!isOR && currentMap instanceof
|
|
12722
|
+
} else if (!isOR && currentMap instanceof import_core20.ORMap && typeHint !== "OR") {
|
|
10441
12723
|
logger.info({ mapName: name }, "Map auto-detected as LWWMap. Switching type.");
|
|
10442
|
-
targetMap = new
|
|
12724
|
+
targetMap = new import_core20.LWWMap(this.hlc);
|
|
10443
12725
|
this.maps.set(name, targetMap);
|
|
10444
12726
|
}
|
|
10445
|
-
if (targetMap instanceof
|
|
12727
|
+
if (targetMap instanceof import_core20.ORMap) {
|
|
10446
12728
|
for (const [key, record] of records) {
|
|
10447
12729
|
if (key === "__tombstones__") {
|
|
10448
12730
|
const t = record;
|
|
@@ -10455,7 +12737,7 @@ var ServerCoordinator = class {
|
|
|
10455
12737
|
}
|
|
10456
12738
|
}
|
|
10457
12739
|
}
|
|
10458
|
-
} else if (targetMap instanceof
|
|
12740
|
+
} else if (targetMap instanceof import_core20.LWWMap) {
|
|
10459
12741
|
for (const [key, record] of records) {
|
|
10460
12742
|
if (!record.type) {
|
|
10461
12743
|
targetMap.merge(key, record);
|
|
@@ -10466,7 +12748,7 @@ var ServerCoordinator = class {
|
|
|
10466
12748
|
if (count > 0) {
|
|
10467
12749
|
logger.info({ mapName: name, count }, "Loaded records for map");
|
|
10468
12750
|
this.queryRegistry.refreshSubscriptions(name, targetMap);
|
|
10469
|
-
const mapSize = targetMap instanceof
|
|
12751
|
+
const mapSize = targetMap instanceof import_core20.ORMap ? targetMap.totalRecords : targetMap.size;
|
|
10470
12752
|
this.metricsService.setMapSize(name, mapSize);
|
|
10471
12753
|
}
|
|
10472
12754
|
} catch (err) {
|
|
@@ -10548,7 +12830,7 @@ var ServerCoordinator = class {
|
|
|
10548
12830
|
reportLocalHlc() {
|
|
10549
12831
|
let minHlc = this.hlc.now();
|
|
10550
12832
|
for (const client of this.clients.values()) {
|
|
10551
|
-
if (
|
|
12833
|
+
if (import_core20.HLC.compare(client.lastActiveHlc, minHlc) < 0) {
|
|
10552
12834
|
minHlc = client.lastActiveHlc;
|
|
10553
12835
|
}
|
|
10554
12836
|
}
|
|
@@ -10569,7 +12851,7 @@ var ServerCoordinator = class {
|
|
|
10569
12851
|
let globalSafe = this.hlc.now();
|
|
10570
12852
|
let initialized = false;
|
|
10571
12853
|
for (const ts of this.gcReports.values()) {
|
|
10572
|
-
if (!initialized ||
|
|
12854
|
+
if (!initialized || import_core20.HLC.compare(ts, globalSafe) < 0) {
|
|
10573
12855
|
globalSafe = ts;
|
|
10574
12856
|
initialized = true;
|
|
10575
12857
|
}
|
|
@@ -10604,7 +12886,7 @@ var ServerCoordinator = class {
|
|
|
10604
12886
|
logger.info({ olderThanMillis: olderThan.millis }, "Performing Garbage Collection");
|
|
10605
12887
|
const now = Date.now();
|
|
10606
12888
|
for (const [name, map] of this.maps) {
|
|
10607
|
-
if (map instanceof
|
|
12889
|
+
if (map instanceof import_core20.LWWMap) {
|
|
10608
12890
|
for (const key of map.allKeys()) {
|
|
10609
12891
|
const record = map.getRecord(key);
|
|
10610
12892
|
if (record && record.value !== null && record.ttlMs) {
|
|
@@ -10656,7 +12938,7 @@ var ServerCoordinator = class {
|
|
|
10656
12938
|
});
|
|
10657
12939
|
}
|
|
10658
12940
|
}
|
|
10659
|
-
} else if (map instanceof
|
|
12941
|
+
} else if (map instanceof import_core20.ORMap) {
|
|
10660
12942
|
const items = map.items;
|
|
10661
12943
|
const tombstonesSet = map.tombstones;
|
|
10662
12944
|
const tagsToExpire = [];
|
|
@@ -10759,17 +13041,17 @@ var ServerCoordinator = class {
|
|
|
10759
13041
|
stringToWriteConcern(value) {
|
|
10760
13042
|
switch (value) {
|
|
10761
13043
|
case "FIRE_AND_FORGET":
|
|
10762
|
-
return
|
|
13044
|
+
return import_core20.WriteConcern.FIRE_AND_FORGET;
|
|
10763
13045
|
case "MEMORY":
|
|
10764
|
-
return
|
|
13046
|
+
return import_core20.WriteConcern.MEMORY;
|
|
10765
13047
|
case "APPLIED":
|
|
10766
|
-
return
|
|
13048
|
+
return import_core20.WriteConcern.APPLIED;
|
|
10767
13049
|
case "REPLICATED":
|
|
10768
|
-
return
|
|
13050
|
+
return import_core20.WriteConcern.REPLICATED;
|
|
10769
13051
|
case "PERSISTED":
|
|
10770
|
-
return
|
|
13052
|
+
return import_core20.WriteConcern.PERSISTED;
|
|
10771
13053
|
default:
|
|
10772
|
-
return
|
|
13054
|
+
return import_core20.WriteConcern.MEMORY;
|
|
10773
13055
|
}
|
|
10774
13056
|
}
|
|
10775
13057
|
/**
|
|
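The stringToWriteConcern switch above maps the five documented strings onto the core WriteConcern enum and falls back to MEMORY for anything it does not recognize. An equivalent lookup-style sketch for those documented values (illustrative only; the real method lives on ServerCoordinator):

const WRITE_CONCERN_BY_NAME = new Map([
  ["FIRE_AND_FORGET", import_core20.WriteConcern.FIRE_AND_FORGET],
  ["MEMORY", import_core20.WriteConcern.MEMORY],
  ["APPLIED", import_core20.WriteConcern.APPLIED],
  ["REPLICATED", import_core20.WriteConcern.REPLICATED],
  ["PERSISTED", import_core20.WriteConcern.PERSISTED]
]);
// Unrecognized strings fall back to MEMORY, matching the default branch above.
const toWriteConcern = (value) => WRITE_CONCERN_BY_NAME.get(value) ?? import_core20.WriteConcern.MEMORY;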
@@ -10826,7 +13108,7 @@ var ServerCoordinator = class {
|
|
|
10826
13108
|
}
|
|
10827
13109
|
});
|
|
10828
13110
|
if (op.id) {
|
|
10829
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13111
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.REPLICATED);
|
|
10830
13112
|
}
|
|
10831
13113
|
}
|
|
10832
13114
|
}
|
|
@@ -10834,7 +13116,7 @@ var ServerCoordinator = class {
|
|
|
10834
13116
|
this.broadcastBatch(batchedEvents, clientId);
|
|
10835
13117
|
for (const op of ops) {
|
|
10836
13118
|
if (op.id && this.partitionService.isLocalOwner(op.key)) {
|
|
10837
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13119
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.REPLICATED);
|
|
10838
13120
|
}
|
|
10839
13121
|
}
|
|
10840
13122
|
}
|
|
@@ -10862,7 +13144,7 @@ var ServerCoordinator = class {
|
|
|
10862
13144
|
const owner = this.partitionService.getOwner(op.key);
|
|
10863
13145
|
await this.forwardOpAndWait(op, owner);
|
|
10864
13146
|
if (op.id) {
|
|
10865
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13147
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.REPLICATED);
|
|
10866
13148
|
}
|
|
10867
13149
|
}
|
|
10868
13150
|
}
|
|
@@ -10870,7 +13152,7 @@ var ServerCoordinator = class {
|
|
|
10870
13152
|
await this.broadcastBatchSync(batchedEvents, clientId);
|
|
10871
13153
|
for (const op of ops) {
|
|
10872
13154
|
if (op.id && this.partitionService.isLocalOwner(op.key)) {
|
|
10873
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13155
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.REPLICATED);
|
|
10874
13156
|
}
|
|
10875
13157
|
}
|
|
10876
13158
|
}
|
|
@@ -10904,7 +13186,7 @@ var ServerCoordinator = class {
|
|
|
10904
13186
|
return;
|
|
10905
13187
|
}
|
|
10906
13188
|
if (op.id) {
|
|
10907
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13189
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.APPLIED);
|
|
10908
13190
|
}
|
|
10909
13191
|
if (eventPayload) {
|
|
10910
13192
|
batchedEvents.push({
|
|
@@ -10918,7 +13200,7 @@ var ServerCoordinator = class {
|
|
|
10918
13200
|
try {
|
|
10919
13201
|
await this.persistOpSync(op);
|
|
10920
13202
|
if (op.id) {
|
|
10921
|
-
this.writeAckManager.notifyLevel(op.id,
|
|
13203
|
+
this.writeAckManager.notifyLevel(op.id, import_core20.WriteConcern.PERSISTED);
|
|
10922
13204
|
}
|
|
10923
13205
|
} catch (err) {
|
|
10924
13206
|
logger.error({ opId: op.id, err }, "Persistence failed");
|
|
@@ -11261,10 +13543,10 @@ var RateLimitInterceptor = class {
|
|
|
11261
13543
|
};
|
|
11262
13544
|
|
|
11263
13545
|
// src/utils/nativeStats.ts
|
|
11264
|
-
var
|
|
13546
|
+
var import_core21 = require("@topgunbuild/core");
|
|
11265
13547
|
function getNativeModuleStatus() {
|
|
11266
13548
|
return {
|
|
11267
|
-
nativeHash: (0,
|
|
13549
|
+
nativeHash: (0, import_core21.isUsingNativeHash)(),
|
|
11268
13550
|
sharedArrayBuffer: SharedMemoryManager.isAvailable()
|
|
11269
13551
|
};
|
|
11270
13552
|
}
|
|
@@ -11297,15 +13579,15 @@ function logNativeStatus() {
|
|
|
11297
13579
|
}
|
|
11298
13580
|
|
|
11299
13581
|
// src/cluster/ClusterCoordinator.ts
|
|
11300
|
-
var
|
|
11301
|
-
var
|
|
13582
|
+
var import_events13 = require("events");
|
|
13583
|
+
var import_core22 = require("@topgunbuild/core");
|
|
11302
13584
|
var DEFAULT_CLUSTER_COORDINATOR_CONFIG = {
|
|
11303
13585
|
gradualRebalancing: true,
|
|
11304
|
-
migration:
|
|
11305
|
-
replication:
|
|
13586
|
+
migration: import_core22.DEFAULT_MIGRATION_CONFIG,
|
|
13587
|
+
replication: import_core22.DEFAULT_REPLICATION_CONFIG,
|
|
11306
13588
|
replicationEnabled: true
|
|
11307
13589
|
};
|
|
11308
|
-
var ClusterCoordinator = class extends
|
|
13590
|
+
var ClusterCoordinator = class extends import_events13.EventEmitter {
|
|
11309
13591
|
constructor(config) {
|
|
11310
13592
|
super();
|
|
11311
13593
|
this.replicationPipeline = null;
|
|
@@ -11670,12 +13952,12 @@ var ClusterCoordinator = class extends import_events9.EventEmitter {
|
|
|
11670
13952
|
};
|
|
11671
13953
|
|
|
11672
13954
|
// src/MapWithResolver.ts
|
|
11673
|
-
var
|
|
13955
|
+
var import_core23 = require("@topgunbuild/core");
|
|
11674
13956
|
var MapWithResolver = class {
|
|
11675
13957
|
constructor(config) {
|
|
11676
13958
|
this.mapName = config.name;
|
|
11677
|
-
this.hlc = new
|
|
11678
|
-
this.map = new
|
|
13959
|
+
this.hlc = new import_core23.HLC(config.nodeId);
|
|
13960
|
+
this.map = new import_core23.LWWMap(this.hlc);
|
|
11679
13961
|
this.resolverService = config.resolverService;
|
|
11680
13962
|
this.onRejection = config.onRejection;
|
|
11681
13963
|
}
|
|
@@ -11931,7 +14213,7 @@ function mergeWithDefaults(userConfig) {
|
|
|
11931
14213
|
}
|
|
11932
14214
|
|
|
11933
14215
|
// src/config/MapFactory.ts
|
|
11934
|
-
var
|
|
14216
|
+
var import_core24 = require("@topgunbuild/core");
|
|
11935
14217
|
var MapFactory = class {
|
|
11936
14218
|
/**
|
|
11937
14219
|
* Create a MapFactory.
|
|
@@ -11955,9 +14237,9 @@ var MapFactory = class {
|
|
|
11955
14237
|
createLWWMap(mapName, hlc) {
|
|
11956
14238
|
const mapConfig = this.mapConfigs.get(mapName);
|
|
11957
14239
|
if (!mapConfig || mapConfig.indexes.length === 0) {
|
|
11958
|
-
return new
|
|
14240
|
+
return new import_core24.LWWMap(hlc);
|
|
11959
14241
|
}
|
|
11960
|
-
const map = new
|
|
14242
|
+
const map = new import_core24.IndexedLWWMap(hlc);
|
|
11961
14243
|
for (const indexDef of mapConfig.indexes) {
|
|
11962
14244
|
this.addIndexToLWWMap(map, indexDef);
|
|
11963
14245
|
}
|
|
@@ -11973,9 +14255,9 @@ var MapFactory = class {
|
|
|
11973
14255
|
createORMap(mapName, hlc) {
|
|
11974
14256
|
const mapConfig = this.mapConfigs.get(mapName);
|
|
11975
14257
|
if (!mapConfig || mapConfig.indexes.length === 0) {
|
|
11976
|
-
return new
|
|
14258
|
+
return new import_core24.ORMap(hlc);
|
|
11977
14259
|
}
|
|
11978
|
-
const map = new
|
|
14260
|
+
const map = new import_core24.IndexedORMap(hlc);
|
|
11979
14261
|
for (const indexDef of mapConfig.indexes) {
|
|
11980
14262
|
this.addIndexToORMap(map, indexDef);
|
|
11981
14263
|
}
|
|
@@ -12012,7 +14294,7 @@ var MapFactory = class {
|
|
|
12012
14294
|
* Supports dot notation for nested paths.
|
|
12013
14295
|
*/
|
|
12014
14296
|
createAttribute(path) {
|
|
12015
|
-
return (0,
|
|
14297
|
+
return (0, import_core24.simpleAttribute)(path, (record) => {
|
|
12016
14298
|
return this.getNestedValue(record, path);
|
|
12017
14299
|
});
|
|
12018
14300
|
}
|
|
@@ -12111,12 +14393,18 @@ var MapFactory = class {
|
|
|
12111
14393
|
ConnectionRateLimiter,
|
|
12112
14394
|
DEFAULT_CLUSTER_COORDINATOR_CONFIG,
|
|
12113
14395
|
DEFAULT_CONFLICT_RESOLVER_CONFIG,
|
|
14396
|
+
DEFAULT_FAILURE_DETECTOR_CONFIG,
|
|
12114
14397
|
DEFAULT_INDEX_CONFIG,
|
|
12115
14398
|
DEFAULT_JOURNAL_SERVICE_CONFIG,
|
|
12116
14399
|
DEFAULT_LAG_TRACKER_CONFIG,
|
|
14400
|
+
DEFAULT_MERKLE_TREE_CONFIG,
|
|
14401
|
+
DEFAULT_READ_REPLICA_CONFIG,
|
|
14402
|
+
DEFAULT_REASSIGNER_CONFIG,
|
|
14403
|
+
DEFAULT_REPAIR_CONFIG,
|
|
12117
14404
|
DEFAULT_SANDBOX_CONFIG,
|
|
12118
14405
|
EntryProcessorHandler,
|
|
12119
14406
|
EventJournalService,
|
|
14407
|
+
FailureDetector,
|
|
12120
14408
|
FilterTasklet,
|
|
12121
14409
|
ForEachTasklet,
|
|
12122
14410
|
IteratorTasklet,
|
|
@@ -12126,14 +14414,19 @@ var MapFactory = class {
|
|
|
12126
14414
|
MapTasklet,
|
|
12127
14415
|
MapWithResolver,
|
|
12128
14416
|
MemoryServerAdapter,
|
|
14417
|
+
MerkleTreeManager,
|
|
12129
14418
|
MigrationManager,
|
|
12130
14419
|
ObjectPool,
|
|
14420
|
+
PartitionReassigner,
|
|
12131
14421
|
PartitionService,
|
|
12132
14422
|
PostgresAdapter,
|
|
12133
14423
|
ProcessorSandbox,
|
|
12134
14424
|
RateLimitInterceptor,
|
|
14425
|
+
ReadReplicaHandler,
|
|
12135
14426
|
ReduceTasklet,
|
|
14427
|
+
RepairScheduler,
|
|
12136
14428
|
ReplicationPipeline,
|
|
14429
|
+
SearchCoordinator,
|
|
12137
14430
|
SecurityManager,
|
|
12138
14431
|
ServerCoordinator,
|
|
12139
14432
|
TaskletScheduler,
|