@optimystic/db-p2p 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.min.js +52 -0
- package/dist/index.min.js.map +7 -0
- package/dist/src/cluster/client.d.ts +12 -0
- package/dist/src/cluster/client.d.ts.map +1 -0
- package/dist/src/cluster/client.js +65 -0
- package/dist/src/cluster/client.js.map +1 -0
- package/dist/src/cluster/cluster-repo.d.ts +79 -0
- package/dist/src/cluster/cluster-repo.d.ts.map +1 -0
- package/dist/src/cluster/cluster-repo.js +613 -0
- package/dist/src/cluster/cluster-repo.js.map +1 -0
- package/dist/src/cluster/partition-detector.d.ts +59 -0
- package/dist/src/cluster/partition-detector.d.ts.map +1 -0
- package/dist/src/cluster/partition-detector.js +129 -0
- package/dist/src/cluster/partition-detector.js.map +1 -0
- package/dist/src/cluster/service.d.ts +49 -0
- package/dist/src/cluster/service.d.ts.map +1 -0
- package/dist/src/cluster/service.js +107 -0
- package/dist/src/cluster/service.js.map +1 -0
- package/dist/src/index.d.ts +29 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +29 -0
- package/dist/src/index.js.map +1 -0
- package/dist/src/it-utility.d.ts +4 -0
- package/dist/src/it-utility.d.ts.map +1 -0
- package/dist/src/it-utility.js +32 -0
- package/dist/src/it-utility.js.map +1 -0
- package/dist/src/libp2p-key-network.d.ts +59 -0
- package/dist/src/libp2p-key-network.d.ts.map +1 -0
- package/dist/src/libp2p-key-network.js +278 -0
- package/dist/src/libp2p-key-network.js.map +1 -0
- package/dist/src/libp2p-node.d.ts +28 -0
- package/dist/src/libp2p-node.d.ts.map +1 -0
- package/dist/src/libp2p-node.js +270 -0
- package/dist/src/libp2p-node.js.map +1 -0
- package/dist/src/logger.d.ts +3 -0
- package/dist/src/logger.d.ts.map +1 -0
- package/dist/src/logger.js +6 -0
- package/dist/src/logger.js.map +1 -0
- package/dist/src/network/get-network-manager.d.ts +4 -0
- package/dist/src/network/get-network-manager.d.ts.map +1 -0
- package/dist/src/network/get-network-manager.js +17 -0
- package/dist/src/network/get-network-manager.js.map +1 -0
- package/dist/src/network/network-manager-service.d.ts +82 -0
- package/dist/src/network/network-manager-service.d.ts.map +1 -0
- package/dist/src/network/network-manager-service.js +283 -0
- package/dist/src/network/network-manager-service.js.map +1 -0
- package/dist/src/peer-utils.d.ts +2 -0
- package/dist/src/peer-utils.d.ts.map +1 -0
- package/dist/src/peer-utils.js +28 -0
- package/dist/src/peer-utils.js.map +1 -0
- package/dist/src/protocol-client.d.ts +12 -0
- package/dist/src/protocol-client.d.ts.map +1 -0
- package/dist/src/protocol-client.js +34 -0
- package/dist/src/protocol-client.js.map +1 -0
- package/dist/src/repo/client.d.ts +17 -0
- package/dist/src/repo/client.d.ts.map +1 -0
- package/dist/src/repo/client.js +82 -0
- package/dist/src/repo/client.js.map +1 -0
- package/dist/src/repo/cluster-coordinator.d.ts +59 -0
- package/dist/src/repo/cluster-coordinator.d.ts.map +1 -0
- package/dist/src/repo/cluster-coordinator.js +539 -0
- package/dist/src/repo/cluster-coordinator.js.map +1 -0
- package/dist/src/repo/coordinator-repo.d.ts +29 -0
- package/dist/src/repo/coordinator-repo.d.ts.map +1 -0
- package/dist/src/repo/coordinator-repo.js +102 -0
- package/dist/src/repo/coordinator-repo.js.map +1 -0
- package/dist/src/repo/redirect.d.ts +14 -0
- package/dist/src/repo/redirect.d.ts.map +1 -0
- package/dist/src/repo/redirect.js +9 -0
- package/dist/src/repo/redirect.js.map +1 -0
- package/dist/src/repo/service.d.ts +52 -0
- package/dist/src/repo/service.d.ts.map +1 -0
- package/dist/src/repo/service.js +181 -0
- package/dist/src/repo/service.js.map +1 -0
- package/dist/src/repo/types.d.ts +7 -0
- package/dist/src/repo/types.d.ts.map +1 -0
- package/dist/src/repo/types.js +2 -0
- package/dist/src/repo/types.js.map +1 -0
- package/dist/src/routing/libp2p-known-peers.d.ts +4 -0
- package/dist/src/routing/libp2p-known-peers.d.ts.map +1 -0
- package/dist/src/routing/libp2p-known-peers.js +19 -0
- package/dist/src/routing/libp2p-known-peers.js.map +1 -0
- package/dist/src/routing/responsibility.d.ts +14 -0
- package/dist/src/routing/responsibility.d.ts.map +1 -0
- package/dist/src/routing/responsibility.js +45 -0
- package/dist/src/routing/responsibility.js.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts +23 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.js +59 -0
- package/dist/src/routing/simple-cluster-coordinator.js.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts +65 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.js +93 -0
- package/dist/src/storage/arachnode-fret-adapter.js.map +1 -0
- package/dist/src/storage/block-storage.d.ts +31 -0
- package/dist/src/storage/block-storage.d.ts.map +1 -0
- package/dist/src/storage/block-storage.js +154 -0
- package/dist/src/storage/block-storage.js.map +1 -0
- package/dist/src/storage/file-storage.d.ts +30 -0
- package/dist/src/storage/file-storage.d.ts.map +1 -0
- package/dist/src/storage/file-storage.js +127 -0
- package/dist/src/storage/file-storage.js.map +1 -0
- package/dist/src/storage/helpers.d.ts +3 -0
- package/dist/src/storage/helpers.d.ts.map +1 -0
- package/dist/src/storage/helpers.js +28 -0
- package/dist/src/storage/helpers.js.map +1 -0
- package/dist/src/storage/i-block-storage.d.ts +32 -0
- package/dist/src/storage/i-block-storage.d.ts.map +1 -0
- package/dist/src/storage/i-block-storage.js +2 -0
- package/dist/src/storage/i-block-storage.js.map +1 -0
- package/dist/src/storage/i-raw-storage.d.ts +20 -0
- package/dist/src/storage/i-raw-storage.d.ts.map +1 -0
- package/dist/src/storage/i-raw-storage.js +2 -0
- package/dist/src/storage/i-raw-storage.js.map +1 -0
- package/dist/src/storage/memory-storage.d.ts +27 -0
- package/dist/src/storage/memory-storage.d.ts.map +1 -0
- package/dist/src/storage/memory-storage.js +87 -0
- package/dist/src/storage/memory-storage.js.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts +63 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.js +157 -0
- package/dist/src/storage/restoration-coordinator-v2.js.map +1 -0
- package/dist/src/storage/ring-selector.d.ts +56 -0
- package/dist/src/storage/ring-selector.d.ts.map +1 -0
- package/dist/src/storage/ring-selector.js +118 -0
- package/dist/src/storage/ring-selector.js.map +1 -0
- package/dist/src/storage/storage-monitor.d.ts +23 -0
- package/dist/src/storage/storage-monitor.d.ts.map +1 -0
- package/dist/src/storage/storage-monitor.js +40 -0
- package/dist/src/storage/storage-monitor.js.map +1 -0
- package/dist/src/storage/storage-repo.d.ts +17 -0
- package/dist/src/storage/storage-repo.d.ts.map +1 -0
- package/dist/src/storage/storage-repo.js +267 -0
- package/dist/src/storage/storage-repo.js.map +1 -0
- package/dist/src/storage/struct.d.ts +29 -0
- package/dist/src/storage/struct.d.ts.map +1 -0
- package/dist/src/storage/struct.js +2 -0
- package/dist/src/storage/struct.js.map +1 -0
- package/dist/src/sync/client.d.ts +27 -0
- package/dist/src/sync/client.d.ts.map +1 -0
- package/dist/src/sync/client.js +32 -0
- package/dist/src/sync/client.js.map +1 -0
- package/dist/src/sync/protocol.d.ts +58 -0
- package/dist/src/sync/protocol.d.ts.map +1 -0
- package/dist/src/sync/protocol.js +12 -0
- package/dist/src/sync/protocol.js.map +1 -0
- package/dist/src/sync/service.d.ts +62 -0
- package/dist/src/sync/service.d.ts.map +1 -0
- package/dist/src/sync/service.js +168 -0
- package/dist/src/sync/service.js.map +1 -0
- package/package.json +73 -0
- package/readme.md +497 -0
- package/src/cluster/client.ts +63 -0
- package/src/cluster/cluster-repo.ts +711 -0
- package/src/cluster/partition-detector.ts +158 -0
- package/src/cluster/service.ts +156 -0
- package/src/index.ts +30 -0
- package/src/it-utility.ts +36 -0
- package/src/libp2p-key-network.ts +334 -0
- package/src/libp2p-node.ts +335 -0
- package/src/logger.ts +9 -0
- package/src/network/get-network-manager.ts +17 -0
- package/src/network/network-manager-service.ts +334 -0
- package/src/peer-utils.ts +24 -0
- package/src/protocol-client.ts +54 -0
- package/src/repo/client.ts +112 -0
- package/src/repo/cluster-coordinator.ts +592 -0
- package/src/repo/coordinator-repo.ts +137 -0
- package/src/repo/redirect.ts +17 -0
- package/src/repo/service.ts +219 -0
- package/src/repo/types.ts +7 -0
- package/src/routing/libp2p-known-peers.ts +26 -0
- package/src/routing/responsibility.ts +63 -0
- package/src/routing/simple-cluster-coordinator.ts +70 -0
- package/src/storage/arachnode-fret-adapter.ts +128 -0
- package/src/storage/block-storage.ts +182 -0
- package/src/storage/file-storage.ts +163 -0
- package/src/storage/helpers.ts +29 -0
- package/src/storage/i-block-storage.ts +40 -0
- package/src/storage/i-raw-storage.ts +30 -0
- package/src/storage/memory-storage.ts +108 -0
- package/src/storage/restoration-coordinator-v2.ts +191 -0
- package/src/storage/ring-selector.ts +155 -0
- package/src/storage/storage-monitor.ts +59 -0
- package/src/storage/storage-repo.ts +320 -0
- package/src/storage/struct.ts +34 -0
- package/src/sync/client.ts +42 -0
- package/src/sync/protocol.ts +71 -0
- package/src/sync/service.ts +229 -0
|
@@ -0,0 +1,592 @@
|
|
|
1
|
+
import { peerIdFromString } from "@libp2p/peer-id";
|
|
2
|
+
import type { ClusterRecord, IKeyNetwork, RepoMessage, BlockId, ClusterPeers, MessageOptions, Signature, ClusterConsensusConfig } from "@optimystic/db-core";
|
|
3
|
+
import { base58btc } from "multiformats/bases/base58";
|
|
4
|
+
import { sha256 } from "multiformats/hashes/sha2";
|
|
5
|
+
import { ClusterClient } from "../cluster/client.js";
|
|
6
|
+
import { Pending } from "@optimystic/db-core";
|
|
7
|
+
import type { PeerId } from "@libp2p/interface";
|
|
8
|
+
import { createLogger } from '../logger.js'
|
|
9
|
+
import type { ClusterLogPeerOutcome } from './types.js'
|
|
10
|
+
import type { FretService } from "p2p-fret";
|
|
11
|
+
|
|
12
|
+
// Namespaced debug logger; all cluster-coordination events are emitted under the 'cluster' tag.
const log = createLogger('cluster')
|
|
13
|
+
|
|
14
|
+
/**
 * Tracks the peers that have not yet acknowledged a commit, together with the
 * exponential-backoff schedule used to re-deliver the commit to them.
 */
interface CommitRetryState {
	// Stringified peer IDs still awaiting a successful commit delivery
	pendingPeers: Set<string>;
	// 1-based retry attempt counter; compared against retryMaxAttempts
	attempt: number;
	// Current backoff delay; grows by retryBackoffFactor up to retryMaxIntervalMs
	intervalMs: number;
	// Handle for the next scheduled retry, if one is pending
	timer?: NodeJS.Timeout;
}

/**
 * Manages the state of cluster transactions for a specific block ID
 */
interface ClusterTransactionState {
	// base58btc-encoded sha256 of the message; also the key in the transactions map
	messageHash: string;
	// Latest merged view of the transaction record (promises + commits)
	record: ClusterRecord;
	// In-flight transaction result awaited by executeClusterTransaction
	pending: Pending<ClusterRecord>;
	// Timestamp (ms) of the last record update; refreshed by updateTransactionRecord
	lastUpdate: number;
	// NOTE(review): these two timeouts are declared but not set anywhere in this view — confirm usage elsewhere
	promiseTimeout?: NodeJS.Timeout;
	resolutionTimeout?: NodeJS.Timeout;
	// Present only while commit re-delivery retries are scheduled
	retry?: CommitRetryState;
}
|
|
33
|
+
|
|
34
|
+
/** Manages distributed transactions across clusters */
export class ClusterCoordinator {
	// TODO: move this into a state management interface so that transaction state can be persisted
	// Keyed by message hash; entries are removed ~100ms after completion unless retries are pending.
	private transactions: Map<string, ClusterTransactionState> = new Map();
	// Commit-retry backoff tuning: start at 2s, double each attempt, cap at 30s, give up after 5 tries.
	private readonly retryInitialIntervalMs = 2000;
	private readonly retryBackoffFactor = 2;
	private readonly retryMaxIntervalMs = 30000;
	private readonly retryMaxAttempts = 5;

	constructor(
		// Used to resolve which peers form the cluster responsible for a block
		private readonly keyNetwork: IKeyNetwork,
		// Factory for per-peer RPC clients used during promise/commit rounds
		private readonly createClusterClient: (peerId: PeerId) => ClusterClient,
		private readonly cfg: ClusterConsensusConfig & { clusterSize: number },
		// Optional in-process cluster member: lets this node participate without a network round-trip
		private readonly localCluster?: { update: (record: ClusterRecord) => Promise<ClusterRecord>; peerId: PeerId },
		// Optional FRET service providing network-size estimates (hints + small-cluster validation)
		private readonly fretService?: FretService
	) { }
|
|
50
|
+
|
|
51
|
+
/**
|
|
52
|
+
* Creates a base 58 BTC string hash for a message to uniquely identify a transaction
|
|
53
|
+
*/
|
|
54
|
+
private async createMessageHash(message: RepoMessage): Promise<string> {
|
|
55
|
+
const msgBytes = new TextEncoder().encode(JSON.stringify(message));
|
|
56
|
+
const hashBytes = await sha256.digest(msgBytes);
|
|
57
|
+
return base58btc.encode(hashBytes.digest);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Gets all peers in the cluster for a specific block ID
|
|
62
|
+
*/
|
|
63
|
+
private async getClusterForBlock(blockId: BlockId): Promise<ClusterPeers> {
|
|
64
|
+
const blockIdBytes = new TextEncoder().encode(blockId);
|
|
65
|
+
try {
|
|
66
|
+
const peers = await this.keyNetwork.findCluster(blockIdBytes);
|
|
67
|
+
const peerIds = Object.keys(peers ?? {});
|
|
68
|
+
log('cluster-tx:cluster-members', { blockId, peerIds });
|
|
69
|
+
return peers;
|
|
70
|
+
} catch (e) {
|
|
71
|
+
log('WARN findCluster failed for %s: %o', blockId, e)
|
|
72
|
+
return {} as ClusterPeers
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
 * Builds the initial ClusterRecord for a transaction, seeding cluster-size
 * hints from configuration and (when available) the FRET network-size estimate.
 */
private makeRecord(peers: ClusterPeers, messageHash: string, message: RepoMessage): ClusterRecord {
	const memberCount = Object.keys(peers ?? {}).length;
	const record: ClusterRecord = {
		messageHash,
		peers,
		message,
		coordinatingBlockIds: message.coordinatingBlockIds,
		promises: {},
		commits: {},
		// 0 members collapses to undefined (no suggestion)
		suggestedClusterSize: memberCount || undefined,
		minRequiredSize: this.cfg.allowClusterDownsize ? undefined : this.cfg.clusterSize
	};

	// Add network size hint if available
	if (this.fretService) {
		try {
			const estimate = this.fretService.getNetworkSizeEstimate();
			if (estimate.size_estimate > 0) {
				record.networkSizeHint = estimate.size_estimate;
				record.networkSizeConfidence = estimate.confidence;
			}
		} catch {
			// Best-effort: a failed estimate must not block record creation
		}
	}

	return record;
}
|
|
104
|
+
|
|
105
|
+
/**
 * Initiates a 2-phase transaction for a specific block ID.
 *
 * Flow: resolve cluster membership -> hash the message -> build the record ->
 * run the promise/commit rounds via executeTransaction (wrapped in Pending) ->
 * await the result. The transaction state is registered in this.transactions
 * for the duration so peer responses can be merged into it, and is removed
 * shortly after completion unless commit retries are still scheduled.
 *
 * @param blockId block whose cluster will execute the transaction
 * @param message repo message forming the transaction payload
 * @param options currently unused here — NOTE(review): confirm whether callers expect it to be honored
 * @returns the final ClusterRecord produced by executeTransaction
 * @throws whatever executeTransaction throws (size rejection, validator rejection, supermajority failure)
 */
async executeClusterTransaction(blockId: BlockId, message: RepoMessage, options?: MessageOptions): Promise<any> {
	// Get the cluster peers for this block
	const peers = await this.getClusterForBlock(blockId);

	// Create a unique hash for this transaction
	const messageHash = await this.createMessageHash(message);

	// Create a cluster record for this transaction
	const record = this.makeRecord(peers, messageHash, message);
	log('cluster-tx:start', {
		messageHash,
		blockId,
		peerCount: Object.keys(peers ?? {}).length,
		allowDownsize: this.cfg.allowClusterDownsize,
		configuredSize: this.cfg.clusterSize,
		suggestedSize: record.suggestedClusterSize,
		minRequiredSize: record.minRequiredSize
	});

	// Create a new pending transaction
	const transactionPromise = this.executeTransaction(peers, record);
	const pending = new Pending(transactionPromise);

	// Store the transaction state so peer responses can be merged while in flight
	const state: ClusterTransactionState = {
		messageHash,
		record,
		pending,
		lastUpdate: Date.now()
	};
	this.transactions.set(messageHash, state);
	log('cluster-tx:transaction-store', {
		messageHash,
		transactionKeys: Array.from(this.transactions.keys())
	});

	// Wait for the transaction to complete
	try {
		const result = await pending.result();
		return result;
	} finally {
		// Re-read the stored state: it may have been mutated (merged promises/commits, retry scheduling)
		const stored = this.transactions.get(messageHash);
		const retrySnapshot = stored?.retry ? {
			attempt: stored.retry.attempt,
			pending: Array.from(stored.retry.pendingPeers ?? [])
		} : undefined;
		log('cluster-tx:complete', {
			messageHash,
			finalPromises: stored ? Object.keys(stored.record.promises ?? {}) : undefined,
			finalCommits: stored ? Object.keys(stored.record.commits ?? {}) : undefined,
			retry: retrySnapshot
		});
		// Don't remove transaction immediately if retries are scheduled
		// Let the retry completion or abort handle cleanup
		if (!stored?.retry) {
			// Wait a bit before cleanup to allow any in-flight responses to arrive
			setTimeout(() => {
				this.transactions.delete(messageHash);
				log('cluster-tx:transaction-remove', {
					messageHash,
					remaining: Array.from(this.transactions.keys())
				});
			}, 100);
		}
	}
}
|
|
174
|
+
|
|
175
|
+
/**
 * Executes the full transaction process.
 *
 * Stages: (1) reject clusters below the absolute minimum size unless
 * validateSmallCluster accepts them; (2) reject clusters below the configured
 * size when downsizing is disallowed; (3) collect promise signatures from all
 * peers and require a super-majority of approvals; (4) commit.
 *
 * @throws Error when the cluster is too small, validators reject the
 *   transaction, or the approval super-majority cannot be reached
 */
private async executeTransaction(peers: ClusterPeers, record: ClusterRecord): Promise<ClusterRecord> {
	const peerCount = Object.keys(peers).length;

	// Validate against minimum cluster size
	if (peerCount < this.cfg.minAbsoluteClusterSize) {
		const validated = await this.validateSmallCluster(peerCount, peers);
		if (!validated) {
			log('cluster-tx:reject-too-small', {
				peerCount,
				minRequired: this.cfg.minAbsoluteClusterSize
			});
			throw new Error(`Cluster size ${peerCount} below minimum ${this.cfg.minAbsoluteClusterSize} and not validated`);
		}
		log('cluster-tx:small-cluster-validated', { peerCount });
	}

	// Check configured cluster size
	if (!this.cfg.allowClusterDownsize && peerCount < this.cfg.clusterSize) {
		log('cluster-tx:reject-downsize', { peerCount, required: this.cfg.clusterSize });
		throw new Error(`Cluster size ${peerCount} below configured minimum ${this.cfg.clusterSize}`);
	}

	// Collect promises with super-majority requirement
	const promised = await this.collectPromises(peers, record);
	const superMajority = Math.ceil(peerCount * this.cfg.superMajorityThreshold);

	// Count approvals and rejections separately
	const promises = promised.record.promises;
	const approvalCount = Object.values(promises).filter(sig => sig.type === 'approve').length;
	const rejectionCount = Object.values(promises).filter(sig => sig.type === 'reject').length;

	// Check if rejections make super-majority impossible
	// If more than (peerCount - superMajority) nodes reject, we can never reach super-majority
	const maxAllowedRejections = peerCount - superMajority;
	if (rejectionCount > maxAllowedRejections) {
		// Collect per-peer reasons for the error message / log
		const rejectReasons = Object.entries(promises)
			.filter(([_, sig]) => sig.type === 'reject')
			.map(([peerId, sig]) => `${peerId}: ${sig.rejectReason ?? 'unknown'}`)
			.join('; ');
		log('cluster-tx:rejected-by-validators', {
			messageHash: record.messageHash,
			peerCount,
			rejections: rejectionCount,
			maxAllowed: maxAllowedRejections,
			reasons: rejectReasons
		});
		this.updateTransactionRecord(promised.record, 'rejected-by-validators');
		throw new Error(`Transaction rejected by validators (${rejectionCount}/${peerCount} rejected): ${rejectReasons}`);
	}

	// Single-peer clusters skip the super-majority requirement (peerCount > 1 guard)
	if (peerCount > 1 && approvalCount < superMajority) {
		log('cluster-tx:supermajority-failed', {
			messageHash: record.messageHash,
			peerCount,
			approvals: approvalCount,
			rejections: rejectionCount,
			superMajority,
			threshold: this.cfg.superMajorityThreshold
		});
		this.updateTransactionRecord(promised.record, 'supermajority-failed');
		throw new Error(`Failed to get super-majority: ${approvalCount}/${peerCount} approvals (needed ${superMajority}, ${rejectionCount} rejections)`);
	}

	return await this.commitTransaction(promised.record);
}
|
|
243
|
+
|
|
244
|
+
async getClusterSize(blockId: BlockId): Promise<number> {
|
|
245
|
+
const peers = await this.getClusterForBlock(blockId);
|
|
246
|
+
return Object.keys(peers ?? {}).length;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
/**
|
|
250
|
+
* Validate that a small cluster size is legitimate by querying remote peers
|
|
251
|
+
* for their network size estimates. Returns true if estimates roughly agree.
|
|
252
|
+
*/
|
|
253
|
+
private async validateSmallCluster(localSize: number, peers: ClusterPeers): Promise<boolean> {
|
|
254
|
+
// If we have FRET and it shows confident estimate
|
|
255
|
+
if (this.fretService) {
|
|
256
|
+
try {
|
|
257
|
+
const estimate = this.fretService.getNetworkSizeEstimate();
|
|
258
|
+
if (estimate.confidence > 0.5) {
|
|
259
|
+
// Check if FRET estimate roughly matches observed cluster size
|
|
260
|
+
const orderOfMagnitude = Math.floor(Math.log10(estimate.size_estimate + 1));
|
|
261
|
+
const localOrderOfMagnitude = Math.floor(Math.log10(localSize + 1));
|
|
262
|
+
|
|
263
|
+
// If within same order of magnitude, accept it
|
|
264
|
+
if (Math.abs(orderOfMagnitude - localOrderOfMagnitude) <= 1) {
|
|
265
|
+
log('cluster-tx:small-cluster-validated-by-fret', {
|
|
266
|
+
localSize,
|
|
267
|
+
fretEstimate: estimate.size_estimate,
|
|
268
|
+
confidence: estimate.confidence,
|
|
269
|
+
sources: estimate.sources
|
|
270
|
+
});
|
|
271
|
+
return true;
|
|
272
|
+
}
|
|
273
|
+
}
|
|
274
|
+
} catch (err) {
|
|
275
|
+
// Ignore errors
|
|
276
|
+
}
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
// Fallback: accept small clusters in development/testing scenarios
|
|
280
|
+
// In production, FRET should provide validation
|
|
281
|
+
log('cluster-tx:small-cluster-accepted-without-validation', {
|
|
282
|
+
localSize,
|
|
283
|
+
reason: 'no-confident-network-size-estimate'
|
|
284
|
+
});
|
|
285
|
+
return true;
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
/**
 * Collects promises from all peers in the cluster.
 *
 * Fires an update() at every peer in parallel (using the in-process
 * localCluster shortcut for this node), tolerates individual failures
 * (failed peers contribute nothing), merges all returned promise signatures
 * into `record` (later responses win on key collision), and snapshots the
 * merged record into the transactions map.
 *
 * @returns the same record instance, wrapped, with merged promises
 */
private async collectPromises(peers: ClusterPeers, record: ClusterRecord): Promise<{ record: ClusterRecord }> {
	const peerIds = Object.keys(peers);
	const summary: ClusterLogPeerOutcome[] = [];
	// For each peer, create a client and request a promise
	const promiseRequests = peerIds.map(peerIdStr => {
		const isLocal = this.localCluster && peerIdStr === this.localCluster.peerId.toString();
		log('cluster-tx:promise-request', { messageHash: record.messageHash, peerId: peerIdStr, isLocal });
		const promise = isLocal
			? this.localCluster!.update(record)
			: this.createClusterClient(peerIdFromString(peerIdStr)).update(record);
		return new Pending(promise);
	});

	// Wait for all promises to complete; per-peer failures resolve to null rather than rejecting the batch
	const results = await Promise.all(promiseRequests.map((p, idx) => p.result().then(res => {
		const peerIdStr = peerIds[idx]!;
		log('cluster-tx:promise-response', {
			messageHash: record.messageHash,
			peerId: peerIdStr,
			success: true,
			returnedPromises: Object.keys(res.promises ?? {}),
			returnedCommits: Object.keys(res.commits ?? {})
		});
		summary.push({ peerId: peerIdStr, success: true });
		return res;
	}).catch(err => {
		const peerIdStr = peerIds[idx]!;
		log('cluster-tx:promise-response', { messageHash: record.messageHash, peerId: peerIdStr, success: false, error: err });
		summary.push({ peerId: peerIdStr, success: false, error: err instanceof Error ? err.message : String(err) });
		return null;
	})));
	const successes = summary.filter(entry => entry.success).map(entry => entry.peerId);
	const failures = summary.filter(entry => !entry.success);
	log('cluster-tx:promise-summary', {
		messageHash: record.messageHash,
		successes,
		failures
	});

	log('cluster-tx:promise-merge-begin', {
		messageHash: record.messageHash,
		initialPromises: Object.keys(record.promises ?? {}),
		transactionsKeys: Array.from(this.transactions.keys()),
		hasTransaction: this.transactions.has(record.messageHash)
	});

	// Merge all promises into the record (nulls from failed peers filtered out)
	for (const result of results.filter(Boolean) as ClusterRecord[]) {
		log('cluster-tx:promise-merge-input', {
			messageHash: record.messageHash,
			resultFrom: Object.keys(result.promises ?? {}),
			recordBefore: Object.keys(record.promises ?? {})
		});
		const resultPromises = Object.keys(result.promises ?? {});
		log('cluster-tx:promise-merge-result', {
			messageHash: record.messageHash,
			peerPromises: resultPromises
		});
		// Warn (log only) when a peer's suggested cluster size diverges from what we observe
		if (typeof record.suggestedClusterSize === 'number' && typeof result.suggestedClusterSize === 'number') {
			const expected = result.suggestedClusterSize;
			const actual = Object.keys(peers).length;
			const maxDiff = Math.ceil(Math.max(1, expected * this.cfg.clusterSizeTolerance));
			if (Math.abs(actual - expected) > maxDiff) {
				log('cluster-tx:size-variance', { expected, actual, tolerance: this.cfg.clusterSizeTolerance });
			}
		}
		record.promises = { ...record.promises, ...result.promises };
		log('cluster-tx:promise-merge-after', {
			messageHash: record.messageHash,
			mergedPromises: Object.keys(record.promises ?? {})
		});
	}
	log('cluster-tx:promise-merge', {
		messageHash: record.messageHash,
		mergedPromises: Object.keys(record.promises ?? {})
	});
	log('cluster-tx:promise-merge-end', {
		messageHash: record.messageHash,
		finalPromises: Object.keys(record.promises ?? {}),
		transactionsEntry: this.transactions.get(record.messageHash)
	});
	this.updateTransactionRecord(record, 'after-promises');
	return { record };
}
|
|
375
|
+
|
|
376
|
+
/**
 * Commits the transaction to all peers in the cluster.
 *
 * Sends the promise-bearing record to every peer in parallel, merges the
 * returned commit signatures, and treats a simple majority of commits as
 * success. Peers that failed to acknowledge are handed to the commit-retry
 * scheduler for background re-delivery; the method itself never throws on
 * individual peer failures.
 *
 * @returns the record with merged commits
 */
private async commitTransaction(record: ClusterRecord): Promise<ClusterRecord> {
	// For each peer, create a client and send the commit
	const peerIds = Object.keys(record.peers);
	const summary: ClusterLogPeerOutcome[] = [];
	// Send the record with promises to all peers
	// Each peer will add its own commit signature
	const commitPayload = {
		...record
	};
	const commitRequests = peerIds.map(peerIdStr => {
		const isLocal = this.localCluster && peerIdStr === this.localCluster.peerId.toString();
		log('cluster-tx:commit-request', { messageHash: record.messageHash, peerId: peerIdStr, isLocal });
		const promise = isLocal
			? this.localCluster!.update(commitPayload)
			: this.createClusterClient(peerIdFromString(peerIdStr)).update(commitPayload);
		return new Pending(promise);
	});

	// Wait for all commits to complete; failures resolve to null so the batch never rejects
	const results = await Promise.all(commitRequests.map((p, idx) => p.result().then(res => {
		const peerIdStr = peerIds[idx]!;
		log('cluster-tx:commit-response', { messageHash: record.messageHash, peerId: peerIdStr, success: true });
		summary.push({ peerId: peerIdStr, success: true });
		return res;
	}).catch(err => {
		const peerIdStr = peerIds[idx]!;
		log('cluster-tx:commit-response', { messageHash: record.messageHash, peerId: peerIdStr, success: false, error: err });
		summary.push({ peerId: peerIdStr, success: false, error: err instanceof Error ? err.message : String(err) });
		return null;
	})));
	const commitSuccesses = summary.filter(entry => entry.success).map(entry => entry.peerId);
	const commitFailures = summary.filter(entry => !entry.success);
	log('cluster-tx:commit-summary', {
		messageHash: record.messageHash,
		successes: commitSuccesses,
		failures: commitFailures
	});
	log('cluster-tx:commit-merge-begin', {
		messageHash: record.messageHash,
		initialCommits: Object.keys(record.commits ?? {}),
		transactionsEntry: this.transactions.get(record.messageHash)
	});

	// Merge all commits into the record (later responses win on key collision)
	for (const result of results.filter(Boolean) as ClusterRecord[]) {
		log('cluster-tx:commit-merge-input', {
			messageHash: record.messageHash,
			resultFrom: Object.keys(result.commits ?? {}),
			recordBefore: Object.keys(record.commits ?? {})
		});
		log('cluster-tx:commit-merge-result', {
			messageHash: record.messageHash,
			peerCommits: Object.keys(result.commits ?? {})
		});
		record.commits = { ...record.commits, ...result.commits };
		log('cluster-tx:commit-merge-after', {
			messageHash: record.messageHash,
			mergedCommits: Object.keys(record.commits ?? {})
		});
	}
	log('cluster-tx:commit-merge', {
		messageHash: record.messageHash,
		mergedCommits: Object.keys(record.commits ?? {})
	});
	log('cluster-tx:commit-merge-end', {
		messageHash: record.messageHash,
		finalCommits: Object.keys(record.commits ?? {}),
		transactionsEntry: this.transactions.get(record.messageHash)
	});
	this.updateTransactionRecord(record, 'after-commit');

	// Check for simple majority (>50%) - this proves commitment
	const peerCount = Object.keys(record.peers).length;
	const simpleMajority = Math.floor(peerCount * this.cfg.simpleMajorityThreshold) + 1;
	const commitCount = Object.keys(record.commits).length;

	if (commitCount >= simpleMajority) {
		log('cluster-tx:commit-majority-reached', {
			messageHash: record.messageHash,
			commitCount,
			simpleMajority,
			peerCount,
			threshold: this.cfg.simpleMajorityThreshold
		});
		// Simple majority proves commitment - we can return success
		// Background propagation to remaining peers will continue
	}

	// Failed peers get background re-delivery; a clean round clears any stale retry state
	const missingPeers = commitFailures.map(entry => entry.peerId);
	if (missingPeers.length > 0) {
		this.scheduleCommitRetry(record.messageHash, record, missingPeers);
	} else {
		this.clearRetry(record.messageHash);
	}
	return record;
}
|
|
475
|
+
|
|
476
|
+
private updateTransactionRecord(record: ClusterRecord, stage: string): void {
|
|
477
|
+
const state = this.transactions.get(record.messageHash);
|
|
478
|
+
if (!state) {
|
|
479
|
+
log('cluster-tx:transaction-update-miss', { messageHash: record.messageHash, stage });
|
|
480
|
+
return;
|
|
481
|
+
}
|
|
482
|
+
state.record = { ...record };
|
|
483
|
+
state.lastUpdate = Date.now();
|
|
484
|
+
log('cluster-tx:transaction-update', {
|
|
485
|
+
messageHash: record.messageHash,
|
|
486
|
+
stage,
|
|
487
|
+
promises: Object.keys(record.promises ?? {}),
|
|
488
|
+
commits: Object.keys(record.commits ?? {})
|
|
489
|
+
});
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
private scheduleCommitRetry(messageHash: string, record: ClusterRecord, missingPeers: string[]): void {
|
|
493
|
+
const state = this.transactions.get(messageHash);
|
|
494
|
+
if (!state) {
|
|
495
|
+
return;
|
|
496
|
+
}
|
|
497
|
+
const existing = state.retry;
|
|
498
|
+
const nextAttempt = (existing?.attempt ?? 0) + 1;
|
|
499
|
+
if (nextAttempt > this.retryMaxAttempts) {
|
|
500
|
+
log('cluster-tx:retry-abort', { messageHash, missingPeers });
|
|
501
|
+
return;
|
|
502
|
+
}
|
|
503
|
+
if (missingPeers.length === 0) {
|
|
504
|
+
this.clearRetry(messageHash);
|
|
505
|
+
return;
|
|
506
|
+
}
|
|
507
|
+
const pendingPeers = new Set(missingPeers);
|
|
508
|
+
const baseInterval = existing ? Math.min(existing.intervalMs * this.retryBackoffFactor, this.retryMaxIntervalMs) : this.retryInitialIntervalMs;
|
|
509
|
+
if (existing?.timer) {
|
|
510
|
+
clearTimeout(existing.timer);
|
|
511
|
+
}
|
|
512
|
+
const timer = setTimeout(() => {
|
|
513
|
+
void this.retryCommits(messageHash);
|
|
514
|
+
}, baseInterval);
|
|
515
|
+
state.retry = {
|
|
516
|
+
pendingPeers,
|
|
517
|
+
attempt: nextAttempt,
|
|
518
|
+
intervalMs: baseInterval,
|
|
519
|
+
timer
|
|
520
|
+
};
|
|
521
|
+
log('cluster-tx:retry-scheduled', { messageHash, attempt: nextAttempt, missingPeers, delayMs: baseInterval });
|
|
522
|
+
}
|
|
523
|
+
|
|
524
|
+
/**
 * Timer callback for a scheduled commit retry: re-sends the commit record to
 * every peer still pending acknowledgement (in parallel), merges any commits
 * the peers return into the tracked record, and either finishes the retry
 * cycle or schedules the next backoff attempt.
 *
 * @param messageHash identifies the tracked transaction; no-op if the
 *   transaction or its retry state is gone.
 */
private async retryCommits(messageHash: string): Promise<void> {
	const state = this.transactions.get(messageHash);
	if (!state?.retry) {
		return;
	}
	const { pendingPeers, attempt } = state.retry;
	if (pendingPeers.size === 0) {
		// Nothing pending — tear down retry state (which also schedules cleanup).
		this.clearRetry(messageHash);
		return;
	}
	const peerIds = Array.from(pendingPeers);
	const record = state.record;
	log('cluster-tx:retry-start', { messageHash, attempt, peerIds });
	// Fan out to all pending peers concurrently; each entry resolves to a
	// per-peer success/failure result (errors are captured, never thrown).
	const results = await Promise.all(peerIds.map(async peerIdStr => {
		// Local cluster peer is updated directly; remote peers go through a client.
		const isLocal = this.localCluster && peerIdStr === this.localCluster.peerId.toString();
		const payload: ClusterRecord = {
			...record,
			commits: record.commits
		};
		try {
			const res = isLocal
				? await this.localCluster!.update(payload)
				: await this.createClusterClient(peerIdFromString(peerIdStr)).update(payload);
			// Merge commits returned by the peer into the shared tracked record.
			// NOTE(review): this mutates state.record from concurrent callbacks —
			// safe on a single JS thread, but order between peers is not defined.
			state.record.commits = { ...state.record.commits, ...res.commits };
			return { peerId: peerIdStr, success: true as const };
		} catch (err) {
			return {
				peerId: peerIdStr,
				success: false as const,
				error: err instanceof Error ? err.message : String(err)
			};
		}
	}));
	const successes = results.filter(r => r.success).map(r => r.peerId);
	const failures = results.filter(r => !r.success);
	// Drop acknowledged peers from the retry set.
	for (const peerId of successes) {
		pendingPeers.delete(peerId);
	}
	log('cluster-tx:retry-complete', { messageHash, attempt, successes, failures });
	if (pendingPeers.size === 0) {
		log('cluster-tx:retry-finished', { messageHash });
		this.clearRetry(messageHash);
		return;
	}
	// The transaction may have been removed while we were awaiting the fan-out.
	if (!this.transactions.has(messageHash)) {
		return;
	}
	this.scheduleCommitRetry(messageHash, state.record, Array.from(pendingPeers));
}
|
|
573
|
+
|
|
574
|
+
private clearRetry(messageHash: string): void {
|
|
575
|
+
const state = this.transactions.get(messageHash);
|
|
576
|
+
if (!state?.retry) {
|
|
577
|
+
return;
|
|
578
|
+
}
|
|
579
|
+
if (state.retry.timer) {
|
|
580
|
+
clearTimeout(state.retry.timer);
|
|
581
|
+
}
|
|
582
|
+
state.retry = undefined;
|
|
583
|
+
// Clean up the transaction after retry is complete
|
|
584
|
+
setTimeout(() => {
|
|
585
|
+
this.transactions.delete(messageHash);
|
|
586
|
+
log('cluster-tx:transaction-remove', {
|
|
587
|
+
messageHash,
|
|
588
|
+
remaining: Array.from(this.transactions.keys())
|
|
589
|
+
});
|
|
590
|
+
}, 100);
|
|
591
|
+
}
|
|
592
|
+
}
|