@optimystic/db-p2p 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.min.js +52 -0
- package/dist/index.min.js.map +7 -0
- package/dist/src/cluster/client.d.ts +12 -0
- package/dist/src/cluster/client.d.ts.map +1 -0
- package/dist/src/cluster/client.js +65 -0
- package/dist/src/cluster/client.js.map +1 -0
- package/dist/src/cluster/cluster-repo.d.ts +79 -0
- package/dist/src/cluster/cluster-repo.d.ts.map +1 -0
- package/dist/src/cluster/cluster-repo.js +613 -0
- package/dist/src/cluster/cluster-repo.js.map +1 -0
- package/dist/src/cluster/partition-detector.d.ts +59 -0
- package/dist/src/cluster/partition-detector.d.ts.map +1 -0
- package/dist/src/cluster/partition-detector.js +129 -0
- package/dist/src/cluster/partition-detector.js.map +1 -0
- package/dist/src/cluster/service.d.ts +49 -0
- package/dist/src/cluster/service.d.ts.map +1 -0
- package/dist/src/cluster/service.js +107 -0
- package/dist/src/cluster/service.js.map +1 -0
- package/dist/src/index.d.ts +29 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +29 -0
- package/dist/src/index.js.map +1 -0
- package/dist/src/it-utility.d.ts +4 -0
- package/dist/src/it-utility.d.ts.map +1 -0
- package/dist/src/it-utility.js +32 -0
- package/dist/src/it-utility.js.map +1 -0
- package/dist/src/libp2p-key-network.d.ts +59 -0
- package/dist/src/libp2p-key-network.d.ts.map +1 -0
- package/dist/src/libp2p-key-network.js +278 -0
- package/dist/src/libp2p-key-network.js.map +1 -0
- package/dist/src/libp2p-node.d.ts +28 -0
- package/dist/src/libp2p-node.d.ts.map +1 -0
- package/dist/src/libp2p-node.js +270 -0
- package/dist/src/libp2p-node.js.map +1 -0
- package/dist/src/logger.d.ts +3 -0
- package/dist/src/logger.d.ts.map +1 -0
- package/dist/src/logger.js +6 -0
- package/dist/src/logger.js.map +1 -0
- package/dist/src/network/get-network-manager.d.ts +4 -0
- package/dist/src/network/get-network-manager.d.ts.map +1 -0
- package/dist/src/network/get-network-manager.js +17 -0
- package/dist/src/network/get-network-manager.js.map +1 -0
- package/dist/src/network/network-manager-service.d.ts +82 -0
- package/dist/src/network/network-manager-service.d.ts.map +1 -0
- package/dist/src/network/network-manager-service.js +283 -0
- package/dist/src/network/network-manager-service.js.map +1 -0
- package/dist/src/peer-utils.d.ts +2 -0
- package/dist/src/peer-utils.d.ts.map +1 -0
- package/dist/src/peer-utils.js +28 -0
- package/dist/src/peer-utils.js.map +1 -0
- package/dist/src/protocol-client.d.ts +12 -0
- package/dist/src/protocol-client.d.ts.map +1 -0
- package/dist/src/protocol-client.js +34 -0
- package/dist/src/protocol-client.js.map +1 -0
- package/dist/src/repo/client.d.ts +17 -0
- package/dist/src/repo/client.d.ts.map +1 -0
- package/dist/src/repo/client.js +82 -0
- package/dist/src/repo/client.js.map +1 -0
- package/dist/src/repo/cluster-coordinator.d.ts +59 -0
- package/dist/src/repo/cluster-coordinator.d.ts.map +1 -0
- package/dist/src/repo/cluster-coordinator.js +539 -0
- package/dist/src/repo/cluster-coordinator.js.map +1 -0
- package/dist/src/repo/coordinator-repo.d.ts +29 -0
- package/dist/src/repo/coordinator-repo.d.ts.map +1 -0
- package/dist/src/repo/coordinator-repo.js +102 -0
- package/dist/src/repo/coordinator-repo.js.map +1 -0
- package/dist/src/repo/redirect.d.ts +14 -0
- package/dist/src/repo/redirect.d.ts.map +1 -0
- package/dist/src/repo/redirect.js +9 -0
- package/dist/src/repo/redirect.js.map +1 -0
- package/dist/src/repo/service.d.ts +52 -0
- package/dist/src/repo/service.d.ts.map +1 -0
- package/dist/src/repo/service.js +181 -0
- package/dist/src/repo/service.js.map +1 -0
- package/dist/src/repo/types.d.ts +7 -0
- package/dist/src/repo/types.d.ts.map +1 -0
- package/dist/src/repo/types.js +2 -0
- package/dist/src/repo/types.js.map +1 -0
- package/dist/src/routing/libp2p-known-peers.d.ts +4 -0
- package/dist/src/routing/libp2p-known-peers.d.ts.map +1 -0
- package/dist/src/routing/libp2p-known-peers.js +19 -0
- package/dist/src/routing/libp2p-known-peers.js.map +1 -0
- package/dist/src/routing/responsibility.d.ts +14 -0
- package/dist/src/routing/responsibility.d.ts.map +1 -0
- package/dist/src/routing/responsibility.js +45 -0
- package/dist/src/routing/responsibility.js.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts +23 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.js +59 -0
- package/dist/src/routing/simple-cluster-coordinator.js.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts +65 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.js +93 -0
- package/dist/src/storage/arachnode-fret-adapter.js.map +1 -0
- package/dist/src/storage/block-storage.d.ts +31 -0
- package/dist/src/storage/block-storage.d.ts.map +1 -0
- package/dist/src/storage/block-storage.js +154 -0
- package/dist/src/storage/block-storage.js.map +1 -0
- package/dist/src/storage/file-storage.d.ts +30 -0
- package/dist/src/storage/file-storage.d.ts.map +1 -0
- package/dist/src/storage/file-storage.js +127 -0
- package/dist/src/storage/file-storage.js.map +1 -0
- package/dist/src/storage/helpers.d.ts +3 -0
- package/dist/src/storage/helpers.d.ts.map +1 -0
- package/dist/src/storage/helpers.js +28 -0
- package/dist/src/storage/helpers.js.map +1 -0
- package/dist/src/storage/i-block-storage.d.ts +32 -0
- package/dist/src/storage/i-block-storage.d.ts.map +1 -0
- package/dist/src/storage/i-block-storage.js +2 -0
- package/dist/src/storage/i-block-storage.js.map +1 -0
- package/dist/src/storage/i-raw-storage.d.ts +20 -0
- package/dist/src/storage/i-raw-storage.d.ts.map +1 -0
- package/dist/src/storage/i-raw-storage.js +2 -0
- package/dist/src/storage/i-raw-storage.js.map +1 -0
- package/dist/src/storage/memory-storage.d.ts +27 -0
- package/dist/src/storage/memory-storage.d.ts.map +1 -0
- package/dist/src/storage/memory-storage.js +87 -0
- package/dist/src/storage/memory-storage.js.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts +63 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.js +157 -0
- package/dist/src/storage/restoration-coordinator-v2.js.map +1 -0
- package/dist/src/storage/ring-selector.d.ts +56 -0
- package/dist/src/storage/ring-selector.d.ts.map +1 -0
- package/dist/src/storage/ring-selector.js +118 -0
- package/dist/src/storage/ring-selector.js.map +1 -0
- package/dist/src/storage/storage-monitor.d.ts +23 -0
- package/dist/src/storage/storage-monitor.d.ts.map +1 -0
- package/dist/src/storage/storage-monitor.js +40 -0
- package/dist/src/storage/storage-monitor.js.map +1 -0
- package/dist/src/storage/storage-repo.d.ts +17 -0
- package/dist/src/storage/storage-repo.d.ts.map +1 -0
- package/dist/src/storage/storage-repo.js +267 -0
- package/dist/src/storage/storage-repo.js.map +1 -0
- package/dist/src/storage/struct.d.ts +29 -0
- package/dist/src/storage/struct.d.ts.map +1 -0
- package/dist/src/storage/struct.js +2 -0
- package/dist/src/storage/struct.js.map +1 -0
- package/dist/src/sync/client.d.ts +27 -0
- package/dist/src/sync/client.d.ts.map +1 -0
- package/dist/src/sync/client.js +32 -0
- package/dist/src/sync/client.js.map +1 -0
- package/dist/src/sync/protocol.d.ts +58 -0
- package/dist/src/sync/protocol.d.ts.map +1 -0
- package/dist/src/sync/protocol.js +12 -0
- package/dist/src/sync/protocol.js.map +1 -0
- package/dist/src/sync/service.d.ts +62 -0
- package/dist/src/sync/service.d.ts.map +1 -0
- package/dist/src/sync/service.js +168 -0
- package/dist/src/sync/service.js.map +1 -0
- package/package.json +73 -0
- package/readme.md +497 -0
- package/src/cluster/client.ts +63 -0
- package/src/cluster/cluster-repo.ts +711 -0
- package/src/cluster/partition-detector.ts +158 -0
- package/src/cluster/service.ts +156 -0
- package/src/index.ts +30 -0
- package/src/it-utility.ts +36 -0
- package/src/libp2p-key-network.ts +334 -0
- package/src/libp2p-node.ts +335 -0
- package/src/logger.ts +9 -0
- package/src/network/get-network-manager.ts +17 -0
- package/src/network/network-manager-service.ts +334 -0
- package/src/peer-utils.ts +24 -0
- package/src/protocol-client.ts +54 -0
- package/src/repo/client.ts +112 -0
- package/src/repo/cluster-coordinator.ts +592 -0
- package/src/repo/coordinator-repo.ts +137 -0
- package/src/repo/redirect.ts +17 -0
- package/src/repo/service.ts +219 -0
- package/src/repo/types.ts +7 -0
- package/src/routing/libp2p-known-peers.ts +26 -0
- package/src/routing/responsibility.ts +63 -0
- package/src/routing/simple-cluster-coordinator.ts +70 -0
- package/src/storage/arachnode-fret-adapter.ts +128 -0
- package/src/storage/block-storage.ts +182 -0
- package/src/storage/file-storage.ts +163 -0
- package/src/storage/helpers.ts +29 -0
- package/src/storage/i-block-storage.ts +40 -0
- package/src/storage/i-raw-storage.ts +30 -0
- package/src/storage/memory-storage.ts +108 -0
- package/src/storage/restoration-coordinator-v2.ts +191 -0
- package/src/storage/ring-selector.ts +155 -0
- package/src/storage/storage-monitor.ts +59 -0
- package/src/storage/storage-repo.ts +320 -0
- package/src/storage/struct.ts +34 -0
- package/src/sync/client.ts +42 -0
- package/src/sync/protocol.ts +71 -0
- package/src/sync/service.ts +229 -0
|
@@ -0,0 +1,334 @@
|
|
|
1
|
+
import type { AbortOptions, Libp2p, PeerId, Stream } from "@libp2p/interface";
|
|
2
|
+
import { toString as u8ToString } from 'uint8arrays/to-string'
|
|
3
|
+
import type { ClusterPeers, FindCoordinatorOptions, IKeyNetwork, IPeerNetwork } from "@optimystic/db-core";
|
|
4
|
+
import { peerIdFromString } from '@libp2p/peer-id'
|
|
5
|
+
import { multiaddr } from '@multiformats/multiaddr'
|
|
6
|
+
import type { FretService } from 'p2p-fret'
|
|
7
|
+
import { hashKey } from 'p2p-fret'
|
|
8
|
+
import { createLogger } from './logger.js'
|
|
9
|
+
|
|
10
|
+
interface WithFretService { services?: { fret?: FretService } }
|
|
11
|
+
|
|
12
|
+
/**
 * Configuration options for self-coordination behavior
 */
export interface SelfCoordinationConfig {
  /** Time (ms) after last connection before allowing self-coordination. Default: 30000 */
  gracePeriodMs?: number;
  /** Threshold for suspicious network shrinkage (0-1). >50% drop is suspicious. Default: 0.5 */
  shrinkageThreshold?: number;
  /** Allow self-coordination at all. Default: true (for testing). Set false in production. */
  allowSelfCoordination?: boolean;
}

/**
 * Decision result from self-coordination guard
 */
export interface SelfCoordinationDecision {
  /** Whether the node may act as its own coordinator. */
  allow: boolean;
  /** Which rule in `shouldAllowSelfCoordination` produced this decision. */
  reason: 'bootstrap-node' | 'partition-detected' | 'suspicious-shrinkage' | 'grace-period-not-elapsed' | 'extended-isolation' | 'disabled';
  /** Set when the decision allows self-coordination but the caller should log a warning (extended isolation). */
  warn?: boolean;
}
|
|
32
|
+
|
|
33
|
+
/**
 * Key-addressed peer network backed by libp2p and the FRET routing service.
 * Resolves a coordinator peer and a cluster of peers for a given content key,
 * and guards against a node electing itself coordinator while it is merely
 * disconnected (rather than the network actually being small).
 */
export class Libp2pKeyPeerNetwork implements IKeyNetwork, IPeerNetwork {
  private readonly selfCoordinationConfig: Required<SelfCoordinationConfig>;
  // Largest network size ever observed; starts at 1 (just self).
  private networkHighWaterMark = 1;
  // Timestamp of the most recent moment we had at least one live connection.
  private lastConnectedTime = Date.now();

  constructor(
    private readonly libp2p: Libp2p,
    private readonly clusterSize: number = 16,
    selfCoordinationConfig?: SelfCoordinationConfig
  ) {
    // Fill in defaults so the rest of the class can treat the config as fully populated.
    this.selfCoordinationConfig = {
      gracePeriodMs: selfCoordinationConfig?.gracePeriodMs ?? 30_000,
      shrinkageThreshold: selfCoordinationConfig?.shrinkageThreshold ?? 0.5,
      allowSelfCoordination: selfCoordinationConfig?.allowSelfCoordination ?? true
    };
    this.setupConnectionTracking();
  }

  // coordinator cache: key (base64url) -> peerId until expiry (bounded LRU-ish via Map insertion order)
  private readonly coordinatorCache = new Map<string, { id: PeerId, expires: number }>()
  private static readonly MAX_CACHE_ENTRIES = 1000
  private readonly log = createLogger('libp2p-key-network')

  // Canonical string form of a binary key, used as the cache map key.
  private toCacheKey(key: Uint8Array): string { return u8ToString(key, 'base64url') }
|
|
57
|
+
|
|
58
|
+
/**
 * Set up connection event tracking to update high water mark and last connected time.
 * Registered once from the constructor; fires on every new libp2p connection.
 */
private setupConnectionTracking(): void {
  this.libp2p.addEventListener('connection:open', () => {
    this.updateNetworkObservations();
  });
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Update network high water mark and last connected time.
|
|
69
|
+
* Called on new connections.
|
|
70
|
+
*/
|
|
71
|
+
private updateNetworkObservations(): void {
|
|
72
|
+
const connections = this.libp2p.getConnections?.() ?? [];
|
|
73
|
+
if (connections.length > 0) {
|
|
74
|
+
this.lastConnectedTime = Date.now();
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
try {
|
|
78
|
+
const fret = this.getFret();
|
|
79
|
+
const estimate = fret.getNetworkSizeEstimate();
|
|
80
|
+
if (estimate.size_estimate > this.networkHighWaterMark) {
|
|
81
|
+
this.networkHighWaterMark = estimate.size_estimate;
|
|
82
|
+
this.log('network-hwm-updated mark=%d confidence=%f', this.networkHighWaterMark, estimate.confidence);
|
|
83
|
+
}
|
|
84
|
+
} catch {
|
|
85
|
+
// FRET not available - use connection count as fallback
|
|
86
|
+
const connectionCount = this.libp2p.getConnections?.().length ?? 0;
|
|
87
|
+
const observedSize = connectionCount + 1; // +1 for self
|
|
88
|
+
if (observedSize > this.networkHighWaterMark) {
|
|
89
|
+
this.networkHighWaterMark = observedSize;
|
|
90
|
+
this.log('network-hwm-updated mark=%d (from connections)', this.networkHighWaterMark);
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
/**
 * Determine if self-coordination should be allowed based on network observations.
 *
 * Principle: If we've ever seen a larger network, assume our connectivity is the problem,
 * not the network shrinking.
 *
 * Decision order (first match wins):
 *  0. config disables self-coordination entirely -> deny ('disabled')
 *  1. never saw a network larger than self -> allow ('bootstrap-node')
 *  2. FRET reports a partition -> deny ('partition-detected')
 *  3. FRET size estimate dropped more than the configured threshold below the
 *     high water mark -> deny ('suspicious-shrinkage')
 *  4. recently had connections but have none now, within the grace period -> deny
 *  5. otherwise allow with a warning ('extended-isolation')
 */
shouldAllowSelfCoordination(): SelfCoordinationDecision {
  // Check global disable
  if (!this.selfCoordinationConfig.allowSelfCoordination) {
    return { allow: false, reason: 'disabled' };
  }

  // Case 1: New/bootstrap node (never seen larger network)
  if (this.networkHighWaterMark <= 1) {
    return { allow: true, reason: 'bootstrap-node' };
  }

  // Case 2: Check for partition via FRET
  try {
    const fret = this.getFret();
    if (fret.detectPartition()) {
      this.log('self-coord-blocked: partition-detected');
      return { allow: false, reason: 'partition-detected' };
    }

    // Case 3: Suspicious network shrinkage (>threshold drop)
    const estimate = fret.getNetworkSizeEstimate();
    const shrinkage = 1 - (estimate.size_estimate / this.networkHighWaterMark);
    if (shrinkage > this.selfCoordinationConfig.shrinkageThreshold) {
      this.log('self-coord-blocked: suspicious-shrinkage current=%d hwm=%d shrinkage=%f',
        estimate.size_estimate, this.networkHighWaterMark, shrinkage);
      return { allow: false, reason: 'suspicious-shrinkage' };
    }
  } catch {
    // FRET not available - be conservative and fall back to connection counts.
    const connections = this.libp2p.getConnections?.() ?? [];
    if (this.networkHighWaterMark > 1 && connections.length === 0) {
      // We've seen peers before but have none now - suspicious
      const timeSinceConnection = Date.now() - this.lastConnectedTime;
      if (timeSinceConnection < this.selfCoordinationConfig.gracePeriodMs) {
        this.log('self-coord-blocked: grace-period-not-elapsed since=%dms', timeSinceConnection);
        return { allow: false, reason: 'grace-period-not-elapsed' };
      }
    }
  }

  // Case 4: Recently connected (grace period not elapsed).
  // NOTE(review): this repeats the grace-period check from the catch branch so
  // it also applies when FRET IS available but cases 2-3 did not trigger.
  const timeSinceConnection = Date.now() - this.lastConnectedTime;
  if (timeSinceConnection < this.selfCoordinationConfig.gracePeriodMs) {
    const connections = this.libp2p.getConnections?.() ?? [];
    // Only block if we have no connections but did recently
    if (connections.length === 0) {
      this.log('self-coord-blocked: grace-period-not-elapsed since=%dms', timeSinceConnection);
      return { allow: false, reason: 'grace-period-not-elapsed' };
    }
  }

  // Case 5: Extended isolation with gradual shrinkage - allow with warning
  this.log('self-coord-allowed: extended-isolation (warn)');
  return { allow: true, reason: 'extended-isolation', warn: true };
}
|
|
156
|
+
|
|
157
|
+
public recordCoordinator(key: Uint8Array, peerId: PeerId, ttlMs = 30 * 60 * 1000): void {
|
|
158
|
+
const k = this.toCacheKey(key)
|
|
159
|
+
const now = Date.now()
|
|
160
|
+
for (const [ck, entry] of this.coordinatorCache) {
|
|
161
|
+
if (entry.expires <= now) this.coordinatorCache.delete(ck)
|
|
162
|
+
}
|
|
163
|
+
this.coordinatorCache.set(k, { id: peerId, expires: now + ttlMs })
|
|
164
|
+
while (this.coordinatorCache.size > Libp2pKeyPeerNetwork.MAX_CACHE_ENTRIES) {
|
|
165
|
+
const firstKey = this.coordinatorCache.keys().next().value as string | undefined
|
|
166
|
+
if (firstKey == null) break
|
|
167
|
+
this.coordinatorCache.delete(firstKey)
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/**
 * Look up a previously recorded coordinator for this key.
 * Returns undefined (and drops the entry) when it has expired.
 */
private getCachedCoordinator(key: Uint8Array): PeerId | undefined {
  const cacheKey = this.toCacheKey(key)
  const entry = this.coordinatorCache.get(cacheKey)
  if (entry == null) {
    return undefined
  }
  if (entry.expires <= Date.now()) {
    // Lazily evict the stale entry on read.
    this.coordinatorCache.delete(cacheKey)
    return undefined
  }
  return entry.id
}
|
|
178
|
+
|
|
179
|
+
connect(peerId: PeerId, protocol: string, _options?: AbortOptions): Promise<Stream> {
|
|
180
|
+
const conns = (this.libp2p as any).getConnections?.(peerId) ?? []
|
|
181
|
+
if (Array.isArray(conns) && conns.length > 0 && typeof conns[0]?.newStream === 'function') {
|
|
182
|
+
return conns[0].newStream([protocol]) as Promise<Stream>
|
|
183
|
+
}
|
|
184
|
+
const dialOptions = { runOnLimitedConnection: true, negotiateFully: false } as const
|
|
185
|
+
return this.libp2p.dialProtocol(peerId, [protocol], dialOptions)
|
|
186
|
+
}
|
|
187
|
+
|
|
188
|
+
/** Resolve the FRET routing service from the libp2p node, throwing if it was not registered. */
private getFret(): FretService {
  const fret = (this.libp2p as unknown as WithFretService).services?.fret
  if (fret == null) {
    throw new Error('FRET service is not registered on this libp2p node')
  }
  return fret
}
|
|
193
|
+
|
|
194
|
+
private async getNeighborIdsForKey(key: Uint8Array, wants: number): Promise<string[]> {
|
|
195
|
+
const fret = this.getFret()
|
|
196
|
+
const coord = await hashKey(key)
|
|
197
|
+
const both = fret.getNeighbors(coord, 'both', wants)
|
|
198
|
+
return Array.from(new Set(both)).slice(0, wants)
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
/**
 * Find the coordinator peer for a key.
 *
 * Resolution order:
 *  1. cached coordinator (if not excluded),
 *  2. a connected FRET neighbor of the key (retried up to 3 times, 500ms apart,
 *     while we have no connections at all),
 *  3. any connected peer,
 *  4. self - but only if `shouldAllowSelfCoordination` permits it.
 *
 * @param key binary content key
 * @param _options optional excluded peers (e.g. peers that already failed)
 * @throws when self-coordination is blocked or every candidate is excluded
 */
async findCoordinator(key: Uint8Array, _options?: Partial<FindCoordinatorOptions>): Promise<PeerId> {
  const excludedSet = new Set<string>((_options?.excludedPeers ?? []).map(p => p.toString()))
  // Short key prefix used purely for log readability.
  const keyStr = this.toCacheKey(key).substring(0, 12);

  this.log('findCoordinator:start key=%s excluded=%o', keyStr, Array.from(excludedSet).map(s => s.substring(0, 12)))

  // honor cache if not excluded
  const cached = this.getCachedCoordinator(key)
  if (cached != null && !excludedSet.has(cached.toString())) {
    this.log('findCoordinator:cached-hit key=%s coordinator=%s', keyStr, cached.toString().substring(0, 12))
    return cached
  }

  // Retry logic: connections can be temporarily down, so retry a few times with delay
  const maxRetries = 3;
  const retryDelayMs = 500;

  for (let attempt = 0; attempt < maxRetries; attempt++) {
    // Get currently connected peers for filtering
    const connected = (this.libp2p.getConnections?.() ?? []).map((c: any) => c.remotePeer) as PeerId[]
    const connectedSet = new Set(connected.map(p => p.toString()))
    this.log('findCoordinator:connected-peers key=%s count=%d peers=%o attempt=%d', keyStr, connected.length, connected.map(p => p.toString().substring(0, 12)), attempt)

    // prefer FRET neighbors that are also connected, pick first non-excluded
    try {
      const ids = await this.getNeighborIdsForKey(key, this.clusterSize)
      this.log('findCoordinator:fret-neighbors key=%s candidates=%o', keyStr, ids.map(s => s.substring(0, 12)))

      // Filter to only connected FRET neighbors (self counts as "connected")
      const connectedFretIds = ids.filter(id => connectedSet.has(id) || id === this.libp2p.peerId.toString())
      this.log('findCoordinator:fret-connected key=%s count=%d peers=%o', keyStr, connectedFretIds.length, connectedFretIds.map(s => s.substring(0, 12)))

      const pick = connectedFretIds.find(id => !excludedSet.has(id))
      if (pick) {
        const pid = peerIdFromString(pick)
        // Remember the winner so subsequent lookups for this key skip the search.
        this.recordCoordinator(key, pid)
        this.log('findCoordinator:fret-selected key=%s coordinator=%s', keyStr, pick.substring(0, 12))
        return pid
      }
    } catch (err) {
      // FRET unavailable or neighbor lookup failed - fall through to connected peers.
      this.log('findCoordinator getNeighborIdsForKey failed - %o', err)
    }

    // fallback: prefer any existing connected peer that's not excluded
    const connectedPick = connected.find(p => !excludedSet.has(p.toString()))
    if (connectedPick) {
      this.recordCoordinator(key, connectedPick)
      this.log('findCoordinator:connected-fallback key=%s coordinator=%s', keyStr, connectedPick.toString().substring(0, 12))
      return connectedPick
    }

    // If no connections and not the last attempt, wait and retry
    if (connected.length === 0 && attempt < maxRetries - 1) {
      this.log('findCoordinator:no-connections-retry key=%s attempt=%d delay=%dms', keyStr, attempt, retryDelayMs)
      await new Promise(resolve => setTimeout(resolve, retryDelayMs))
      continue
    }
  }

  // last resort: prefer self only if not excluded and guard allows
  const self = this.libp2p.peerId
  if (!excludedSet.has(self.toString())) {
    const decision = this.shouldAllowSelfCoordination();
    if (!decision.allow) {
      this.log('findCoordinator:self-coord-blocked key=%s reason=%s', keyStr, decision.reason);
      throw new Error(`Self-coordination blocked: ${decision.reason}. No coordinator available for key.`);
    }
    if (decision.warn) {
      this.log('findCoordinator:self-selected-warn key=%s coordinator=%s reason=%s',
        keyStr, self.toString().substring(0, 12), decision.reason);
    } else {
      this.log('findCoordinator:self-selected key=%s coordinator=%s reason=%s',
        keyStr, self.toString().substring(0, 12), decision.reason);
    }
    return self
  }

  this.log('findCoordinator:all-excluded key=%s self=%s', keyStr, self.toString().substring(0, 12))
  throw new Error('No coordinator available for key (all candidates excluded)')
}
|
|
281
|
+
|
|
282
|
+
private getConnectedAddrsByPeer(): Record<string, string[]> {
|
|
283
|
+
const conns = this.libp2p.getConnections()
|
|
284
|
+
const byPeer: Record<string, string[]> = {}
|
|
285
|
+
for (const c of conns) {
|
|
286
|
+
const id = c.remotePeer.toString()
|
|
287
|
+
const addr = c.remoteAddr?.toString?.()
|
|
288
|
+
if (addr) (byPeer[id] ??= []).push(addr)
|
|
289
|
+
}
|
|
290
|
+
return byPeer
|
|
291
|
+
}
|
|
292
|
+
|
|
293
|
+
/**
 * Parse multiaddr strings into Multiaddr objects.
 * Invalid addresses are logged and skipped rather than failing the batch.
 */
private parseMultiaddrs(addrs: string[]): ReturnType<typeof multiaddr>[] {
  const out: ReturnType<typeof multiaddr>[] = []
  for (const a of addrs) {
    try {
      out.push(multiaddr(a))
    } catch (err) {
      // Consistency fix: every other diagnostic in this class goes through the
      // scoped debug logger; the original wrote directly to console.warn.
      this.log('invalid-multiaddr-from-connection addr=%s err=%o', a, err)
    }
  }
  return out
}
|
|
300
|
+
|
|
301
|
+
/**
 * Assemble the cluster of peers responsible for a key.
 *
 * Uses FRET to pick a cohort around the key's ring coordinate, always
 * includes self, and attaches multiaddrs only for peers we are currently
 * connected to (others get an empty address list). Remote peers' publicKey
 * is left empty here; only self's public key is populated.
 */
async findCluster(key: Uint8Array): Promise<ClusterPeers> {
  const fret = this.getFret()
  const coord = await hashKey(key)
  const cohort = fret.assembleCohort(coord, this.clusterSize)
  const keyStr = this.toCacheKey(key).substring(0, 12);

  // Include self in the cohort (deduplicated in case FRET already returned us)
  const ids = Array.from(new Set([...cohort, this.libp2p.peerId.toString()]))

  const connectedByPeer = this.getConnectedAddrsByPeer()
  const connectedPeerIds = Object.keys(connectedByPeer)

  this.log('findCluster key=%s fretCohort=%d connected=%d cohortPeers=%o',
    keyStr, cohort.length, connectedPeerIds.length, ids.map(s => s.substring(0, 12)))

  const peers: ClusterPeers = {}

  for (const idStr of ids) {
    if (idStr === this.libp2p.peerId.toString()) {
      // Self: advertise our own listen addresses and raw public key.
      peers[idStr] = { multiaddrs: this.libp2p.getMultiaddrs(), publicKey: this.libp2p.peerId.publicKey?.raw ?? new Uint8Array() }
      continue
    }
    // Remote peer: only addresses from live connections; empty if not connected.
    const strings = connectedByPeer[idStr] ?? []
    const addrs = this.parseMultiaddrs(strings)
    peers[idStr] = { multiaddrs: addrs, publicKey: new Uint8Array() }
  }

  this.log('findCluster:result key=%s clusterSize=%d withAddrs=%d connectedInCohort=%d',
    keyStr, Object.keys(peers).length,
    Object.values(peers).filter(p => p.multiaddrs.length > 0).length,
    ids.filter(id => connectedPeerIds.includes(id) || id === this.libp2p.peerId.toString()).length)
  return peers
}
|
|
334
|
+
}
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
import { createLibp2p, type Libp2p } from 'libp2p';
|
|
2
|
+
import { tcp } from '@libp2p/tcp';
|
|
3
|
+
import { noise } from '@chainsafe/libp2p-noise';
|
|
4
|
+
import { yamux } from '@chainsafe/libp2p-yamux';
|
|
5
|
+
import { identify } from '@libp2p/identify';
|
|
6
|
+
import { ping } from '@libp2p/ping';
|
|
7
|
+
import { gossipsub } from '@chainsafe/libp2p-gossipsub';
|
|
8
|
+
import { bootstrap } from '@libp2p/bootstrap';
|
|
9
|
+
import { peerIdFromString } from '@libp2p/peer-id';
|
|
10
|
+
import { clusterService } from './cluster/service.js';
|
|
11
|
+
import { repoService } from './repo/service.js';
|
|
12
|
+
import { StorageRepo } from './storage/storage-repo.js';
|
|
13
|
+
import { BlockStorage } from './storage/block-storage.js';
|
|
14
|
+
import { MemoryRawStorage } from './storage/memory-storage.js';
|
|
15
|
+
import { FileRawStorage } from './storage/file-storage.js';
|
|
16
|
+
import type { IRawStorage } from './storage/i-raw-storage.js';
|
|
17
|
+
import { clusterMember } from './cluster/cluster-repo.js';
|
|
18
|
+
import { coordinatorRepo } from './repo/coordinator-repo.js';
|
|
19
|
+
import { Libp2pKeyPeerNetwork } from './libp2p-key-network.js';
|
|
20
|
+
import { ClusterClient } from './cluster/client.js';
|
|
21
|
+
import type { IRepo, ICluster, ITransactionValidator } from '@optimystic/db-core';
|
|
22
|
+
import { multiaddr } from '@multiformats/multiaddr';
|
|
23
|
+
import { networkManagerService } from './network/network-manager-service.js';
|
|
24
|
+
import { fretService, Libp2pFretService } from 'p2p-fret';
|
|
25
|
+
import { syncService } from './sync/service.js';
|
|
26
|
+
import { RestorationCoordinator } from './storage/restoration-coordinator-v2.js';
|
|
27
|
+
import { RingSelector } from './storage/ring-selector.js';
|
|
28
|
+
import { StorageMonitor } from './storage/storage-monitor.js';
|
|
29
|
+
import type { StorageMonitorConfig } from './storage/storage-monitor.js';
|
|
30
|
+
import { ArachnodeFretAdapter } from './storage/arachnode-fret-adapter.js';
|
|
31
|
+
import type { RestoreCallback } from './storage/struct.js';
|
|
32
|
+
import type { FretService } from 'p2p-fret';
|
|
33
|
+
import { PartitionDetector } from './cluster/partition-detector.js';
|
|
34
|
+
|
|
35
|
+
/** Options for constructing an Optimystic libp2p node (see `createLibp2pNode`). */
export type NodeOptions = {
  /** TCP port to listen on (bound to 0.0.0.0). */
  port: number;
  /** Multiaddrs of bootstrap peers to join the network through. */
  bootstrapNodes: string[];
  /** Network name, used as the protocol prefix (`/optimystic/<networkName>`). */
  networkName: string;
  fretProfile?: 'edge' | 'core';
  id?: string; // optional peer id
  relay?: boolean; // enable relay service
  storageType?: 'memory' | 'file'; // storage backend type
  storagePath?: string; // path for file storage (required if storageType is 'file')
  clusterSize?: number; // desired cluster size per key
  clusterPolicy?: {
    allowDownsize?: boolean;
    sizeTolerance?: number; // acceptable relative difference (e.g. 0.5 = +/-50%)
    superMajorityThreshold?: number; // fraction of peers needed for super-majority (default: 0.67)
  };

  /** Arachnode storage configuration */
  arachnode?: {
    enableRingZulu?: boolean; // default: true
    storage?: StorageMonitorConfig;
  };

  /** Transaction validator for cluster consensus */
  validator?: ITransactionValidator;
};
|
|
60
|
+
|
|
61
|
+
/**
 * Creates and starts a libp2p node wired with the Optimystic service stack:
 * raw block storage, cluster consensus, coordinated repo, FRET discovery,
 * and (optionally) Arachnode ring membership with block restoration.
 *
 * @param options - node configuration (port, network name, storage type/path,
 *   cluster sizing policy, bootstrap list, FRET profile, transaction validator).
 * @returns the started libp2p node, with `coordinatedRepo`, `storageRepo` and
 *   `keyNetwork` attached as extra properties for external use.
 * @throws Error when `storageType` is 'file' but no `storagePath` was given.
 */
export async function createLibp2pNode(options: NodeOptions): Promise<Libp2p> {
	// Create storage based on type (defaults to in-memory when unspecified)
	const storageType = options.storageType ?? 'memory';
	let rawStorage: IRawStorage;

	if (storageType === 'file') {
		if (!options.storagePath) {
			throw new Error('storagePath is required when storageType is "file"');
		}
		rawStorage = new FileRawStorage(options.storagePath);
	} else {
		rawStorage = new MemoryRawStorage();
	}

	// Create placeholder restore callback (will be replaced after node starts).
	// NOTE(review): this variable is never reassigned below — only the
	// `createBlockStorage` factory on storageRepo is swapped later, so any
	// BlockStorage created before that swap keeps this no-op restore.
	// Confirm that is intended.
	let restoreCallback: RestoreCallback = async (_blockId, _rev?) => {
		return undefined;
	};

	// Create shared storage layers with restoration callback
	const storageRepo = new StorageRepo((blockId) =>
		new BlockStorage(blockId, rawStorage, restoreCallback)
	);

	// Filled in after the node starts; the proxies below forward to them so the
	// libp2p service factories (which run during createLibp2p) can be wired now.
	let clusterImpl: ICluster | undefined;
	let coordinatedRepo: IRepo | undefined;

	// Forwards cluster updates to the real cluster member once it exists.
	const clusterProxy: ICluster = {
		async update(record) {
			if (!clusterImpl) {
				throw new Error('ClusterMember not initialized');
			}
			return await clusterImpl.update(record);
		}
	};

	// Forwards repo calls to the coordinated repo when available, otherwise
	// falls back to plain local storage.
	const repoProxy: IRepo = {
		async get(blockGets, options) {
			const target = coordinatedRepo ?? storageRepo;
			return await target.get(blockGets, options);
		},
		async pend(request, options) {
			const target = coordinatedRepo ?? storageRepo;
			return await target.pend(request, options);
		},
		async cancel(trxRef, options) {
			const target = coordinatedRepo ?? storageRepo;
			return await target.cancel(trxRef, options);
		},
		async commit(request, options) {
			const target = coordinatedRepo ?? storageRepo;
			return await target.commit(request, options);
		}
	};

	// Parse peer ID if provided.
	// NOTE(review): peerIdFromString is synchronous in current @libp2p/peer-id;
	// the `await` is harmless but likely unnecessary — confirm against the
	// installed version.
	const peerId = options.id ? await peerIdFromString(options.id) : undefined;

	const libp2pOptions: any = {
		start: false, // started manually below, after setLibp2p injection
		...(peerId ? { peerId } : {}),
		addresses: {
			listen: [`/ip4/0.0.0.0/tcp/${options.port}`]
		},
		connectionManager: {
			autoDial: true,
			minConnections: 1,
			maxConnections: 16,
			inboundConnectionUpgradeTimeout: 10_000,
			dialQueue: { concurrency: 2, attempts: 2 }
		},
		transports: [tcp()],
		connectionEncrypters: [noise()],
		streamMuxers: [yamux()],
		services: {
			identify: identify({
				protocolPrefix: `/optimystic/${options.networkName}`
			}),
			ping: ping(),
			pubsub: gossipsub({
				allowPublishToZeroTopicPeers: true,
				heartbeatInterval: 7000
			}),

			// Custom services - create wrapper factories that inject dependencies
			cluster: (components: any) => {
				const serviceFactory = clusterService({
					protocolPrefix: `/optimystic/${options.networkName}`,
					configuredClusterSize: options.clusterSize ?? 10,
					allowClusterDownsize: options.clusterPolicy?.allowDownsize ?? true,
					clusterSizeTolerance: options.clusterPolicy?.sizeTolerance ?? 0.5
				});
				return serviceFactory({
					logger: components.logger,
					registrar: components.registrar,
					cluster: clusterProxy
				});
			},

			repo: (components: any) => {
				const serviceFactory = repoService({
					protocolPrefix: `/optimystic/${options.networkName}`
				});
				return serviceFactory({
					logger: components.logger,
					registrar: components.registrar,
					repo: repoProxy
				});
			},

			sync: (components: any) => {
				const serviceFactory = syncService({
					protocolPrefix: `/optimystic/${options.networkName}`
				});
				return serviceFactory({
					logger: components.logger,
					registrar: components.registrar,
					repo: repoProxy
				});
			},

			networkManager: (components: any) => {
				const svcFactory = networkManagerService({
					clusterSize: options.clusterSize ?? 10,
					expectedRemotes: (options.bootstrapNodes?.length ?? 0) > 0,
					allowClusterDownsize: options.clusterPolicy?.allowDownsize ?? true,
					clusterSizeTolerance: options.clusterPolicy?.sizeTolerance ?? 0.5
				})
				const svc = svcFactory(components)
				// Best-effort early libp2p injection; also retried after createLibp2p
				try { (svc as any).setLibp2p?.(components.libp2p) } catch { }
				return svc
			},
			fret: (components: any) => {
				const svcFactory = fretService({
					k: 15,
					m: 8,
					capacity: 2048,
					// 'core' when bootstraps are configured, else 'edge' — unless overridden
					profile: options.fretProfile ?? ((options.bootstrapNodes?.length ?? 0) > 0 ? 'core' : 'edge'),
					networkName: options.networkName,
					bootstraps: options.bootstrapNodes ?? []
				});
				const svc = svcFactory(components) as Libp2pFretService;
				try { svc.setLibp2p(components.libp2p); } catch { }
				return svc;
			}
		},
		// Add bootstrap nodes as needed
		peerDiscovery: [
			...(options.bootstrapNodes?.length ? [bootstrap({ list: options.bootstrapNodes })] : [])
		],
	};

	const node = await createLibp2p(libp2pOptions);

	// Inject libp2p reference into services that need it before start
	try { ((node as any).services?.fret as any)?.setLibp2p?.(node) } catch { }
	try { ((node as any).services?.networkManager as any)?.setLibp2p?.(node) } catch { }

	await node.start();

	// Initialize cluster coordination components
	const keyNetwork = new Libp2pKeyPeerNetwork(node);
	const protocolPrefix = `/optimystic/${options.networkName}`;
	const createClusterClient = (peerId: any) => ClusterClient.create(peerId, keyNetwork, protocolPrefix);

	// Create partition detector and get FRET service
	const partitionDetector = new PartitionDetector();
	const fretSvc = (node as any).services?.fret as FretService | undefined;

	// Real cluster member — from here on clusterProxy.update() stops throwing.
	clusterImpl = clusterMember({
		storageRepo,
		peerNetwork: keyNetwork,
		peerId: node.peerId,
		protocolPrefix,
		partitionDetector,
		fretService: fretSvc,
		validator: options.validator
	});

	const coordinatorRepoFactory = coordinatorRepo(
		keyNetwork,
		createClusterClient,
		{
			clusterSize: options.clusterSize ?? 10,
			superMajorityThreshold: options.clusterPolicy?.superMajorityThreshold ?? 0.67,
			simpleMajorityThreshold: 0.51,
			minAbsoluteClusterSize: 2, // Allow 2-node clusters for development/small networks
			allowClusterDownsize: options.clusterPolicy?.allowDownsize ?? true,
			clusterSizeTolerance: options.clusterPolicy?.sizeTolerance ?? 0.5,
			partitionDetectionWindow: 60000
		},
		fretSvc
	);

	// From here on repoProxy routes through the coordinated repo.
	coordinatedRepo = coordinatorRepoFactory({
		storageRepo,
		localCluster: clusterImpl,
		localPeerId: node.peerId
	});

	// Initialize Arachnode ring membership and restoration (on by default)
	const enableArachnode = options.arachnode?.enableRingZulu ?? true;
	if (enableArachnode) {
		const log = (node as any).logger?.forComponent?.('db-p2p:arachnode');
		const fret = (node as any).services?.fret as any;

		if (fret) {
			const fretAdapter = new ArachnodeFretAdapter(fret);

			const storageMonitor = new StorageMonitor(rawStorage, options.arachnode?.storage ?? {});
			const ringSelector = new RingSelector(fretAdapter, storageMonitor, {
				minCapacity: 100 * 1024 * 1024, // 100MB minimum
				thresholds: {
					moveOut: 0.85,
					moveIn: 0.40
				}
			});

			// Determine and announce ring membership
			const peerId = node.peerId.toString();
			const arachnodeInfo = await ringSelector.createArachnodeInfo(peerId);
			fretAdapter.setArachnodeInfo(arachnodeInfo);

			log?.('Announced Arachnode membership: Ring %d', arachnodeInfo.ringDepth);

			// Setup restoration coordinator with FRET adapter
			const restorationCoordinatorV2 = new RestorationCoordinator(
				fretAdapter,
				{ connect: (pid, protocol) => node.dialProtocol(pid, [protocol]) },
				`/optimystic/${options.networkName}`
			);

			// Update restore callback to use new coordinator
			const newRestoreCallback: RestoreCallback = async (blockId, rev?) => {
				return await restorationCoordinatorV2.restore(blockId, rev);
			};

			// Replace the restore callback (this is a bit hacky, but works for now)
			// In production, we'd want to properly manage this.
			// NOTE(review): only the factory is replaced; BlockStorage instances
			// created earlier keep the placeholder no-op restore callback.
			(storageRepo as any).createBlockStorage = (blockId: string) =>
				new BlockStorage(blockId, rawStorage, newRestoreCallback);

			// Monitor capacity and adjust ring periodically
			const monitorInterval = setInterval(async () => {
				const transition = await ringSelector.shouldTransition();
				if (transition.shouldMove) {
					log?.('Ring transition needed: moving %s to Ring %d',
						transition.direction, transition.newRingDepth);

					// Update Arachnode info with new ring
					const updatedInfo = await ringSelector.createArachnodeInfo(peerId);
					fretAdapter.setArachnodeInfo(updatedInfo);
				}
			}, 60_000); // Check every minute

			// Cleanup on node stop: wrap stop() so the monitor timer is cleared
			const originalStop = node.stop.bind(node);
			node.stop = async () => {
				clearInterval(monitorInterval);
				await originalStop();
			};
		} else {
			log?.('FRET service not available, Arachnode disabled');
		}
	}

	// Skip proactive bootstrap dials; rely on discovery and minimal churn

	// Expose coordinated repo and storage for external use
	(node as any).coordinatedRepo = coordinatedRepo;
	(node as any).storageRepo = storageRepo;
	(node as any).keyNetwork = keyNetwork;

	return node;
}
|
package/src/logger.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import type { Libp2p } from 'libp2p'
import type { NetworkManagerService } from './network-manager-service.js'
import { createLogger } from '../logger.js'

// Module-scoped logger for network-manager lookup failures
const log = createLogger('network:get-manager')
|
6
|
+
|
|
7
|
+
export function getNetworkManager(node: Libp2p): NetworkManagerService {
|
|
8
|
+
const svc = (node as any).services?.networkManager
|
|
9
|
+
if (svc == null) {
|
|
10
|
+
throw new Error('networkManager service is not registered on this libp2p node')
|
|
11
|
+
}
|
|
12
|
+
// Provide libp2p reference early to avoid MissingServiceError from components accessor
|
|
13
|
+
try { (svc as any).setLibp2p?.(node) } catch (err) { log('getNetworkManager setLibp2p failed - %o', err) }
|
|
14
|
+
return svc as NetworkManagerService
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
|