@optimystic/db-p2p 0.0.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/{readme.md → README.md} +7 -0
- package/dist/index.min.js +31 -30
- package/dist/index.min.js.map +4 -4
- package/dist/src/cluster/cluster-repo.d.ts +27 -0
- package/dist/src/cluster/cluster-repo.d.ts.map +1 -1
- package/dist/src/cluster/cluster-repo.js +129 -17
- package/dist/src/cluster/cluster-repo.js.map +1 -1
- package/dist/src/cluster/service.d.ts +13 -2
- package/dist/src/cluster/service.d.ts.map +1 -1
- package/dist/src/cluster/service.js +17 -7
- package/dist/src/cluster/service.js.map +1 -1
- package/dist/src/index.d.ts +1 -1
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js +1 -1
- package/dist/src/index.js.map +1 -1
- package/dist/src/libp2p-node.d.ts +13 -2
- package/dist/src/libp2p-node.d.ts.map +1 -1
- package/dist/src/libp2p-node.js +40 -17
- package/dist/src/libp2p-node.js.map +1 -1
- package/dist/src/protocol-client.d.ts.map +1 -1
- package/dist/src/protocol-client.js +8 -7
- package/dist/src/protocol-client.js.map +1 -1
- package/dist/src/repo/cluster-coordinator.d.ts +7 -2
- package/dist/src/repo/cluster-coordinator.d.ts.map +1 -1
- package/dist/src/repo/cluster-coordinator.js +18 -3
- package/dist/src/repo/cluster-coordinator.js.map +1 -1
- package/dist/src/repo/coordinator-repo.d.ts +26 -3
- package/dist/src/repo/coordinator-repo.d.ts.map +1 -1
- package/dist/src/repo/coordinator-repo.js +117 -22
- package/dist/src/repo/coordinator-repo.js.map +1 -1
- package/dist/src/repo/service.d.ts +13 -2
- package/dist/src/repo/service.d.ts.map +1 -1
- package/dist/src/repo/service.js +25 -12
- package/dist/src/repo/service.js.map +1 -1
- package/dist/src/storage/memory-storage.d.ts +15 -0
- package/dist/src/storage/memory-storage.d.ts.map +1 -1
- package/dist/src/storage/memory-storage.js +23 -4
- package/dist/src/storage/memory-storage.js.map +1 -1
- package/dist/src/storage/storage-repo.d.ts.map +1 -1
- package/dist/src/storage/storage-repo.js.map +1 -1
- package/dist/src/sync/service.d.ts.map +1 -1
- package/dist/src/sync/service.js +7 -2
- package/dist/src/sync/service.js.map +1 -1
- package/package.json +27 -20
- package/src/cluster/cluster-repo.ts +828 -711
- package/src/cluster/service.ts +44 -31
- package/src/index.ts +1 -1
- package/src/libp2p-key-network.ts +334 -334
- package/src/libp2p-node.ts +371 -335
- package/src/network/network-manager-service.ts +334 -334
- package/src/protocol-client.ts +53 -54
- package/src/repo/client.ts +112 -112
- package/src/repo/cluster-coordinator.ts +613 -592
- package/src/repo/coordinator-repo.ts +269 -137
- package/src/repo/service.ts +237 -219
- package/src/storage/block-storage.ts +182 -182
- package/src/storage/memory-storage.ts +24 -5
- package/src/storage/storage-repo.ts +321 -320
- package/src/sync/service.ts +7 -6
- package/dist/src/storage/file-storage.d.ts +0 -30
- package/dist/src/storage/file-storage.d.ts.map +0 -1
- package/dist/src/storage/file-storage.js +0 -127
- package/dist/src/storage/file-storage.js.map +0 -1
- package/src/storage/file-storage.ts +0 -163
|
@@ -1,137 +1,269 @@
|
|
|
1
|
-
import type { PendRequest, ActionBlocks, IRepo, MessageOptions, CommitResult, GetBlockResults, PendResult, BlockGets, CommitRequest, RepoMessage, IKeyNetwork, ICluster, ClusterConsensusConfig } from "@optimystic/db-core";
|
|
2
|
-
import { ClusterCoordinator } from "./cluster-coordinator.js";
|
|
3
|
-
import type { ClusterClient } from "../cluster/client.js";
|
|
4
|
-
import type { PeerId } from "@libp2p/interface";
|
|
5
|
-
import
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
export
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
//
|
|
88
|
-
const
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
1
|
+
import type { PendRequest, ActionBlocks, IRepo, MessageOptions, CommitResult, GetBlockResults, PendResult, BlockGets, CommitRequest, RepoMessage, IKeyNetwork, ICluster, ClusterConsensusConfig, BlockId, ActionRev } from "@optimystic/db-core";
|
|
2
|
+
import { ClusterCoordinator } from "./cluster-coordinator.js";
|
|
3
|
+
import type { ClusterClient } from "../cluster/client.js";
|
|
4
|
+
import type { PeerId } from "@libp2p/interface";
|
|
5
|
+
import { peerIdFromString } from "@libp2p/peer-id";
|
|
6
|
+
import type { FretService } from "p2p-fret";
|
|
7
|
+
import { createLogger } from '../logger.js';
|
|
8
|
+
|
|
9
|
+
const log = createLogger('coordinator-repo');
|
|
10
|
+
|
|
11
|
+
/**
 * Extended cluster interface that includes the ability to check if a transaction was executed.
 * This is used by CoordinatorRepo to avoid duplicate execution.
 */
interface LocalClusterWithExecutionTracking extends ICluster {
	// Optional hook: returns true when the transaction identified by `messageHash`
	// has already been executed by the local cluster, so callers can skip re-applying it.
	wasTransactionExecuted?(messageHash: string): boolean;
}
|
|
18
|
+
|
|
19
|
+
/**
 * Callback to query a cluster peer for their latest revision of a block.
 * Returns the peer's latest ActionRev if they have the block, undefined otherwise.
 */
export type ClusterLatestCallback = (peerId: PeerId, blockId: BlockId) => Promise<ActionRev | undefined>;
|
|
24
|
+
|
|
25
|
+
/** Dependency bag injected into the CoordinatorRepo factory (libp2p component style). */
interface CoordinatorRepoComponents {
	/** Local repo that cluster operations read from and fall back to. */
	storageRepo: IRepo;
	/** Local cluster participant, if this node is a member (may expose duplicate-execution tracking). */
	localCluster?: LocalClusterWithExecutionTracking;
	/** This node's peer id; only used together with `localCluster`. */
	localPeerId?: PeerId;
	/**
	 * Optional callback to query cluster peers for their latest block revision.
	 * Used for read-path cluster verification to discover unknown revisions.
	 */
	clusterLatestCallback?: ClusterLatestCallback;
}
|
|
35
|
+
|
|
36
|
+
export function coordinatorRepo(
|
|
37
|
+
keyNetwork: IKeyNetwork,
|
|
38
|
+
createClusterClient: (peerId: PeerId) => ClusterClient,
|
|
39
|
+
cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
|
|
40
|
+
fretService?: FretService
|
|
41
|
+
): (components: CoordinatorRepoComponents) => CoordinatorRepo {
|
|
42
|
+
return (components: CoordinatorRepoComponents) => new CoordinatorRepo(
|
|
43
|
+
keyNetwork,
|
|
44
|
+
createClusterClient,
|
|
45
|
+
components.storageRepo,
|
|
46
|
+
cfg,
|
|
47
|
+
components.localCluster,
|
|
48
|
+
components.localPeerId,
|
|
49
|
+
fretService,
|
|
50
|
+
components.clusterLatestCallback
|
|
51
|
+
);
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/** Cluster coordination repo - uses local store, as well as distributes changes to other nodes using cluster consensus. */
|
|
55
|
+
export class CoordinatorRepo implements IRepo {
|
|
56
|
+
private coordinator: ClusterCoordinator;
|
|
57
|
+
private readonly DEFAULT_TIMEOUT = 30000; // 30 seconds default timeout
|
|
58
|
+
|
|
59
|
+
constructor(
|
|
60
|
+
readonly keyNetwork: IKeyNetwork,
|
|
61
|
+
readonly createClusterClient: (peerId: PeerId) => ClusterClient,
|
|
62
|
+
private readonly storageRepo: IRepo,
|
|
63
|
+
cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
|
|
64
|
+
localCluster?: LocalClusterWithExecutionTracking,
|
|
65
|
+
localPeerId?: PeerId,
|
|
66
|
+
fretService?: FretService,
|
|
67
|
+
private readonly clusterLatestCallback?: ClusterLatestCallback
|
|
68
|
+
) {
|
|
69
|
+
const policy: ClusterConsensusConfig & { clusterSize: number } = {
|
|
70
|
+
clusterSize: cfg?.clusterSize ?? 10,
|
|
71
|
+
superMajorityThreshold: cfg?.superMajorityThreshold ?? 0.75,
|
|
72
|
+
simpleMajorityThreshold: cfg?.simpleMajorityThreshold ?? 0.51,
|
|
73
|
+
minAbsoluteClusterSize: cfg?.minAbsoluteClusterSize ?? 3,
|
|
74
|
+
allowClusterDownsize: cfg?.allowClusterDownsize ?? true,
|
|
75
|
+
clusterSizeTolerance: cfg?.clusterSizeTolerance ?? 0.5,
|
|
76
|
+
partitionDetectionWindow: cfg?.partitionDetectionWindow ?? 60000
|
|
77
|
+
};
|
|
78
|
+
const localClusterRef = localCluster && localPeerId ? {
|
|
79
|
+
update: localCluster.update.bind(localCluster),
|
|
80
|
+
peerId: localPeerId,
|
|
81
|
+
wasTransactionExecuted: localCluster.wasTransactionExecuted?.bind(localCluster)
|
|
82
|
+
} : undefined;
|
|
83
|
+
this.coordinator = new ClusterCoordinator(keyNetwork, createClusterClient, policy, localClusterRef, fretService);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
async get(blockGets: BlockGets, options?: MessageOptions): Promise<GetBlockResults> {
|
|
87
|
+
// First try local storage
|
|
88
|
+
const localResult = await this.storageRepo.get(blockGets, options);
|
|
89
|
+
|
|
90
|
+
// Check for blocks that weren't found locally - try to fetch from cluster peers
|
|
91
|
+
// Skip cluster fetch if this is already a sync request (to prevent recursive queries)
|
|
92
|
+
const skipClusterFetch = (options as any)?.skipClusterFetch;
|
|
93
|
+
if (this.clusterLatestCallback && !skipClusterFetch) {
|
|
94
|
+
for (const blockId of blockGets.blockIds) {
|
|
95
|
+
const localEntry = localResult[blockId];
|
|
96
|
+
// If block not found locally (no state), try cluster peers
|
|
97
|
+
if (!localEntry?.state?.latest) {
|
|
98
|
+
try {
|
|
99
|
+
await this.fetchBlockFromCluster(blockId);
|
|
100
|
+
// Re-fetch after sync
|
|
101
|
+
const refreshed = await this.storageRepo.get({ blockIds: [blockId], context: blockGets.context }, options);
|
|
102
|
+
if (refreshed[blockId]) {
|
|
103
|
+
localResult[blockId] = refreshed[blockId];
|
|
104
|
+
}
|
|
105
|
+
} catch (err) {
|
|
106
|
+
log('cluster-fetch:error', { blockId, error: (err as Error).message });
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
return localResult;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
private async fetchBlockFromCluster(blockId: BlockId): Promise<void> {
|
|
116
|
+
if (!this.clusterLatestCallback) return;
|
|
117
|
+
|
|
118
|
+
// Query cluster for the block
|
|
119
|
+
const clusterLatest = await this.queryClusterForLatest(blockId);
|
|
120
|
+
if (clusterLatest) {
|
|
121
|
+
// Found on cluster - trigger restoration to sync the block
|
|
122
|
+
await this.storageRepo.get({ blockIds: [blockId], context: { committed: [clusterLatest], rev: clusterLatest.rev } });
|
|
123
|
+
log('cluster-fetch:synced', { blockId, rev: clusterLatest.rev });
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Query cluster peers to find the maximum latest revision for a block.
|
|
129
|
+
*/
|
|
130
|
+
private async queryClusterForLatest(blockId: BlockId): Promise<ActionRev | undefined> {
|
|
131
|
+
const blockIdBytes = new TextEncoder().encode(blockId);
|
|
132
|
+
const peers = await this.keyNetwork.findCluster(blockIdBytes);
|
|
133
|
+
if (!peers || Object.keys(peers).length === 0) {
|
|
134
|
+
return undefined;
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
const peerIds = Object.keys(peers);
|
|
138
|
+
let maxLatest: ActionRev | undefined;
|
|
139
|
+
|
|
140
|
+
// Add timeout wrapper to prevent hanging on unresponsive peers
|
|
141
|
+
const withTimeout = <T>(promise: Promise<T>, timeoutMs: number): Promise<T | undefined> =>
|
|
142
|
+
Promise.race([
|
|
143
|
+
promise,
|
|
144
|
+
new Promise<undefined>(resolve => setTimeout(() => resolve(undefined), timeoutMs))
|
|
145
|
+
]);
|
|
146
|
+
|
|
147
|
+
// Query peers in parallel for their latest revision (with 3 second timeout per peer)
|
|
148
|
+
const latestResults = await Promise.allSettled(
|
|
149
|
+
peerIds.map(peerIdStr => {
|
|
150
|
+
const peerId = peerIdFromString(peerIdStr);
|
|
151
|
+
return withTimeout(this.clusterLatestCallback!(peerId, blockId), 3000);
|
|
152
|
+
})
|
|
153
|
+
);
|
|
154
|
+
|
|
155
|
+
for (const result of latestResults) {
|
|
156
|
+
if (result.status === 'fulfilled' && result.value) {
|
|
157
|
+
const peerLatest = result.value;
|
|
158
|
+
if (!maxLatest || peerLatest.rev > maxLatest.rev) {
|
|
159
|
+
maxLatest = peerLatest;
|
|
160
|
+
}
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
return maxLatest;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
async pend(request: PendRequest, options?: MessageOptions): Promise<PendResult> {
|
|
168
|
+
const allBlockIds = Object.keys(request.transforms);
|
|
169
|
+
const coordinatingBlockIds = (options as any)?.coordinatingBlockIds ?? allBlockIds;
|
|
170
|
+
|
|
171
|
+
const peerCount = await this.coordinator.getClusterSize(coordinatingBlockIds[0]!);
|
|
172
|
+
if (peerCount <= 1) {
|
|
173
|
+
return await this.storageRepo.pend(request, options);
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
const message: RepoMessage = {
|
|
177
|
+
operations: [{ pend: request }],
|
|
178
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT,
|
|
179
|
+
coordinatingBlockIds
|
|
180
|
+
};
|
|
181
|
+
|
|
182
|
+
try {
|
|
183
|
+
const { localExecuted } = await this.coordinator.executeClusterTransaction(coordinatingBlockIds[0]!, message, options);
|
|
184
|
+
log('coordinator-repo:pend-cluster-complete', {
|
|
185
|
+
actionId: request.actionId,
|
|
186
|
+
localExecuted
|
|
187
|
+
});
|
|
188
|
+
// Only call storageRepo if local cluster didn't already execute during consensus
|
|
189
|
+
if (!localExecuted) {
|
|
190
|
+
const result = await this.storageRepo.pend(request, options);
|
|
191
|
+
log('coordinator-repo:pend-fallback-result', {
|
|
192
|
+
actionId: request.actionId,
|
|
193
|
+
success: result.success,
|
|
194
|
+
hasMissing: !!(result as any).missing?.length,
|
|
195
|
+
hasPending: !!(result as any).pending?.length
|
|
196
|
+
});
|
|
197
|
+
return result;
|
|
198
|
+
}
|
|
199
|
+
// Local cluster already executed - return success
|
|
200
|
+
return {
|
|
201
|
+
success: true,
|
|
202
|
+
pending: [],
|
|
203
|
+
blockIds: Object.keys(request.transforms)
|
|
204
|
+
};
|
|
205
|
+
} catch (error) {
|
|
206
|
+
log('coordinator-repo:pend-error', { actionId: request.actionId, error: (error as Error).message });
|
|
207
|
+
throw error;
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
async cancel(actionRef: ActionBlocks, options?: MessageOptions): Promise<void> {
|
|
212
|
+
// TODO: Verify that we are a proximate node for all block IDs in the request
|
|
213
|
+
|
|
214
|
+
// Extract all block IDs affected by this cancel operation
|
|
215
|
+
const blockIds = actionRef.blockIds;
|
|
216
|
+
|
|
217
|
+
// Create a message for this cancel operation with timeout
|
|
218
|
+
const message: RepoMessage = {
|
|
219
|
+
operations: [{ cancel: { actionRef } }],
|
|
220
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
|
|
221
|
+
};
|
|
222
|
+
|
|
223
|
+
try {
|
|
224
|
+
// For each block ID, execute a cluster transaction
|
|
225
|
+
const clusterPromises = blockIds.map(blockId =>
|
|
226
|
+
this.coordinator.executeClusterTransaction(blockId, message, options)
|
|
227
|
+
);
|
|
228
|
+
|
|
229
|
+
// Wait for all cluster transactions to complete
|
|
230
|
+
const results = await Promise.all(clusterPromises);
|
|
231
|
+
|
|
232
|
+
// Only call storageRepo if local cluster didn't already execute during consensus
|
|
233
|
+
const anyLocalExecuted = results.some(r => r.localExecuted);
|
|
234
|
+
if (!anyLocalExecuted) {
|
|
235
|
+
await this.storageRepo.cancel(actionRef, options);
|
|
236
|
+
}
|
|
237
|
+
} catch (error) {
|
|
238
|
+
log('coordinator-repo:cancel-error', { actionId: actionRef.actionId, error: (error as Error).message });
|
|
239
|
+
throw error;
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
async commit(request: CommitRequest, options?: MessageOptions): Promise<CommitResult> {
|
|
244
|
+
const blockIds = request.blockIds;
|
|
245
|
+
|
|
246
|
+
const peerCount = await this.coordinator.getClusterSize(blockIds[0]!);
|
|
247
|
+
if (peerCount <= 1) {
|
|
248
|
+
return await this.storageRepo.commit(request, options);
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
const message: RepoMessage = {
|
|
252
|
+
operations: [{ commit: request }],
|
|
253
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
|
|
254
|
+
};
|
|
255
|
+
|
|
256
|
+
try {
|
|
257
|
+
const { localExecuted } = await this.coordinator.executeClusterTransaction(blockIds[0]!, message, options);
|
|
258
|
+
// Only call storageRepo if local cluster didn't already execute during consensus
|
|
259
|
+
if (!localExecuted) {
|
|
260
|
+
return await this.storageRepo.commit(request, options);
|
|
261
|
+
}
|
|
262
|
+
// Local cluster already executed - return success
|
|
263
|
+
return { success: true };
|
|
264
|
+
} catch (error) {
|
|
265
|
+
log('coordinator-repo:commit-error', { actionId: request.actionId, error: (error as Error).message });
|
|
266
|
+
throw error;
|
|
267
|
+
}
|
|
268
|
+
}
|
|
269
|
+
}
|