@optimystic/db-p2p 0.0.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64):
  1. package/{readme.md → README.md} +7 -0
  2. package/dist/index.min.js +31 -30
  3. package/dist/index.min.js.map +4 -4
  4. package/dist/src/cluster/cluster-repo.d.ts +27 -0
  5. package/dist/src/cluster/cluster-repo.d.ts.map +1 -1
  6. package/dist/src/cluster/cluster-repo.js +129 -17
  7. package/dist/src/cluster/cluster-repo.js.map +1 -1
  8. package/dist/src/cluster/service.d.ts +13 -2
  9. package/dist/src/cluster/service.d.ts.map +1 -1
  10. package/dist/src/cluster/service.js +17 -7
  11. package/dist/src/cluster/service.js.map +1 -1
  12. package/dist/src/index.d.ts +1 -1
  13. package/dist/src/index.d.ts.map +1 -1
  14. package/dist/src/index.js +1 -1
  15. package/dist/src/index.js.map +1 -1
  16. package/dist/src/libp2p-node.d.ts +13 -2
  17. package/dist/src/libp2p-node.d.ts.map +1 -1
  18. package/dist/src/libp2p-node.js +40 -17
  19. package/dist/src/libp2p-node.js.map +1 -1
  20. package/dist/src/protocol-client.d.ts.map +1 -1
  21. package/dist/src/protocol-client.js +8 -7
  22. package/dist/src/protocol-client.js.map +1 -1
  23. package/dist/src/repo/cluster-coordinator.d.ts +7 -2
  24. package/dist/src/repo/cluster-coordinator.d.ts.map +1 -1
  25. package/dist/src/repo/cluster-coordinator.js +18 -3
  26. package/dist/src/repo/cluster-coordinator.js.map +1 -1
  27. package/dist/src/repo/coordinator-repo.d.ts +26 -3
  28. package/dist/src/repo/coordinator-repo.d.ts.map +1 -1
  29. package/dist/src/repo/coordinator-repo.js +117 -22
  30. package/dist/src/repo/coordinator-repo.js.map +1 -1
  31. package/dist/src/repo/service.d.ts +13 -2
  32. package/dist/src/repo/service.d.ts.map +1 -1
  33. package/dist/src/repo/service.js +25 -12
  34. package/dist/src/repo/service.js.map +1 -1
  35. package/dist/src/storage/memory-storage.d.ts +15 -0
  36. package/dist/src/storage/memory-storage.d.ts.map +1 -1
  37. package/dist/src/storage/memory-storage.js +23 -4
  38. package/dist/src/storage/memory-storage.js.map +1 -1
  39. package/dist/src/storage/storage-repo.d.ts.map +1 -1
  40. package/dist/src/storage/storage-repo.js.map +1 -1
  41. package/dist/src/sync/service.d.ts.map +1 -1
  42. package/dist/src/sync/service.js +7 -2
  43. package/dist/src/sync/service.js.map +1 -1
  44. package/package.json +27 -20
  45. package/src/cluster/cluster-repo.ts +828 -711
  46. package/src/cluster/service.ts +44 -31
  47. package/src/index.ts +1 -1
  48. package/src/libp2p-key-network.ts +334 -334
  49. package/src/libp2p-node.ts +371 -335
  50. package/src/network/network-manager-service.ts +334 -334
  51. package/src/protocol-client.ts +53 -54
  52. package/src/repo/client.ts +112 -112
  53. package/src/repo/cluster-coordinator.ts +613 -592
  54. package/src/repo/coordinator-repo.ts +269 -137
  55. package/src/repo/service.ts +237 -219
  56. package/src/storage/block-storage.ts +182 -182
  57. package/src/storage/memory-storage.ts +24 -5
  58. package/src/storage/storage-repo.ts +321 -320
  59. package/src/sync/service.ts +7 -6
  60. package/dist/src/storage/file-storage.d.ts +0 -30
  61. package/dist/src/storage/file-storage.d.ts.map +0 -1
  62. package/dist/src/storage/file-storage.js +0 -127
  63. package/dist/src/storage/file-storage.js.map +0 -1
  64. package/src/storage/file-storage.ts +0 -163
@@ -1,137 +1,269 @@
1
- import type { PendRequest, ActionBlocks, IRepo, MessageOptions, CommitResult, GetBlockResults, PendResult, BlockGets, CommitRequest, RepoMessage, IKeyNetwork, ICluster, ClusterConsensusConfig } from "@optimystic/db-core";
2
- import { ClusterCoordinator } from "./cluster-coordinator.js";
3
- import type { ClusterClient } from "../cluster/client.js";
4
- import type { PeerId } from "@libp2p/interface";
5
- import type { FretService } from "p2p-fret";
6
-
7
- interface CoordinatorRepoComponents {
8
- storageRepo: IRepo;
9
- localCluster?: ICluster;
10
- localPeerId?: PeerId;
11
- }
12
-
13
- export function coordinatorRepo(
14
- keyNetwork: IKeyNetwork,
15
- createClusterClient: (peerId: PeerId) => ClusterClient,
16
- cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
17
- fretService?: FretService
18
- ): (components: CoordinatorRepoComponents) => CoordinatorRepo {
19
- return (components: CoordinatorRepoComponents) => new CoordinatorRepo(keyNetwork, createClusterClient, components.storageRepo, cfg, components.localCluster, components.localPeerId, fretService);
20
- }
21
-
22
- /** Cluster coordination repo - uses local store, as well as distributes changes to other nodes using cluster consensus. */
23
- export class CoordinatorRepo implements IRepo {
24
- private coordinator: ClusterCoordinator;
25
- private readonly DEFAULT_TIMEOUT = 30000; // 30 seconds default timeout
26
-
27
- constructor(
28
- readonly keyNetwork: IKeyNetwork,
29
- readonly createClusterClient: (peerId: PeerId) => ClusterClient,
30
- private readonly storageRepo: IRepo,
31
- cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
32
- localCluster?: ICluster,
33
- localPeerId?: PeerId,
34
- fretService?: FretService
35
- ) {
36
- const policy: ClusterConsensusConfig & { clusterSize: number } = {
37
- clusterSize: cfg?.clusterSize ?? 10,
38
- superMajorityThreshold: cfg?.superMajorityThreshold ?? 0.75,
39
- simpleMajorityThreshold: cfg?.simpleMajorityThreshold ?? 0.51,
40
- minAbsoluteClusterSize: cfg?.minAbsoluteClusterSize ?? 3,
41
- allowClusterDownsize: cfg?.allowClusterDownsize ?? true,
42
- clusterSizeTolerance: cfg?.clusterSizeTolerance ?? 0.5,
43
- partitionDetectionWindow: cfg?.partitionDetectionWindow ?? 60000
44
- };
45
- const localClusterRef = localCluster && localPeerId ? { update: localCluster.update.bind(localCluster), peerId: localPeerId } : undefined;
46
- this.coordinator = new ClusterCoordinator(keyNetwork, createClusterClient, policy, localClusterRef, fretService);
47
- }
48
-
49
- async get(blockGets: BlockGets, options?: MessageOptions): Promise<GetBlockResults> {
50
- // TODO: Verify that we are a proximate node for all block IDs in the request
51
-
52
- // For read operations, just use the local store
53
- // TODO: Implement read-path cluster verification without creating full 2PC transactions
54
- return await this.storageRepo.get(blockGets, options);
55
- }
56
-
57
- async pend(request: PendRequest, options?: MessageOptions): Promise<PendResult> {
58
- const allBlockIds = Object.keys(request.transforms);
59
- const coordinatingBlockIds = (options as any)?.coordinatingBlockIds ?? allBlockIds;
60
-
61
- const peerCount = await this.coordinator.getClusterSize(coordinatingBlockIds[0]!)
62
- if (peerCount <= 1) {
63
- return await this.storageRepo.pend(request, options)
64
- }
65
-
66
- const message: RepoMessage = {
67
- operations: [{ pend: request }],
68
- expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT,
69
- coordinatingBlockIds
70
- };
71
-
72
- try {
73
- await this.coordinator.executeClusterTransaction(coordinatingBlockIds[0]!, message, options);
74
- return await this.storageRepo.pend(request, options);
75
- } catch (error) {
76
- console.error('Failed to complete pend operation:', error)
77
- throw error
78
- }
79
- }
80
-
81
- async cancel(actionRef: ActionBlocks, options?: MessageOptions): Promise<void> {
82
- // TODO: Verify that we are a proximate node for all block IDs in the request
83
-
84
- // Extract all block IDs affected by this cancel operation
85
- const blockIds = actionRef.blockIds;
86
-
87
- // Create a message for this cancel operation with timeout
88
- const message: RepoMessage = {
89
- operations: [{ cancel: { actionRef } }],
90
- expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
91
- };
92
-
93
- try {
94
- // For each block ID, execute a cluster transaction
95
- const clusterPromises = blockIds.map(blockId =>
96
- this.coordinator.executeClusterTransaction(blockId, message, options)
97
- );
98
-
99
- // Wait for all cluster transactions to complete
100
- await Promise.all(clusterPromises);
101
-
102
- // If all cluster transactions succeeded, apply the cancel to the local store
103
- await this.storageRepo.cancel(actionRef, options);
104
- } catch (error) {
105
- console.error('Failed to complete cancel operation:', error);
106
- throw error;
107
- }
108
- }
109
-
110
- async commit(request: CommitRequest, options?: MessageOptions): Promise<CommitResult> {
111
- // TODO: Verify that we are a proximate node for all block IDs in the request
112
-
113
- // Extract all block IDs affected by this commit operation
114
- const blockIds = request.blockIds;
115
-
116
- const peerCount = await this.coordinator.getClusterSize(blockIds[0]!)
117
- if (peerCount <= 1) {
118
- return await this.storageRepo.commit(request, options)
119
- }
120
-
121
- // Create a single message for the entire commit operation
122
- const message: RepoMessage = {
123
- operations: [{ commit: request }],
124
- expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
125
- };
126
-
127
- try {
128
- // Execute cluster transaction using the first block ID
129
- // All blocks in this operation should map to the same cluster
130
- await this.coordinator.executeClusterTransaction(blockIds[0]!, message, options);
131
- return await this.storageRepo.commit(request, options);
132
- } catch (error) {
133
- console.error('Failed to complete commit operation:', error)
134
- throw error
135
- }
136
- }
137
- }
1
+ import type { PendRequest, ActionBlocks, IRepo, MessageOptions, CommitResult, GetBlockResults, PendResult, BlockGets, CommitRequest, RepoMessage, IKeyNetwork, ICluster, ClusterConsensusConfig, BlockId, ActionRev } from "@optimystic/db-core";
2
+ import { ClusterCoordinator } from "./cluster-coordinator.js";
3
+ import type { ClusterClient } from "../cluster/client.js";
4
+ import type { PeerId } from "@libp2p/interface";
5
+ import { peerIdFromString } from "@libp2p/peer-id";
6
+ import type { FretService } from "p2p-fret";
7
+ import { createLogger } from '../logger.js';
8
+
9
+ const log = createLogger('coordinator-repo');
10
+
11
+ /**
12
+ * Extended cluster interface that includes the ability to check if a transaction was executed.
13
+ * This is used by CoordinatorRepo to avoid duplicate execution.
14
+ */
15
+ interface LocalClusterWithExecutionTracking extends ICluster {
16
+ wasTransactionExecuted?(messageHash: string): boolean;
17
+ }
18
+
19
+ /**
20
+ * Callback to query a cluster peer for their latest revision of a block.
21
+ * Returns the peer's latest ActionRev if they have the block, undefined otherwise.
22
+ */
23
+ export type ClusterLatestCallback = (peerId: PeerId, blockId: BlockId) => Promise<ActionRev | undefined>;
24
+
25
+ interface CoordinatorRepoComponents {
26
+ storageRepo: IRepo;
27
+ localCluster?: LocalClusterWithExecutionTracking;
28
+ localPeerId?: PeerId;
29
+ /**
30
+ * Optional callback to query cluster peers for their latest block revision.
31
+ * Used for read-path cluster verification to discover unknown revisions.
32
+ */
33
+ clusterLatestCallback?: ClusterLatestCallback;
34
+ }
35
+
36
+ export function coordinatorRepo(
37
+ keyNetwork: IKeyNetwork,
38
+ createClusterClient: (peerId: PeerId) => ClusterClient,
39
+ cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
40
+ fretService?: FretService
41
+ ): (components: CoordinatorRepoComponents) => CoordinatorRepo {
42
+ return (components: CoordinatorRepoComponents) => new CoordinatorRepo(
43
+ keyNetwork,
44
+ createClusterClient,
45
+ components.storageRepo,
46
+ cfg,
47
+ components.localCluster,
48
+ components.localPeerId,
49
+ fretService,
50
+ components.clusterLatestCallback
51
+ );
52
+ }
53
+
54
+ /** Cluster coordination repo - uses local store, as well as distributes changes to other nodes using cluster consensus. */
55
+ export class CoordinatorRepo implements IRepo {
56
+ private coordinator: ClusterCoordinator;
57
+ private readonly DEFAULT_TIMEOUT = 30000; // 30 seconds default timeout
58
+
59
+ constructor(
60
+ readonly keyNetwork: IKeyNetwork,
61
+ readonly createClusterClient: (peerId: PeerId) => ClusterClient,
62
+ private readonly storageRepo: IRepo,
63
+ cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
64
+ localCluster?: LocalClusterWithExecutionTracking,
65
+ localPeerId?: PeerId,
66
+ fretService?: FretService,
67
+ private readonly clusterLatestCallback?: ClusterLatestCallback
68
+ ) {
69
+ const policy: ClusterConsensusConfig & { clusterSize: number } = {
70
+ clusterSize: cfg?.clusterSize ?? 10,
71
+ superMajorityThreshold: cfg?.superMajorityThreshold ?? 0.75,
72
+ simpleMajorityThreshold: cfg?.simpleMajorityThreshold ?? 0.51,
73
+ minAbsoluteClusterSize: cfg?.minAbsoluteClusterSize ?? 3,
74
+ allowClusterDownsize: cfg?.allowClusterDownsize ?? true,
75
+ clusterSizeTolerance: cfg?.clusterSizeTolerance ?? 0.5,
76
+ partitionDetectionWindow: cfg?.partitionDetectionWindow ?? 60000
77
+ };
78
+ const localClusterRef = localCluster && localPeerId ? {
79
+ update: localCluster.update.bind(localCluster),
80
+ peerId: localPeerId,
81
+ wasTransactionExecuted: localCluster.wasTransactionExecuted?.bind(localCluster)
82
+ } : undefined;
83
+ this.coordinator = new ClusterCoordinator(keyNetwork, createClusterClient, policy, localClusterRef, fretService);
84
+ }
85
+
86
+ async get(blockGets: BlockGets, options?: MessageOptions): Promise<GetBlockResults> {
87
+ // First try local storage
88
+ const localResult = await this.storageRepo.get(blockGets, options);
89
+
90
+ // Check for blocks that weren't found locally - try to fetch from cluster peers
91
+ // Skip cluster fetch if this is already a sync request (to prevent recursive queries)
92
+ const skipClusterFetch = (options as any)?.skipClusterFetch;
93
+ if (this.clusterLatestCallback && !skipClusterFetch) {
94
+ for (const blockId of blockGets.blockIds) {
95
+ const localEntry = localResult[blockId];
96
+ // If block not found locally (no state), try cluster peers
97
+ if (!localEntry?.state?.latest) {
98
+ try {
99
+ await this.fetchBlockFromCluster(blockId);
100
+ // Re-fetch after sync
101
+ const refreshed = await this.storageRepo.get({ blockIds: [blockId], context: blockGets.context }, options);
102
+ if (refreshed[blockId]) {
103
+ localResult[blockId] = refreshed[blockId];
104
+ }
105
+ } catch (err) {
106
+ log('cluster-fetch:error', { blockId, error: (err as Error).message });
107
+ }
108
+ }
109
+ }
110
+ }
111
+
112
+ return localResult;
113
+ }
114
+
115
+ private async fetchBlockFromCluster(blockId: BlockId): Promise<void> {
116
+ if (!this.clusterLatestCallback) return;
117
+
118
+ // Query cluster for the block
119
+ const clusterLatest = await this.queryClusterForLatest(blockId);
120
+ if (clusterLatest) {
121
+ // Found on cluster - trigger restoration to sync the block
122
+ await this.storageRepo.get({ blockIds: [blockId], context: { committed: [clusterLatest], rev: clusterLatest.rev } });
123
+ log('cluster-fetch:synced', { blockId, rev: clusterLatest.rev });
124
+ }
125
+ }
126
+
127
+ /**
128
+ * Query cluster peers to find the maximum latest revision for a block.
129
+ */
130
+ private async queryClusterForLatest(blockId: BlockId): Promise<ActionRev | undefined> {
131
+ const blockIdBytes = new TextEncoder().encode(blockId);
132
+ const peers = await this.keyNetwork.findCluster(blockIdBytes);
133
+ if (!peers || Object.keys(peers).length === 0) {
134
+ return undefined;
135
+ }
136
+
137
+ const peerIds = Object.keys(peers);
138
+ let maxLatest: ActionRev | undefined;
139
+
140
+ // Add timeout wrapper to prevent hanging on unresponsive peers
141
+ const withTimeout = <T>(promise: Promise<T>, timeoutMs: number): Promise<T | undefined> =>
142
+ Promise.race([
143
+ promise,
144
+ new Promise<undefined>(resolve => setTimeout(() => resolve(undefined), timeoutMs))
145
+ ]);
146
+
147
+ // Query peers in parallel for their latest revision (with 3 second timeout per peer)
148
+ const latestResults = await Promise.allSettled(
149
+ peerIds.map(peerIdStr => {
150
+ const peerId = peerIdFromString(peerIdStr);
151
+ return withTimeout(this.clusterLatestCallback!(peerId, blockId), 3000);
152
+ })
153
+ );
154
+
155
+ for (const result of latestResults) {
156
+ if (result.status === 'fulfilled' && result.value) {
157
+ const peerLatest = result.value;
158
+ if (!maxLatest || peerLatest.rev > maxLatest.rev) {
159
+ maxLatest = peerLatest;
160
+ }
161
+ }
162
+ }
163
+
164
+ return maxLatest;
165
+ }
166
+
167
+ async pend(request: PendRequest, options?: MessageOptions): Promise<PendResult> {
168
+ const allBlockIds = Object.keys(request.transforms);
169
+ const coordinatingBlockIds = (options as any)?.coordinatingBlockIds ?? allBlockIds;
170
+
171
+ const peerCount = await this.coordinator.getClusterSize(coordinatingBlockIds[0]!);
172
+ if (peerCount <= 1) {
173
+ return await this.storageRepo.pend(request, options);
174
+ }
175
+
176
+ const message: RepoMessage = {
177
+ operations: [{ pend: request }],
178
+ expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT,
179
+ coordinatingBlockIds
180
+ };
181
+
182
+ try {
183
+ const { localExecuted } = await this.coordinator.executeClusterTransaction(coordinatingBlockIds[0]!, message, options);
184
+ log('coordinator-repo:pend-cluster-complete', {
185
+ actionId: request.actionId,
186
+ localExecuted
187
+ });
188
+ // Only call storageRepo if local cluster didn't already execute during consensus
189
+ if (!localExecuted) {
190
+ const result = await this.storageRepo.pend(request, options);
191
+ log('coordinator-repo:pend-fallback-result', {
192
+ actionId: request.actionId,
193
+ success: result.success,
194
+ hasMissing: !!(result as any).missing?.length,
195
+ hasPending: !!(result as any).pending?.length
196
+ });
197
+ return result;
198
+ }
199
+ // Local cluster already executed - return success
200
+ return {
201
+ success: true,
202
+ pending: [],
203
+ blockIds: Object.keys(request.transforms)
204
+ };
205
+ } catch (error) {
206
+ log('coordinator-repo:pend-error', { actionId: request.actionId, error: (error as Error).message });
207
+ throw error;
208
+ }
209
+ }
210
+
211
+ async cancel(actionRef: ActionBlocks, options?: MessageOptions): Promise<void> {
212
+ // TODO: Verify that we are a proximate node for all block IDs in the request
213
+
214
+ // Extract all block IDs affected by this cancel operation
215
+ const blockIds = actionRef.blockIds;
216
+
217
+ // Create a message for this cancel operation with timeout
218
+ const message: RepoMessage = {
219
+ operations: [{ cancel: { actionRef } }],
220
+ expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
221
+ };
222
+
223
+ try {
224
+ // For each block ID, execute a cluster transaction
225
+ const clusterPromises = blockIds.map(blockId =>
226
+ this.coordinator.executeClusterTransaction(blockId, message, options)
227
+ );
228
+
229
+ // Wait for all cluster transactions to complete
230
+ const results = await Promise.all(clusterPromises);
231
+
232
+ // Only call storageRepo if local cluster didn't already execute during consensus
233
+ const anyLocalExecuted = results.some(r => r.localExecuted);
234
+ if (!anyLocalExecuted) {
235
+ await this.storageRepo.cancel(actionRef, options);
236
+ }
237
+ } catch (error) {
238
+ log('coordinator-repo:cancel-error', { actionId: actionRef.actionId, error: (error as Error).message });
239
+ throw error;
240
+ }
241
+ }
242
+
243
+ async commit(request: CommitRequest, options?: MessageOptions): Promise<CommitResult> {
244
+ const blockIds = request.blockIds;
245
+
246
+ const peerCount = await this.coordinator.getClusterSize(blockIds[0]!);
247
+ if (peerCount <= 1) {
248
+ return await this.storageRepo.commit(request, options);
249
+ }
250
+
251
+ const message: RepoMessage = {
252
+ operations: [{ commit: request }],
253
+ expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
254
+ };
255
+
256
+ try {
257
+ const { localExecuted } = await this.coordinator.executeClusterTransaction(blockIds[0]!, message, options);
258
+ // Only call storageRepo if local cluster didn't already execute during consensus
259
+ if (!localExecuted) {
260
+ return await this.storageRepo.commit(request, options);
261
+ }
262
+ // Local cluster already executed - return success
263
+ return { success: true };
264
+ } catch (error) {
265
+ log('coordinator-repo:commit-error', { actionId: request.actionId, error: (error as Error).message });
266
+ throw error;
267
+ }
268
+ }
269
+ }