@optimystic/db-p2p 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.min.js +52 -0
- package/dist/index.min.js.map +7 -0
- package/dist/src/cluster/client.d.ts +12 -0
- package/dist/src/cluster/client.d.ts.map +1 -0
- package/dist/src/cluster/client.js +65 -0
- package/dist/src/cluster/client.js.map +1 -0
- package/dist/src/cluster/cluster-repo.d.ts +79 -0
- package/dist/src/cluster/cluster-repo.d.ts.map +1 -0
- package/dist/src/cluster/cluster-repo.js +613 -0
- package/dist/src/cluster/cluster-repo.js.map +1 -0
- package/dist/src/cluster/partition-detector.d.ts +59 -0
- package/dist/src/cluster/partition-detector.d.ts.map +1 -0
- package/dist/src/cluster/partition-detector.js +129 -0
- package/dist/src/cluster/partition-detector.js.map +1 -0
- package/dist/src/cluster/service.d.ts +49 -0
- package/dist/src/cluster/service.d.ts.map +1 -0
- package/dist/src/cluster/service.js +107 -0
- package/dist/src/cluster/service.js.map +1 -0
- package/dist/src/index.d.ts +29 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +29 -0
- package/dist/src/index.js.map +1 -0
- package/dist/src/it-utility.d.ts +4 -0
- package/dist/src/it-utility.d.ts.map +1 -0
- package/dist/src/it-utility.js +32 -0
- package/dist/src/it-utility.js.map +1 -0
- package/dist/src/libp2p-key-network.d.ts +59 -0
- package/dist/src/libp2p-key-network.d.ts.map +1 -0
- package/dist/src/libp2p-key-network.js +278 -0
- package/dist/src/libp2p-key-network.js.map +1 -0
- package/dist/src/libp2p-node.d.ts +28 -0
- package/dist/src/libp2p-node.d.ts.map +1 -0
- package/dist/src/libp2p-node.js +270 -0
- package/dist/src/libp2p-node.js.map +1 -0
- package/dist/src/logger.d.ts +3 -0
- package/dist/src/logger.d.ts.map +1 -0
- package/dist/src/logger.js +6 -0
- package/dist/src/logger.js.map +1 -0
- package/dist/src/network/get-network-manager.d.ts +4 -0
- package/dist/src/network/get-network-manager.d.ts.map +1 -0
- package/dist/src/network/get-network-manager.js +17 -0
- package/dist/src/network/get-network-manager.js.map +1 -0
- package/dist/src/network/network-manager-service.d.ts +82 -0
- package/dist/src/network/network-manager-service.d.ts.map +1 -0
- package/dist/src/network/network-manager-service.js +283 -0
- package/dist/src/network/network-manager-service.js.map +1 -0
- package/dist/src/peer-utils.d.ts +2 -0
- package/dist/src/peer-utils.d.ts.map +1 -0
- package/dist/src/peer-utils.js +28 -0
- package/dist/src/peer-utils.js.map +1 -0
- package/dist/src/protocol-client.d.ts +12 -0
- package/dist/src/protocol-client.d.ts.map +1 -0
- package/dist/src/protocol-client.js +34 -0
- package/dist/src/protocol-client.js.map +1 -0
- package/dist/src/repo/client.d.ts +17 -0
- package/dist/src/repo/client.d.ts.map +1 -0
- package/dist/src/repo/client.js +82 -0
- package/dist/src/repo/client.js.map +1 -0
- package/dist/src/repo/cluster-coordinator.d.ts +59 -0
- package/dist/src/repo/cluster-coordinator.d.ts.map +1 -0
- package/dist/src/repo/cluster-coordinator.js +539 -0
- package/dist/src/repo/cluster-coordinator.js.map +1 -0
- package/dist/src/repo/coordinator-repo.d.ts +29 -0
- package/dist/src/repo/coordinator-repo.d.ts.map +1 -0
- package/dist/src/repo/coordinator-repo.js +102 -0
- package/dist/src/repo/coordinator-repo.js.map +1 -0
- package/dist/src/repo/redirect.d.ts +14 -0
- package/dist/src/repo/redirect.d.ts.map +1 -0
- package/dist/src/repo/redirect.js +9 -0
- package/dist/src/repo/redirect.js.map +1 -0
- package/dist/src/repo/service.d.ts +52 -0
- package/dist/src/repo/service.d.ts.map +1 -0
- package/dist/src/repo/service.js +181 -0
- package/dist/src/repo/service.js.map +1 -0
- package/dist/src/repo/types.d.ts +7 -0
- package/dist/src/repo/types.d.ts.map +1 -0
- package/dist/src/repo/types.js +2 -0
- package/dist/src/repo/types.js.map +1 -0
- package/dist/src/routing/libp2p-known-peers.d.ts +4 -0
- package/dist/src/routing/libp2p-known-peers.d.ts.map +1 -0
- package/dist/src/routing/libp2p-known-peers.js +19 -0
- package/dist/src/routing/libp2p-known-peers.js.map +1 -0
- package/dist/src/routing/responsibility.d.ts +14 -0
- package/dist/src/routing/responsibility.d.ts.map +1 -0
- package/dist/src/routing/responsibility.js +45 -0
- package/dist/src/routing/responsibility.js.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts +23 -0
- package/dist/src/routing/simple-cluster-coordinator.d.ts.map +1 -0
- package/dist/src/routing/simple-cluster-coordinator.js +59 -0
- package/dist/src/routing/simple-cluster-coordinator.js.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts +65 -0
- package/dist/src/storage/arachnode-fret-adapter.d.ts.map +1 -0
- package/dist/src/storage/arachnode-fret-adapter.js +93 -0
- package/dist/src/storage/arachnode-fret-adapter.js.map +1 -0
- package/dist/src/storage/block-storage.d.ts +31 -0
- package/dist/src/storage/block-storage.d.ts.map +1 -0
- package/dist/src/storage/block-storage.js +154 -0
- package/dist/src/storage/block-storage.js.map +1 -0
- package/dist/src/storage/file-storage.d.ts +30 -0
- package/dist/src/storage/file-storage.d.ts.map +1 -0
- package/dist/src/storage/file-storage.js +127 -0
- package/dist/src/storage/file-storage.js.map +1 -0
- package/dist/src/storage/helpers.d.ts +3 -0
- package/dist/src/storage/helpers.d.ts.map +1 -0
- package/dist/src/storage/helpers.js +28 -0
- package/dist/src/storage/helpers.js.map +1 -0
- package/dist/src/storage/i-block-storage.d.ts +32 -0
- package/dist/src/storage/i-block-storage.d.ts.map +1 -0
- package/dist/src/storage/i-block-storage.js +2 -0
- package/dist/src/storage/i-block-storage.js.map +1 -0
- package/dist/src/storage/i-raw-storage.d.ts +20 -0
- package/dist/src/storage/i-raw-storage.d.ts.map +1 -0
- package/dist/src/storage/i-raw-storage.js +2 -0
- package/dist/src/storage/i-raw-storage.js.map +1 -0
- package/dist/src/storage/memory-storage.d.ts +27 -0
- package/dist/src/storage/memory-storage.d.ts.map +1 -0
- package/dist/src/storage/memory-storage.js +87 -0
- package/dist/src/storage/memory-storage.js.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts +63 -0
- package/dist/src/storage/restoration-coordinator-v2.d.ts.map +1 -0
- package/dist/src/storage/restoration-coordinator-v2.js +157 -0
- package/dist/src/storage/restoration-coordinator-v2.js.map +1 -0
- package/dist/src/storage/ring-selector.d.ts +56 -0
- package/dist/src/storage/ring-selector.d.ts.map +1 -0
- package/dist/src/storage/ring-selector.js +118 -0
- package/dist/src/storage/ring-selector.js.map +1 -0
- package/dist/src/storage/storage-monitor.d.ts +23 -0
- package/dist/src/storage/storage-monitor.d.ts.map +1 -0
- package/dist/src/storage/storage-monitor.js +40 -0
- package/dist/src/storage/storage-monitor.js.map +1 -0
- package/dist/src/storage/storage-repo.d.ts +17 -0
- package/dist/src/storage/storage-repo.d.ts.map +1 -0
- package/dist/src/storage/storage-repo.js +267 -0
- package/dist/src/storage/storage-repo.js.map +1 -0
- package/dist/src/storage/struct.d.ts +29 -0
- package/dist/src/storage/struct.d.ts.map +1 -0
- package/dist/src/storage/struct.js +2 -0
- package/dist/src/storage/struct.js.map +1 -0
- package/dist/src/sync/client.d.ts +27 -0
- package/dist/src/sync/client.d.ts.map +1 -0
- package/dist/src/sync/client.js +32 -0
- package/dist/src/sync/client.js.map +1 -0
- package/dist/src/sync/protocol.d.ts +58 -0
- package/dist/src/sync/protocol.d.ts.map +1 -0
- package/dist/src/sync/protocol.js +12 -0
- package/dist/src/sync/protocol.js.map +1 -0
- package/dist/src/sync/service.d.ts +62 -0
- package/dist/src/sync/service.d.ts.map +1 -0
- package/dist/src/sync/service.js +168 -0
- package/dist/src/sync/service.js.map +1 -0
- package/package.json +73 -0
- package/readme.md +497 -0
- package/src/cluster/client.ts +63 -0
- package/src/cluster/cluster-repo.ts +711 -0
- package/src/cluster/partition-detector.ts +158 -0
- package/src/cluster/service.ts +156 -0
- package/src/index.ts +30 -0
- package/src/it-utility.ts +36 -0
- package/src/libp2p-key-network.ts +334 -0
- package/src/libp2p-node.ts +335 -0
- package/src/logger.ts +9 -0
- package/src/network/get-network-manager.ts +17 -0
- package/src/network/network-manager-service.ts +334 -0
- package/src/peer-utils.ts +24 -0
- package/src/protocol-client.ts +54 -0
- package/src/repo/client.ts +112 -0
- package/src/repo/cluster-coordinator.ts +592 -0
- package/src/repo/coordinator-repo.ts +137 -0
- package/src/repo/redirect.ts +17 -0
- package/src/repo/service.ts +219 -0
- package/src/repo/types.ts +7 -0
- package/src/routing/libp2p-known-peers.ts +26 -0
- package/src/routing/responsibility.ts +63 -0
- package/src/routing/simple-cluster-coordinator.ts +70 -0
- package/src/storage/arachnode-fret-adapter.ts +128 -0
- package/src/storage/block-storage.ts +182 -0
- package/src/storage/file-storage.ts +163 -0
- package/src/storage/helpers.ts +29 -0
- package/src/storage/i-block-storage.ts +40 -0
- package/src/storage/i-raw-storage.ts +30 -0
- package/src/storage/memory-storage.ts +108 -0
- package/src/storage/restoration-coordinator-v2.ts +191 -0
- package/src/storage/ring-selector.ts +155 -0
- package/src/storage/storage-monitor.ts +59 -0
- package/src/storage/storage-repo.ts +320 -0
- package/src/storage/struct.ts +34 -0
- package/src/sync/client.ts +42 -0
- package/src/sync/protocol.ts +71 -0
- package/src/sync/service.ts +229 -0
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import type { PendRequest, ActionBlocks, IRepo, MessageOptions, CommitResult, GetBlockResults, PendResult, BlockGets, CommitRequest, RepoMessage, IKeyNetwork, ICluster, ClusterConsensusConfig } from "@optimystic/db-core";
|
|
2
|
+
import { ClusterCoordinator } from "./cluster-coordinator.js";
|
|
3
|
+
import type { ClusterClient } from "../cluster/client.js";
|
|
4
|
+
import type { PeerId } from "@libp2p/interface";
|
|
5
|
+
import type { FretService } from "p2p-fret";
|
|
6
|
+
|
|
7
|
+
interface CoordinatorRepoComponents {
|
|
8
|
+
storageRepo: IRepo;
|
|
9
|
+
localCluster?: ICluster;
|
|
10
|
+
localPeerId?: PeerId;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
export function coordinatorRepo(
|
|
14
|
+
keyNetwork: IKeyNetwork,
|
|
15
|
+
createClusterClient: (peerId: PeerId) => ClusterClient,
|
|
16
|
+
cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
|
|
17
|
+
fretService?: FretService
|
|
18
|
+
): (components: CoordinatorRepoComponents) => CoordinatorRepo {
|
|
19
|
+
return (components: CoordinatorRepoComponents) => new CoordinatorRepo(keyNetwork, createClusterClient, components.storageRepo, cfg, components.localCluster, components.localPeerId, fretService);
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
/** Cluster coordination repo - uses local store, as well as distributes changes to other nodes using cluster consensus. */
|
|
23
|
+
export class CoordinatorRepo implements IRepo {
|
|
24
|
+
private coordinator: ClusterCoordinator;
|
|
25
|
+
private readonly DEFAULT_TIMEOUT = 30000; // 30 seconds default timeout
|
|
26
|
+
|
|
27
|
+
constructor(
|
|
28
|
+
readonly keyNetwork: IKeyNetwork,
|
|
29
|
+
readonly createClusterClient: (peerId: PeerId) => ClusterClient,
|
|
30
|
+
private readonly storageRepo: IRepo,
|
|
31
|
+
cfg?: Partial<ClusterConsensusConfig> & { clusterSize?: number },
|
|
32
|
+
localCluster?: ICluster,
|
|
33
|
+
localPeerId?: PeerId,
|
|
34
|
+
fretService?: FretService
|
|
35
|
+
) {
|
|
36
|
+
const policy: ClusterConsensusConfig & { clusterSize: number } = {
|
|
37
|
+
clusterSize: cfg?.clusterSize ?? 10,
|
|
38
|
+
superMajorityThreshold: cfg?.superMajorityThreshold ?? 0.75,
|
|
39
|
+
simpleMajorityThreshold: cfg?.simpleMajorityThreshold ?? 0.51,
|
|
40
|
+
minAbsoluteClusterSize: cfg?.minAbsoluteClusterSize ?? 3,
|
|
41
|
+
allowClusterDownsize: cfg?.allowClusterDownsize ?? true,
|
|
42
|
+
clusterSizeTolerance: cfg?.clusterSizeTolerance ?? 0.5,
|
|
43
|
+
partitionDetectionWindow: cfg?.partitionDetectionWindow ?? 60000
|
|
44
|
+
};
|
|
45
|
+
const localClusterRef = localCluster && localPeerId ? { update: localCluster.update.bind(localCluster), peerId: localPeerId } : undefined;
|
|
46
|
+
this.coordinator = new ClusterCoordinator(keyNetwork, createClusterClient, policy, localClusterRef, fretService);
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
async get(blockGets: BlockGets, options?: MessageOptions): Promise<GetBlockResults> {
|
|
50
|
+
// TODO: Verify that we are a proximate node for all block IDs in the request
|
|
51
|
+
|
|
52
|
+
// For read operations, just use the local store
|
|
53
|
+
// TODO: Implement read-path cluster verification without creating full 2PC transactions
|
|
54
|
+
return await this.storageRepo.get(blockGets, options);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
async pend(request: PendRequest, options?: MessageOptions): Promise<PendResult> {
|
|
58
|
+
const allBlockIds = Object.keys(request.transforms);
|
|
59
|
+
const coordinatingBlockIds = (options as any)?.coordinatingBlockIds ?? allBlockIds;
|
|
60
|
+
|
|
61
|
+
const peerCount = await this.coordinator.getClusterSize(coordinatingBlockIds[0]!)
|
|
62
|
+
if (peerCount <= 1) {
|
|
63
|
+
return await this.storageRepo.pend(request, options)
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
const message: RepoMessage = {
|
|
67
|
+
operations: [{ pend: request }],
|
|
68
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT,
|
|
69
|
+
coordinatingBlockIds
|
|
70
|
+
};
|
|
71
|
+
|
|
72
|
+
try {
|
|
73
|
+
await this.coordinator.executeClusterTransaction(coordinatingBlockIds[0]!, message, options);
|
|
74
|
+
return await this.storageRepo.pend(request, options);
|
|
75
|
+
} catch (error) {
|
|
76
|
+
console.error('Failed to complete pend operation:', error)
|
|
77
|
+
throw error
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
async cancel(actionRef: ActionBlocks, options?: MessageOptions): Promise<void> {
|
|
82
|
+
// TODO: Verify that we are a proximate node for all block IDs in the request
|
|
83
|
+
|
|
84
|
+
// Extract all block IDs affected by this cancel operation
|
|
85
|
+
const blockIds = actionRef.blockIds;
|
|
86
|
+
|
|
87
|
+
// Create a message for this cancel operation with timeout
|
|
88
|
+
const message: RepoMessage = {
|
|
89
|
+
operations: [{ cancel: { actionRef } }],
|
|
90
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
|
|
91
|
+
};
|
|
92
|
+
|
|
93
|
+
try {
|
|
94
|
+
// For each block ID, execute a cluster transaction
|
|
95
|
+
const clusterPromises = blockIds.map(blockId =>
|
|
96
|
+
this.coordinator.executeClusterTransaction(blockId, message, options)
|
|
97
|
+
);
|
|
98
|
+
|
|
99
|
+
// Wait for all cluster transactions to complete
|
|
100
|
+
await Promise.all(clusterPromises);
|
|
101
|
+
|
|
102
|
+
// If all cluster transactions succeeded, apply the cancel to the local store
|
|
103
|
+
await this.storageRepo.cancel(actionRef, options);
|
|
104
|
+
} catch (error) {
|
|
105
|
+
console.error('Failed to complete cancel operation:', error);
|
|
106
|
+
throw error;
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
async commit(request: CommitRequest, options?: MessageOptions): Promise<CommitResult> {
|
|
111
|
+
// TODO: Verify that we are a proximate node for all block IDs in the request
|
|
112
|
+
|
|
113
|
+
// Extract all block IDs affected by this commit operation
|
|
114
|
+
const blockIds = request.blockIds;
|
|
115
|
+
|
|
116
|
+
const peerCount = await this.coordinator.getClusterSize(blockIds[0]!)
|
|
117
|
+
if (peerCount <= 1) {
|
|
118
|
+
return await this.storageRepo.commit(request, options)
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
// Create a single message for the entire commit operation
|
|
122
|
+
const message: RepoMessage = {
|
|
123
|
+
operations: [{ commit: request }],
|
|
124
|
+
expiration: options?.expiration ?? Date.now() + this.DEFAULT_TIMEOUT
|
|
125
|
+
};
|
|
126
|
+
|
|
127
|
+
try {
|
|
128
|
+
// Execute cluster transaction using the first block ID
|
|
129
|
+
// All blocks in this operation should map to the same cluster
|
|
130
|
+
await this.coordinator.executeClusterTransaction(blockIds[0]!, message, options);
|
|
131
|
+
return await this.storageRepo.commit(request, options);
|
|
132
|
+
} catch (error) {
|
|
133
|
+
console.error('Failed to complete commit operation:', error)
|
|
134
|
+
throw error
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
export type RedirectPayload = {
|
|
2
|
+
redirect: {
|
|
3
|
+
peers: Array<{ id: string, addrs: string[] }>
|
|
4
|
+
reason: 'not_in_cluster'
|
|
5
|
+
}
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
export function encodePeers(peers: Array<{ id: string, addrs: string[] }>): RedirectPayload {
|
|
9
|
+
return {
|
|
10
|
+
redirect: {
|
|
11
|
+
peers,
|
|
12
|
+
reason: 'not_in_cluster'
|
|
13
|
+
}
|
|
14
|
+
}
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
|
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
import { pipe } from 'it-pipe'
|
|
2
|
+
import { decode as lpDecode, encode as lpEncode } from 'it-length-prefixed'
|
|
3
|
+
import type { Startable, Logger, IncomingStreamData } from '@libp2p/interface'
|
|
4
|
+
import type { IRepo, RepoMessage } from '@optimystic/db-core'
|
|
5
|
+
import { computeResponsibility } from '../routing/responsibility.js'
|
|
6
|
+
import { peersEqual } from '../peer-utils.js'
|
|
7
|
+
import { sha256 } from 'multiformats/hashes/sha2'
|
|
8
|
+
import { buildKnownPeers } from '../routing/libp2p-known-peers.js'
|
|
9
|
+
import { encodePeers } from './redirect.js'
|
|
10
|
+
import type { Uint8ArrayList } from 'uint8arraylist'
|
|
11
|
+
|
|
12
|
+
// Define Components interface
|
|
13
|
+
interface BaseComponents {
|
|
14
|
+
logger: { forComponent: (name: string) => Logger },
|
|
15
|
+
registrar: {
|
|
16
|
+
handle: (protocol: string, handler: (data: IncomingStreamData) => void, options: any) => Promise<void>
|
|
17
|
+
unhandle: (protocol: string) => Promise<void>
|
|
18
|
+
}
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
export type RepoServiceComponents = BaseComponents & {
|
|
22
|
+
repo: IRepo
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
export type RepoServiceInit = {
|
|
26
|
+
protocol?: string,
|
|
27
|
+
protocolPrefix?: string,
|
|
28
|
+
maxInboundStreams?: number,
|
|
29
|
+
maxOutboundStreams?: number,
|
|
30
|
+
logPrefix?: string,
|
|
31
|
+
kBucketSize?: number,
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
export function repoService(init: RepoServiceInit = {}): (components: RepoServiceComponents) => RepoService {
|
|
35
|
+
return (components: RepoServiceComponents) => new RepoService(components, init);
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* A libp2p service that handles repo protocol messages
|
|
40
|
+
*/
|
|
41
|
+
export class RepoService implements Startable {
|
|
42
|
+
private readonly protocol: string
|
|
43
|
+
private readonly maxInboundStreams: number
|
|
44
|
+
private readonly maxOutboundStreams: number
|
|
45
|
+
private readonly log: Logger
|
|
46
|
+
private readonly repo: IRepo
|
|
47
|
+
private readonly components: RepoServiceComponents
|
|
48
|
+
private running: boolean
|
|
49
|
+
private readonly k: number
|
|
50
|
+
|
|
51
|
+
constructor(components: RepoServiceComponents, init: RepoServiceInit = {}) {
|
|
52
|
+
this.components = components
|
|
53
|
+
const computed = init.protocol ?? (init.protocolPrefix ?? '/db-p2p') + '/repo/1.0.0'
|
|
54
|
+
this.protocol = computed
|
|
55
|
+
this.maxInboundStreams = init.maxInboundStreams ?? 32
|
|
56
|
+
this.maxOutboundStreams = init.maxOutboundStreams ?? 64
|
|
57
|
+
this.log = components.logger.forComponent(init.logPrefix ?? 'db-p2p:repo-service')
|
|
58
|
+
this.repo = components.repo
|
|
59
|
+
this.running = false
|
|
60
|
+
this.k = init.kBucketSize ?? 10
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
readonly [Symbol.toStringTag] = '@libp2p/repo-service'
|
|
64
|
+
|
|
65
|
+
/**
|
|
66
|
+
* Start the service
|
|
67
|
+
*/
|
|
68
|
+
async start(): Promise<void> {
|
|
69
|
+
if (this.running) {
|
|
70
|
+
return
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
await this.components.registrar.handle(this.protocol, this.handleIncomingStream.bind(this), {
|
|
74
|
+
maxInboundStreams: this.maxInboundStreams,
|
|
75
|
+
maxOutboundStreams: this.maxOutboundStreams
|
|
76
|
+
})
|
|
77
|
+
|
|
78
|
+
this.running = true
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Stop the service
|
|
83
|
+
*/
|
|
84
|
+
async stop(): Promise<void> {
|
|
85
|
+
if (!this.running) {
|
|
86
|
+
return
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
await this.components.registrar.unhandle(this.protocol)
|
|
90
|
+
this.running = false
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
/**
|
|
94
|
+
* Handle incoming streams on the repo protocol
|
|
95
|
+
*/
|
|
96
|
+
private handleIncomingStream(data: IncomingStreamData): void {
|
|
97
|
+
const { stream, connection } = data
|
|
98
|
+
const peerId = connection.remotePeer
|
|
99
|
+
|
|
100
|
+
const processStream = async function* (this: RepoService, source: AsyncIterable<Uint8ArrayList>) {
|
|
101
|
+
for await (const msg of source) {
|
|
102
|
+
// Decode the message
|
|
103
|
+
const decoded = new TextDecoder().decode(msg.subarray())
|
|
104
|
+
const message = JSON.parse(decoded) as RepoMessage
|
|
105
|
+
|
|
106
|
+
// Process each operation
|
|
107
|
+
const operation = message.operations[0]
|
|
108
|
+
let response: any
|
|
109
|
+
|
|
110
|
+
if ('get' in operation) {
|
|
111
|
+
{
|
|
112
|
+
// Use sha256 digest of block id string for consistent key space
|
|
113
|
+
const mh = await sha256.digest(new TextEncoder().encode(operation.get.blockIds[0]!))
|
|
114
|
+
const key = mh.digest
|
|
115
|
+
const nm: any = (this.components as any).libp2p?.services?.networkManager
|
|
116
|
+
if (nm?.getCluster) {
|
|
117
|
+
const cluster: any[] = await nm.getCluster(key);
|
|
118
|
+
(message as any).cluster = (cluster as any[]).map(p => p.toString?.() ?? String(p))
|
|
119
|
+
const selfId = (this.components as any).libp2p.peerId
|
|
120
|
+
const isMember = cluster.some((p: any) => peersEqual(p, selfId))
|
|
121
|
+
const smallMesh = cluster.length < this.k
|
|
122
|
+
if (!smallMesh && !isMember) {
|
|
123
|
+
const peers = cluster.filter((p: any) => !peersEqual(p, selfId))
|
|
124
|
+
console.debug('repo-service:redirect', {
|
|
125
|
+
peerId: selfId.toString(),
|
|
126
|
+
reason: 'not-cluster-member',
|
|
127
|
+
operation: 'get',
|
|
128
|
+
blockId: operation.get.blockIds[0],
|
|
129
|
+
cluster: cluster.map((p: any) => p.toString?.() ?? String(p))
|
|
130
|
+
})
|
|
131
|
+
response = encodePeers(peers.map((pid: any) => ({ id: pid.toString(), addrs: [] })))
|
|
132
|
+
} else {
|
|
133
|
+
response = await this.repo.get(operation.get, { expiration: message.expiration })
|
|
134
|
+
}
|
|
135
|
+
} else {
|
|
136
|
+
response = await this.repo.get(operation.get, { expiration: message.expiration })
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
} else if ('pend' in operation) {
|
|
140
|
+
{
|
|
141
|
+
const id = Object.keys(operation.pend.transforms)[0]!
|
|
142
|
+
const mh = await sha256.digest(new TextEncoder().encode(id))
|
|
143
|
+
const key = mh.digest
|
|
144
|
+
const nm: any = (this.components as any).libp2p?.services?.networkManager
|
|
145
|
+
if (nm?.getCluster) {
|
|
146
|
+
const cluster: any[] = await nm.getCluster(key)
|
|
147
|
+
; (message as any).cluster = (cluster as any[]).map(p => p.toString?.() ?? String(p))
|
|
148
|
+
const selfId = (this.components as any).libp2p.peerId
|
|
149
|
+
const isMember = cluster.some((p: any) => peersEqual(p, selfId))
|
|
150
|
+
const smallMesh = cluster.length < this.k
|
|
151
|
+
if (!smallMesh && !isMember) {
|
|
152
|
+
const peers = cluster.filter((p: any) => !peersEqual(p, selfId))
|
|
153
|
+
console.debug('repo-service:redirect', {
|
|
154
|
+
peerId: selfId.toString(),
|
|
155
|
+
reason: 'not-cluster-member',
|
|
156
|
+
operation: 'pend',
|
|
157
|
+
blockId: id,
|
|
158
|
+
cluster: cluster.map((p: any) => p.toString?.() ?? String(p))
|
|
159
|
+
})
|
|
160
|
+
response = encodePeers(peers.map((pid: any) => ({ id: pid.toString(), addrs: [] })))
|
|
161
|
+
} else {
|
|
162
|
+
response = await this.repo.pend(operation.pend, { expiration: message.expiration })
|
|
163
|
+
}
|
|
164
|
+
} else {
|
|
165
|
+
response = await this.repo.pend(operation.pend, { expiration: message.expiration })
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
} else if ('cancel' in operation) {
|
|
169
|
+
response = await this.repo.cancel(operation.cancel.actionRef, {
|
|
170
|
+
expiration: message.expiration
|
|
171
|
+
})
|
|
172
|
+
} else if ('commit' in operation) {
|
|
173
|
+
{
|
|
174
|
+
const mh = await sha256.digest(new TextEncoder().encode(operation.commit.tailId))
|
|
175
|
+
const key = mh.digest
|
|
176
|
+
const nm: any = (this.components as any).libp2p?.services?.networkManager
|
|
177
|
+
if (nm?.getCluster) {
|
|
178
|
+
const cluster: any[] = await nm.getCluster(key)
|
|
179
|
+
; (message as any).cluster = (cluster as any[]).map(p => p.toString?.() ?? String(p))
|
|
180
|
+
const selfId = (this.components as any).libp2p.peerId
|
|
181
|
+
const isMember = cluster.some((p: any) => peersEqual(p, selfId))
|
|
182
|
+
const smallMesh = cluster.length < this.k
|
|
183
|
+
if (!smallMesh && !isMember) {
|
|
184
|
+
const peers = cluster.filter((p: any) => !peersEqual(p, selfId))
|
|
185
|
+
console.debug('repo-service:redirect', {
|
|
186
|
+
peerId: selfId.toString(),
|
|
187
|
+
reason: 'not-cluster-member',
|
|
188
|
+
operation: 'commit',
|
|
189
|
+
tailId: operation.commit.tailId,
|
|
190
|
+
cluster: cluster.map((p: any) => p.toString?.() ?? String(p))
|
|
191
|
+
})
|
|
192
|
+
response = encodePeers(peers.map((pid: any) => ({ id: pid.toString(), addrs: [] })))
|
|
193
|
+
} else {
|
|
194
|
+
response = await this.repo.commit(operation.commit, { expiration: message.expiration })
|
|
195
|
+
}
|
|
196
|
+
} else {
|
|
197
|
+
response = await this.repo.commit(operation.commit, { expiration: message.expiration })
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
// Encode and yield the response
|
|
203
|
+
yield new TextEncoder().encode(JSON.stringify(response))
|
|
204
|
+
}
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
Promise.resolve().then(async () => {
|
|
208
|
+
await pipe(
|
|
209
|
+
stream,
|
|
210
|
+
(source) => lpDecode(source),
|
|
211
|
+
processStream.bind(this),
|
|
212
|
+
(source) => lpEncode(source),
|
|
213
|
+
stream
|
|
214
|
+
)
|
|
215
|
+
}).catch(err => {
|
|
216
|
+
this.log.error('error handling repo protocol message from %p - %e', peerId, err)
|
|
217
|
+
})
|
|
218
|
+
}
|
|
219
|
+
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import type { Libp2p } from 'libp2p'
|
|
2
|
+
import type { PeerId } from '@libp2p/interface'
|
|
3
|
+
import type { KnownPeer } from './responsibility.js'
|
|
4
|
+
|
|
5
|
+
export function buildKnownPeers(libp2p: Libp2p): KnownPeer[] {
|
|
6
|
+
const self: KnownPeer = {
|
|
7
|
+
id: libp2p.peerId as unknown as PeerId,
|
|
8
|
+
addrs: libp2p.getMultiaddrs().map(ma => ma.toString())
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
const connections = libp2p.getConnections()
|
|
12
|
+
const byPeer: Record<string, { id: PeerId, addrs: Set<string> }> = {}
|
|
13
|
+
|
|
14
|
+
for (const c of connections) {
|
|
15
|
+
const pid = c.remotePeer
|
|
16
|
+
const key = pid.toString()
|
|
17
|
+
const entry = byPeer[key] ?? (byPeer[key] = { id: pid as unknown as PeerId, addrs: new Set() })
|
|
18
|
+
const addrStr = c.remoteAddr?.toString?.()
|
|
19
|
+
if (addrStr) entry.addrs.add(addrStr)
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
const others: KnownPeer[] = Object.values(byPeer).map(e => ({ id: e.id, addrs: Array.from(e.addrs) }))
|
|
23
|
+
return [self, ...others]
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
import type { PeerId } from '@libp2p/interface'
|
|
2
|
+
|
|
3
|
+
export type KnownPeer = { id: PeerId, addrs: string[] }
|
|
4
|
+
|
|
5
|
+
export type ResponsibilityResult = {
|
|
6
|
+
inCluster: boolean
|
|
7
|
+
nearest: KnownPeer[]
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
export function xorDistanceBytes(a: Uint8Array, b: Uint8Array): Uint8Array {
|
|
11
|
+
const len = Math.max(a.length, b.length)
|
|
12
|
+
const out = new Uint8Array(len)
|
|
13
|
+
for (let i = 0; i < len; i++) {
|
|
14
|
+
const ai = a[a.length - 1 - i] ?? 0
|
|
15
|
+
const bi = b[b.length - 1 - i] ?? 0
|
|
16
|
+
out[len - 1 - i] = ai ^ bi
|
|
17
|
+
}
|
|
18
|
+
return out
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
export function lessThanLex(a: Uint8Array, b: Uint8Array): boolean {
|
|
22
|
+
const len = Math.max(a.length, b.length)
|
|
23
|
+
for (let i = 0; i < len; i++) {
|
|
24
|
+
const av = a[i] ?? 0
|
|
25
|
+
const bv = b[i] ?? 0
|
|
26
|
+
if (av < bv) return true
|
|
27
|
+
if (av > bv) return false
|
|
28
|
+
}
|
|
29
|
+
return false
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
export function sortPeersByDistance(peers: KnownPeer[], key: Uint8Array): KnownPeer[] {
|
|
33
|
+
return peers
|
|
34
|
+
.map(p => ({ p, d: xorDistanceBytes(p.id.toMultihash().bytes, key) }))
|
|
35
|
+
.sort((a, b) => (lessThanLex(a.d, b.d) ? -1 : 1))
|
|
36
|
+
.map(x => x.p)
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
export function computeResponsibility(
|
|
40
|
+
key: Uint8Array,
|
|
41
|
+
self: KnownPeer,
|
|
42
|
+
others: KnownPeer[],
|
|
43
|
+
k: number
|
|
44
|
+
): ResponsibilityResult {
|
|
45
|
+
const all = [self, ...others]
|
|
46
|
+
const sorted = sortPeersByDistance(all, key)
|
|
47
|
+
|
|
48
|
+
// For small meshes, use a different strategy
|
|
49
|
+
if (all.length <= 3) {
|
|
50
|
+
// With 3 or fewer nodes, the first node in XOR order handles it
|
|
51
|
+
// This ensures only ONE node considers itself responsible
|
|
52
|
+
const inCluster = sorted[0]!.id.equals(self.id)
|
|
53
|
+
return { inCluster, nearest: sorted }
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// For larger meshes, use traditional k-nearest
|
|
57
|
+
const effectiveK = Math.min(k, Math.max(1, Math.floor(all.length / 2)))
|
|
58
|
+
const topK = sorted.slice(0, effectiveK)
|
|
59
|
+
const inCluster = topK.some(p => p.id.equals(self.id))
|
|
60
|
+
return { inCluster, nearest: topK }
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import type { PeerId } from '@libp2p/interface'
|
|
2
|
+
import { sha256 } from 'multiformats/hashes/sha2'
|
|
3
|
+
|
|
4
|
+
export interface SimpleClusterCoordinator {
|
|
5
|
+
selectCoordinator(key: Uint8Array, peers: PeerId[]): Promise<PeerId>
|
|
6
|
+
selectReplicas(key: Uint8Array, peers: PeerId[], replicationFactor: number): Promise<PeerId[]>
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Simple consistent hashing for small clusters
|
|
11
|
+
* Uses modulo arithmetic instead of XOR distance
|
|
12
|
+
*/
|
|
13
|
+
export class ModuloCoordinator implements SimpleClusterCoordinator {
|
|
14
|
+
async hashPeer(peerId: PeerId): Promise<bigint> {
|
|
15
|
+
const mh = await sha256.digest(peerId.toMultihash().bytes)
|
|
16
|
+
// Take first 8 bytes as bigint
|
|
17
|
+
const view = new DataView(mh.digest.buffer, mh.digest.byteOffset, 8)
|
|
18
|
+
return view.getBigUint64(0, false)
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
async hashKey(key: Uint8Array): Promise<bigint> {
|
|
22
|
+
const mh = await sha256.digest(key)
|
|
23
|
+
const view = new DataView(mh.digest.buffer, mh.digest.byteOffset, 8)
|
|
24
|
+
return view.getBigUint64(0, false)
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
async selectCoordinator(key: Uint8Array, peers: PeerId[]): Promise<PeerId> {
|
|
28
|
+
if (peers.length === 0) throw new Error('No peers available')
|
|
29
|
+
if (peers.length === 1) return peers[0]!
|
|
30
|
+
|
|
31
|
+
// Simple modulo selection - deterministic but not distance-based
|
|
32
|
+
const keyHash = await this.hashKey(key)
|
|
33
|
+
const index = Number(keyHash % BigInt(peers.length))
|
|
34
|
+
return peers[index]!
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
async selectReplicas(key: Uint8Array, peers: PeerId[], replicationFactor: number): Promise<PeerId[]> {
|
|
38
|
+
if (peers.length <= replicationFactor) return [...peers]
|
|
39
|
+
|
|
40
|
+
const coordinator = await this.selectCoordinator(key, peers)
|
|
41
|
+
const replicas = [coordinator]
|
|
42
|
+
const remaining = peers.filter(p => !p.equals(coordinator))
|
|
43
|
+
|
|
44
|
+
// Select additional replicas deterministically
|
|
45
|
+
for (let i = 1; i < replicationFactor && remaining.length > 0; i++) {
|
|
46
|
+
const subKey = new Uint8Array([...key, i])
|
|
47
|
+
const replica = await this.selectCoordinator(subKey, remaining)
|
|
48
|
+
replicas.push(replica)
|
|
49
|
+
remaining.splice(remaining.findIndex(p => p.equals(replica)), 1)
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
return replicas
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* For very small clusters, just replicate everywhere
|
|
58
|
+
*/
|
|
59
|
+
export class FullReplicationCoordinator implements SimpleClusterCoordinator {
|
|
60
|
+
async selectCoordinator(key: Uint8Array, peers: PeerId[]): Promise<PeerId> {
|
|
61
|
+
// Always select first peer as primary
|
|
62
|
+
if (peers.length === 0) throw new Error('No peers available')
|
|
63
|
+
return peers[0]!
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
async selectReplicas(key: Uint8Array, peers: PeerId[], replicationFactor: number): Promise<PeerId[]> {
|
|
67
|
+
// Replicate to all peers in small clusters
|
|
68
|
+
return [...peers]
|
|
69
|
+
}
|
|
70
|
+
}
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
import type { FretService } from 'p2p-fret';
|
|
2
|
+
|
|
3
|
+
/**
 * Arachnode ring membership information.
 * Stored in FRET's generic metadata field, so FRET itself stays a pure DHT
 * with no knowledge of Arachnode semantics.
 */
export interface ArachnodeInfo {
	/** Ring depth: 0 = full keyspace, N = keyspace split into 2^N partitions */
	ringDepth: number;

	/** Partition this node covers (undefined if ringDepth = 0) */
	partition?: {
		/** Number of significant prefix bits — presumably equals ringDepth; TODO confirm */
		prefixBits: number;
		/** Value of those prefix bits, identifying which partition this node serves */
		prefixValue: number;
	};

	/** Storage capacity in bytes */
	capacity: {
		/** Total bytes of storage this node offers */
		total: number;
		/** Bytes currently in use */
		used: number;
		/** Bytes still free — presumably total - used; TODO confirm invariant */
		available: number;
	};

	/** Ring membership status */
	status: 'joining' | 'active' | 'moving' | 'leaving';
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Adapter that provides Arachnode-specific methods on top of FRET's generic metadata.
|
|
30
|
+
*
|
|
31
|
+
* FRET remains a pure DHT, while this adapter layers Arachnode semantics.
|
|
32
|
+
*/
|
|
33
|
+
export class ArachnodeFretAdapter {
|
|
34
|
+
private static readonly ARACHNODE_KEY = 'arachnode';
|
|
35
|
+
|
|
36
|
+
constructor(private readonly fret: FretService) {}
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Set this node's Arachnode ring membership.
|
|
40
|
+
*/
|
|
41
|
+
setArachnodeInfo(info: ArachnodeInfo): void {
|
|
42
|
+
this.fret.setMetadata({
|
|
43
|
+
[ArachnodeFretAdapter.ARACHNODE_KEY]: info
|
|
44
|
+
});
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
/**
|
|
48
|
+
* Get Arachnode info for a specific peer.
|
|
49
|
+
*/
|
|
50
|
+
getArachnodeInfo(peerId: string): ArachnodeInfo | undefined {
|
|
51
|
+
const metadata = this.fret.getMetadata(peerId);
|
|
52
|
+
return metadata?.[ArachnodeFretAdapter.ARACHNODE_KEY] as ArachnodeInfo | undefined;
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
/**
|
|
56
|
+
* Get my own Arachnode info.
|
|
57
|
+
*/
|
|
58
|
+
getMyArachnodeInfo(): ArachnodeInfo | undefined {
|
|
59
|
+
const myPeerId = (this.fret as any).node?.peerId?.toString();
|
|
60
|
+
if (!myPeerId) return undefined;
|
|
61
|
+
return this.getArachnodeInfo(myPeerId);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
/**
|
|
65
|
+
* Find all peers at a specific ring depth.
|
|
66
|
+
*/
|
|
67
|
+
findPeersAtRing(ringDepth: number): string[] {
|
|
68
|
+
const peers = this.fret.listPeers();
|
|
69
|
+
return peers
|
|
70
|
+
.filter(peer => {
|
|
71
|
+
const arachnode = peer.metadata?.[ArachnodeFretAdapter.ARACHNODE_KEY] as ArachnodeInfo | undefined;
|
|
72
|
+
return arachnode?.ringDepth === ringDepth;
|
|
73
|
+
})
|
|
74
|
+
.map(peer => peer.id);
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Find all known storage rings (unique ring depths).
|
|
79
|
+
*/
|
|
80
|
+
getKnownRings(): number[] {
|
|
81
|
+
const peers = this.fret.listPeers();
|
|
82
|
+
const rings = new Set<number>();
|
|
83
|
+
|
|
84
|
+
for (const peer of peers) {
|
|
85
|
+
const arachnode = peer.metadata?.[ArachnodeFretAdapter.ARACHNODE_KEY] as ArachnodeInfo | undefined;
|
|
86
|
+
if (arachnode?.ringDepth !== undefined) {
|
|
87
|
+
rings.add(arachnode.ringDepth);
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
return Array.from(rings).sort((a, b) => a - b);
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
/**
|
|
95
|
+
* Get statistics about discovered rings.
|
|
96
|
+
*/
|
|
97
|
+
getRingStats(): Array<{ ringDepth: number; peerCount: number; avgCapacity: number }> {
|
|
98
|
+
const peers = this.fret.listPeers();
|
|
99
|
+
const ringMap = new Map<number, { count: number; totalCapacity: number }>();
|
|
100
|
+
|
|
101
|
+
for (const peer of peers) {
|
|
102
|
+
const arachnode = peer.metadata?.[ArachnodeFretAdapter.ARACHNODE_KEY] as ArachnodeInfo | undefined;
|
|
103
|
+
if (arachnode) {
|
|
104
|
+
const existing = ringMap.get(arachnode.ringDepth) ?? { count: 0, totalCapacity: 0 };
|
|
105
|
+
ringMap.set(arachnode.ringDepth, {
|
|
106
|
+
count: existing.count + 1,
|
|
107
|
+
totalCapacity: existing.totalCapacity + arachnode.capacity.available
|
|
108
|
+
});
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
return Array.from(ringMap.entries())
|
|
113
|
+
.map(([ringDepth, stats]) => ({
|
|
114
|
+
ringDepth,
|
|
115
|
+
peerCount: stats.count,
|
|
116
|
+
avgCapacity: stats.totalCapacity / stats.count
|
|
117
|
+
}))
|
|
118
|
+
.sort((a, b) => a.ringDepth - b.ringDepth);
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
/**
|
|
122
|
+
* Access the underlying FRET service.
|
|
123
|
+
*/
|
|
124
|
+
getFret(): FretService {
|
|
125
|
+
return this.fret;
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
|