querysub 0.402.0 → 0.404.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +2 -0
- package/bin/audit-imports.js +4 -0
- package/package.json +7 -4
- package/spec.txt +77 -0
- package/src/-a-archives/archiveCache.ts +9 -4
- package/src/-a-archives/archivesBackBlaze.ts +1039 -1039
- package/src/-a-auth/certs.ts +0 -12
- package/src/-c-identity/IdentityController.ts +12 -3
- package/src/-f-node-discovery/NodeDiscovery.ts +32 -26
- package/src/-g-core-values/NodeCapabilities.ts +12 -2
- package/src/0-path-value-core/AuthorityLookup.ts +239 -0
- package/src/0-path-value-core/LockWatcher2.ts +150 -0
- package/src/0-path-value-core/PathRouter.ts +535 -0
- package/src/0-path-value-core/PathRouterRouteOverride.ts +72 -0
- package/src/0-path-value-core/PathRouterServerAuthoritySpec.tsx +65 -0
- package/src/0-path-value-core/PathValueCommitter.ts +222 -488
- package/src/0-path-value-core/PathValueController.ts +277 -239
- package/src/0-path-value-core/PathWatcher.ts +534 -0
- package/src/0-path-value-core/ShardPrefixes.ts +31 -0
- package/src/0-path-value-core/ValidStateComputer.ts +303 -0
- package/src/0-path-value-core/archiveLocks/ArchiveLocks.ts +1 -1
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +80 -44
- package/src/0-path-value-core/archiveLocks/archiveSnapshots.ts +13 -16
- package/src/0-path-value-core/auditLogs.ts +2 -0
- package/src/0-path-value-core/hackedPackedPathParentFiltering.ts +97 -0
- package/src/0-path-value-core/pathValueArchives.ts +490 -492
- package/src/0-path-value-core/pathValueCore.ts +195 -1492
- package/src/0-path-value-core/startupAuthority.ts +74 -0
- package/src/1-path-client/RemoteWatcher.ts +100 -83
- package/src/1-path-client/pathValueClientWatcher.ts +808 -815
- package/src/2-proxy/PathValueProxyWatcher.ts +10 -8
- package/src/2-proxy/archiveMoveHarness.ts +182 -214
- package/src/2-proxy/garbageCollection.ts +9 -8
- package/src/2-proxy/schema2.ts +21 -1
- package/src/3-path-functions/PathFunctionHelpers.ts +206 -180
- package/src/3-path-functions/PathFunctionRunner.ts +943 -766
- package/src/3-path-functions/PathFunctionRunnerMain.ts +5 -3
- package/src/3-path-functions/pathFunctionLoader.ts +2 -2
- package/src/3-path-functions/syncSchema.ts +592 -521
- package/src/4-deploy/deployFunctions.ts +19 -4
- package/src/4-deploy/deployGetFunctionsInner.ts +8 -2
- package/src/4-deploy/deployMain.ts +51 -68
- package/src/4-deploy/edgeClientWatcher.tsx +1 -1
- package/src/4-deploy/edgeNodes.ts +2 -2
- package/src/4-dom/qreact.tsx +2 -4
- package/src/4-dom/qreactTest.tsx +7 -13
- package/src/4-querysub/Querysub.ts +21 -8
- package/src/4-querysub/QuerysubController.ts +45 -29
- package/src/4-querysub/permissions.ts +2 -2
- package/src/4-querysub/querysubPrediction.ts +80 -70
- package/src/4-querysub/schemaHelpers.ts +5 -1
- package/src/5-diagnostics/GenericFormat.tsx +14 -9
- package/src/archiveapps/archiveGCEntry.tsx +9 -2
- package/src/archiveapps/archiveJoinEntry.ts +87 -84
- package/src/archiveapps/archiveMergeEntry.tsx +2 -0
- package/src/bits.ts +19 -0
- package/src/config.ts +21 -3
- package/src/config2.ts +23 -48
- package/src/deployManager/components/DeployPage.tsx +7 -3
- package/src/deployManager/machineSchema.ts +4 -1
- package/src/diagnostics/ActionsHistory.ts +3 -8
- package/src/diagnostics/AuditLogPage.tsx +2 -3
- package/src/diagnostics/FunctionCallInfo.tsx +141 -0
- package/src/diagnostics/FunctionCallInfoState.ts +162 -0
- package/src/diagnostics/MachineThreadInfo.tsx +1 -1
- package/src/diagnostics/NodeViewer.tsx +37 -48
- package/src/diagnostics/SyncTestPage.tsx +241 -0
- package/src/diagnostics/auditImportViolations.ts +185 -0
- package/src/diagnostics/listenOnDebugger.ts +3 -3
- package/src/diagnostics/logs/IndexedLogs/BufferUnitSet.ts +10 -4
- package/src/diagnostics/logs/IndexedLogs/IndexedLogs.ts +2 -2
- package/src/diagnostics/logs/IndexedLogs/LogViewer3.tsx +24 -22
- package/src/diagnostics/logs/IndexedLogs/moveIndexLogsToPublic.ts +1 -1
- package/src/diagnostics/logs/diskLogGlobalContext.ts +1 -0
- package/src/diagnostics/logs/errorNotifications2/logWatcher.ts +1 -3
- package/src/diagnostics/logs/lifeCycleAnalysis/LifeCycleEntryEditor.tsx +39 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/LifeCycleEntryReadMode.tsx +4 -6
- package/src/diagnostics/logs/lifeCycleAnalysis/LifeCycleInstanceTableView.tsx +36 -5
- package/src/diagnostics/logs/lifeCycleAnalysis/LifeCyclePage.tsx +19 -5
- package/src/diagnostics/logs/lifeCycleAnalysis/LifeCycleRenderer.tsx +15 -7
- package/src/diagnostics/logs/lifeCycleAnalysis/NestedLifeCycleInfo.tsx +28 -106
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycleMatching.ts +2 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycleMisc.ts +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycleSearch.tsx +18 -7
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +3 -0
- package/src/diagnostics/managementPages.tsx +10 -3
- package/src/diagnostics/misc-pages/ArchiveViewer.tsx +20 -26
- package/src/diagnostics/misc-pages/ArchiveViewerTree.tsx +6 -4
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +2 -2
- package/src/diagnostics/misc-pages/LocalWatchViewer.tsx +7 -9
- package/src/diagnostics/misc-pages/SnapshotViewer.tsx +23 -12
- package/src/diagnostics/misc-pages/archiveViewerShared.tsx +1 -1
- package/src/diagnostics/pathAuditer.ts +486 -0
- package/src/diagnostics/pathAuditerCallback.ts +20 -0
- package/src/diagnostics/watchdog.ts +8 -1
- package/src/library-components/URLParam.ts +1 -1
- package/src/misc/hash.ts +1 -0
- package/src/path.ts +21 -7
- package/src/server.ts +54 -47
- package/src/user-implementation/loginEmail.tsx +1 -1
- package/tempnotes.txt +67 -0
- package/test.ts +288 -95
- package/src/0-path-value-core/NodePathAuthorities.ts +0 -1057
- package/src/0-path-value-core/PathController.ts +0 -1
- package/src/5-diagnostics/diskValueAudit.ts +0 -218
- package/src/5-diagnostics/memoryValueAudit.ts +0 -438
- package/src/archiveapps/lockTest.ts +0 -127
|
@@ -0,0 +1,535 @@
|
|
|
1
|
+
import { sort } from "socket-function/src/misc";
|
|
2
|
+
import { getLastPathPart, getPathDepth, getPathIndex, getPathStr1 } from "../path";
|
|
3
|
+
import { AuthorityEntry, authorityLookup } from "./AuthorityLookup";
|
|
4
|
+
import { PathValue } from "./pathValueCore";
|
|
5
|
+
import { shuffle } from "../misc/random";
|
|
6
|
+
import { fastHash } from "../misc/hash";
|
|
7
|
+
import { getOwnNodeId, isOwnNodeId } from "../-f-node-discovery/NodeDiscovery";
|
|
8
|
+
import { unique } from "../misc";
|
|
9
|
+
import { measureFnc } from "socket-function/src/profiling/measure";
|
|
10
|
+
import { getRoutingOverride, hasPrefixHash } from "./PathRouterRouteOverride";
|
|
11
|
+
import { sha256 } from "js-sha256";
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
// Cases
|
|
15
|
+
// 1) Whole path hash
|
|
16
|
+
// 2) Prefix + child override
|
|
17
|
+
// 3) Special key override (for calls, so we can shard by function)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
// Reserved domain for node-local paths; values under it are never routed to other nodes
// (see PathRouter.isLocalPath / isSelfAuthority).
export const LOCAL_DOMAIN = "LOCAL";
// The LOCAL_DOMAIN rendered as a path string, used as a startsWith prefix check.
export const LOCAL_DOMAIN_PATH = getPathStr1(LOCAL_DOMAIN);

// NOTE: The goal is for a user on the site to require talking to as few authorities as possible. Because most things are nested in lookups, if we have a prefix per root lookup, this should mean if you're on a page, most of the data should be talking to the same authority, as most of the data should have the exact same hash.
// - We also support the exclude default case, which allows you to only handle certain prefixes, so you can make a function only run on certain servers.
/** Describes which slice of the path space a single node is authoritative for. */
export type AuthoritySpec = {
    // The node that owns this slice of the route space.
    nodeId: string;
    // If we don't match a value in remaps, we hash `path`. UNLESS excludeDefault, then we just don't match it (probably with a hash of -1, so it doesn't match our range).
    // - And paths can also override the hash based on the path itself, ex, to force the hash to be 0, or 0.4, etc.
    // Route values are fractions in [0, 1); this node owns [routeStart, routeEnd).
    routeStart: number;
    routeEnd: number;
    // If the path.startsWith(prefix), but prefix !== path, then we hash getPathIndex(path, hashIndex)
    // - For now, let's just never add overlapping prefixes.
    prefixes: {
        // Path prefix this rule applies to (must be a strict prefix of the path).
        prefix: string;
        // Which path segment to hash when the prefix matches.
        hashIndex: number;
    }[];
    // When set, paths that match no prefix are NOT handled by this node at all
    // (instead of falling back to hashing the full path).
    excludeDefault?: boolean;
};
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
export class PathRouter {
|
|
44
|
+
|
|
45
|
+
public static async waitUntilReady() {
|
|
46
|
+
await authorityLookup.startSyncing();
|
|
47
|
+
}
|
|
48
|
+
/** NOTE: Parent watches are a little bit special. If it's a parent watch, we always hash it, assuming the parent is a prefix. And as most of the watches are parent watches, we're usually going to do this, and so it actually is independent of the topology.
|
|
49
|
+
- The topology is really only used for the initial sync, which will use matchesAuthoritySpec, which gets the full routing value, AND, for disk storage.
|
|
50
|
+
*/
|
|
51
|
+
@measureFnc
|
|
52
|
+
public static getRouteChildKey(path: string): number {
|
|
53
|
+
let key = getLastPathPart(path);
|
|
54
|
+
return this.getSingleKeyRoute(key);
|
|
55
|
+
}
|
|
56
|
+
// NOTE: For non-prefix values, breaking up by routes on the file system becomes complicated, and so we just all non-prefix values in the same file. However, in memory, in some places, we need route values for every single path, such as for FunctionRunner, so it can distribute the function running evenly, without overlap.
|
|
57
|
+
@measureFnc
|
|
58
|
+
private static getRouteFull(config: {
|
|
59
|
+
path: string;
|
|
60
|
+
spec: AuthoritySpec;
|
|
61
|
+
}): number {
|
|
62
|
+
// NOTE: getSelfPathIdentifierTargets also hardcodes this logic, so it can hash many values quickly
|
|
63
|
+
let { path, spec } = config;
|
|
64
|
+
let override = getRoutingOverride(path);
|
|
65
|
+
if (override) {
|
|
66
|
+
if (!hasPrefixHash({ spec, prefixHash: override.prefixHash })) return -1;
|
|
67
|
+
return override.route;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
let prefix = spec.prefixes.find(x => path.startsWith(x.prefix) && x.prefix !== path);
|
|
71
|
+
if (prefix) {
|
|
72
|
+
let key = getPathIndex(path, prefix.hashIndex);
|
|
73
|
+
if (key === undefined) throw new Error(`Impossible, hash index ${prefix.hashIndex} is out of range for path ${path}, but it matched the prefix ${prefix.prefix}`);
|
|
74
|
+
return this.getSingleKeyRoute(key);
|
|
75
|
+
}
|
|
76
|
+
if (spec.excludeDefault) return -1;
|
|
77
|
+
let hash = this.getSingleKeyRoute(path);
|
|
78
|
+
if (hash < spec.routeStart || hash >= spec.routeEnd) return -1;
|
|
79
|
+
return hash;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
private static lastKeyRoute = {
|
|
83
|
+
key: "",
|
|
84
|
+
route: -1,
|
|
85
|
+
};
|
|
86
|
+
// Takes a key which is a part of a path. Mostly used PathRouterRouteOverride, or other PathRouter helpers.
|
|
87
|
+
public static getSingleKeyRoute(key: string): number {
|
|
88
|
+
if (key && this.lastKeyRoute.key === key) return this.lastKeyRoute.route;
|
|
89
|
+
let hash = fastHash(key);
|
|
90
|
+
let route = hash % (1000 * 1000 * 1000) / (1000 * 1000 * 1000);
|
|
91
|
+
this.lastKeyRoute.key = key;
|
|
92
|
+
this.lastKeyRoute.route = route;
|
|
93
|
+
return route;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
@measureFnc
|
|
98
|
+
public static matchesAuthoritySpec(authoritySpec: AuthoritySpec, path: string): boolean {
|
|
99
|
+
let route = this.getRouteFull({ path, spec: authoritySpec });
|
|
100
|
+
return authoritySpec.routeStart <= route && route < authoritySpec.routeEnd;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
private static getPrefixHash(prefix: string): string {
|
|
105
|
+
return Buffer.from(sha256(prefix), "hex").toString("base64").slice(0, 6);
|
|
106
|
+
}
|
|
107
|
+
private static encodeIdentifier(config: { prefixes: string[]; rangeStart: number; rangeEnd: number } | "remaining"): string {
|
|
108
|
+
if (config === "remaining") return "P!REMAINING";
|
|
109
|
+
let { prefixes, rangeStart, rangeEnd } = config;
|
|
110
|
+
return ["P", rangeStart.toString(), rangeEnd.toString(), ...prefixes.map(x => this.getPrefixHash(x))].join("!");
|
|
111
|
+
|
|
112
|
+
}
|
|
113
|
+
private static decodeIdentifier(identifier: string): { prefixHashes: string[]; rangeStart: number; rangeEnd: number } | "remaining" {
|
|
114
|
+
if (!identifier.startsWith("P")) return "remaining";
|
|
115
|
+
let parts = identifier.split("!");
|
|
116
|
+
if (parts[1] === "REMAINING") return "remaining";
|
|
117
|
+
return {
|
|
118
|
+
rangeStart: parseFloat(parts[1]),
|
|
119
|
+
rangeEnd: parseFloat(parts[2]),
|
|
120
|
+
prefixHashes: parts.slice(3),
|
|
121
|
+
};
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
// Allows you to create a folder on disk, and then later (with overlapsPathIdentifier) know which authorities have overlapping data (and so they need to read from that folder for their data).
|
|
125
|
+
// NOTE: Encodes all the data, even if it matches our spec or not. If you don't want this, you have to filter first
|
|
126
|
+
// - If this becomes an issue we COULD filter, as we can do it quickly, but I don't think it is required, as all the present usecases prefilter anyways.
|
|
127
|
+
@measureFnc
|
|
128
|
+
public static getSelfPathIdentifierTargets(values: PathValue[]): Map<string, PathValue[]> {
|
|
129
|
+
// NOTE: The file size limit is 1024 bytes. But we also have our folder, etc, so we want to add enough buffer
|
|
130
|
+
// - Shorter hashes means we can store more, but there's a point when the collisions make it less useful.
|
|
131
|
+
const MAX_PREFIXES_PER_FILE = 50;
|
|
132
|
+
const PREFIX_COVER_FRACTION = 0.95;
|
|
133
|
+
const TARGET_VALUES_PER_FILE = 50 * 1000;
|
|
134
|
+
if (values.length < TARGET_VALUES_PER_FILE) {
|
|
135
|
+
return new Map([[this.encodeIdentifier("remaining"), values]]);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
let ourSpec = authorityLookup.getOurSpec();
|
|
139
|
+
|
|
140
|
+
let prefixes = ourSpec.prefixes.slice();
|
|
141
|
+
sort(prefixes, x => x.prefix.length);
|
|
142
|
+
function getPrefix(path: string): string | undefined {
|
|
143
|
+
for (let prefix of prefixes) {
|
|
144
|
+
if (path.startsWith(prefix.prefix)) return prefix.prefix;
|
|
145
|
+
}
|
|
146
|
+
return undefined;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
// NOTE: If there are few enough path values for a prefix, we don't even need to calculate the routing hash.
|
|
151
|
+
let byPrefix = new Map<string | undefined, PathValue[]>();
|
|
152
|
+
for (let value of values) {
|
|
153
|
+
let prefix = getPrefix(value.path);
|
|
154
|
+
let values = byPrefix.get(prefix);
|
|
155
|
+
if (!values) {
|
|
156
|
+
values = [];
|
|
157
|
+
byPrefix.set(prefix, values);
|
|
158
|
+
}
|
|
159
|
+
values.push(value);
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
let prefixGroups = Array.from(byPrefix.entries()).map(([prefix, values]) => ({
|
|
163
|
+
prefix,
|
|
164
|
+
values,
|
|
165
|
+
}));
|
|
166
|
+
sort(prefixGroups, x => -x.values.length);
|
|
167
|
+
|
|
168
|
+
let groups: {
|
|
169
|
+
prefixes: string[];
|
|
170
|
+
values: PathValue[][];
|
|
171
|
+
count: number;
|
|
172
|
+
}[] = [];
|
|
173
|
+
let remainingValues: PathValue[][] = [];
|
|
174
|
+
groups.push({
|
|
175
|
+
prefixes: [],
|
|
176
|
+
values: [],
|
|
177
|
+
count: 0,
|
|
178
|
+
});
|
|
179
|
+
let prefixLeft = Math.round(values.length * PREFIX_COVER_FRACTION);
|
|
180
|
+
for (let prefixGroup of prefixGroups) {
|
|
181
|
+
if (prefixGroup.prefix === undefined) {
|
|
182
|
+
remainingValues.push(prefixGroup.values);
|
|
183
|
+
continue;
|
|
184
|
+
}
|
|
185
|
+
if (prefixLeft < 0) {
|
|
186
|
+
remainingValues.push(prefixGroup.values);
|
|
187
|
+
continue;
|
|
188
|
+
}
|
|
189
|
+
let last = groups[groups.length - 1];
|
|
190
|
+
if (
|
|
191
|
+
last.count > 0 && last.count + prefixGroup.values.length > TARGET_VALUES_PER_FILE
|
|
192
|
+
|| last.prefixes.length >= MAX_PREFIXES_PER_FILE
|
|
193
|
+
) {
|
|
194
|
+
groups.push({
|
|
195
|
+
prefixes: [],
|
|
196
|
+
values: [],
|
|
197
|
+
count: 0,
|
|
198
|
+
});
|
|
199
|
+
last = groups[groups.length - 1];
|
|
200
|
+
}
|
|
201
|
+
last.prefixes.push(prefixGroup.prefix);
|
|
202
|
+
last.values.push(prefixGroup.values);
|
|
203
|
+
last.count += prefixGroup.values.length;
|
|
204
|
+
prefixLeft -= prefixGroup.values.length;
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
let finalFiles = new Map<string, PathValue[]>();
|
|
209
|
+
for (let group of groups) {
|
|
210
|
+
if (group.prefixes.length === 1 && group.count > TARGET_VALUES_PER_FILE) {
|
|
211
|
+
// Split by routing hash
|
|
212
|
+
let values = group.values.flat();
|
|
213
|
+
let splitCount = Math.ceil(values.length / TARGET_VALUES_PER_FILE);
|
|
214
|
+
let byRouteGroup = new Map<number, PathValue[]>();
|
|
215
|
+
let prefix = group.prefixes[0];
|
|
216
|
+
let hashIndex = getPathDepth(prefix);
|
|
217
|
+
for (let value of values) {
|
|
218
|
+
let key = getPathIndex(value.path, hashIndex);
|
|
219
|
+
if (key === undefined) throw new Error(`Impossible, hash index ${hashIndex} is out of range for path ${value.path}, but it matched the prefix ${prefix}`);
|
|
220
|
+
let route = this.getSingleKeyRoute(key);
|
|
221
|
+
let routeIndex = Math.floor(route * splitCount);
|
|
222
|
+
let routeValues = byRouteGroup.get(routeIndex);
|
|
223
|
+
if (!routeValues) {
|
|
224
|
+
routeValues = [];
|
|
225
|
+
byRouteGroup.set(routeIndex, routeValues);
|
|
226
|
+
}
|
|
227
|
+
routeValues.push(value);
|
|
228
|
+
}
|
|
229
|
+
for (let [routeIndex, routeValues] of byRouteGroup) {
|
|
230
|
+
let rangeStart = routeIndex / splitCount;
|
|
231
|
+
let rangeEnd = (routeIndex + 1) / splitCount;
|
|
232
|
+
let identifier = this.encodeIdentifier({ prefixes: [prefix], rangeStart, rangeEnd });
|
|
233
|
+
finalFiles.set(identifier, routeValues);
|
|
234
|
+
}
|
|
235
|
+
} else {
|
|
236
|
+
let identifier = this.encodeIdentifier({ prefixes: group.prefixes, rangeStart: 0, rangeEnd: 1 });
|
|
237
|
+
finalFiles.set(identifier, group.values.flat());
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
if (remainingValues.length > 0) {
|
|
242
|
+
let identifier = this.encodeIdentifier("remaining");
|
|
243
|
+
finalFiles.set(identifier, remainingValues.flat());
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
return finalFiles;
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// NOTE: We just look at the prefix of identifier, so you can add anything after it, if you want
|
|
250
|
+
@measureFnc
|
|
251
|
+
public static overlapsPathIdentifier(authority: AuthoritySpec, identifier: string): boolean {
|
|
252
|
+
let decodeObj = this.decodeIdentifier(identifier);
|
|
253
|
+
// Match all remaining, as we don't store enough information to know what prefixes they excluded
|
|
254
|
+
if (decodeObj === "remaining") return true;
|
|
255
|
+
|
|
256
|
+
let ourHashes = authority.prefixes.map(x => this.getPrefixHash(x.prefix));
|
|
257
|
+
// If it pulled off some values as prefixes, but we did full path hashes, then the hashes are going to be totally different. And so there could easily be overlap.
|
|
258
|
+
if (decodeObj.prefixHashes.some(x => !ourHashes.includes(x))) return true;
|
|
259
|
+
// However, if we hash everything the same, then the overlap is purely a case of if the route ranges overlap.
|
|
260
|
+
return decodeObj.rangeStart < authority.routeEnd && decodeObj.rangeEnd > authority.routeStart;
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
public static isLocalPath(path: string): boolean {
|
|
266
|
+
return path.startsWith(LOCAL_DOMAIN_PATH);
|
|
267
|
+
}
|
|
268
|
+
@measureFnc
|
|
269
|
+
public static isSelfAuthority(path: string): boolean {
|
|
270
|
+
if (this.isLocalPath(path)) return true;
|
|
271
|
+
let ourSpec = authorityLookup.getOurSpec();
|
|
272
|
+
return this.matchesAuthoritySpec(ourSpec, path);
|
|
273
|
+
}
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
// NOTE: This might return overlapping specs. Presently, this is only used when we're loading our initial data, so it's fine. However, if we use this for another purpose in the future, it might cause problems. So we might need to implement a different function for that theoretical future purpose.
|
|
278
|
+
@measureFnc
|
|
279
|
+
public static getAuthoritySources(config: {
|
|
280
|
+
target: AuthoritySpec;
|
|
281
|
+
preferredNodeIds?: string[];
|
|
282
|
+
}): (AuthoritySpec & { useFullPathHash?: boolean })[] {
|
|
283
|
+
let { target } = config;
|
|
284
|
+
let allSources = authorityLookup.getTopologySync();
|
|
285
|
+
allSources = allSources.filter(x => !isOwnNodeId(x.nodeId));
|
|
286
|
+
// THIS is normal during initial server startup
|
|
287
|
+
if (!allSources.length) {
|
|
288
|
+
return [];
|
|
289
|
+
}
|
|
290
|
+
// If the target doesn't exclude default the source cannot exclude the default.
|
|
291
|
+
if (!target.excludeDefault) {
|
|
292
|
+
allSources = allSources.filter(x => !x.authoritySpec.excludeDefault);
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
// Basically... Authority specs have different prefixes, that means their overlap is inconsistent. So you can't take part of one and part of the other. That doesn't make any sense. HOWEVER, 99% of the time everything SHOULD be cohesive. AND, when it isn't, then It means we are in the process of a transition, in which case there might be two groups. If there's a hundred different cohesive groups... something is very broken
|
|
296
|
+
let cohesiveGroups: AuthoritySpec[][] = [];
|
|
297
|
+
|
|
298
|
+
let groupByPrefixHash = new Map<string, AuthoritySpec[]>();
|
|
299
|
+
for (let source of allSources) {
|
|
300
|
+
let usedPrefixes = source.authoritySpec.prefixes.map(x => x.prefix);
|
|
301
|
+
|
|
302
|
+
if (target.excludeDefault) {
|
|
303
|
+
// This relaxes the restrictions, as with no default hashes, it means if a value isn't in one of our prefixes, even if it's inconsistent in the sources, it's fine.
|
|
304
|
+
// (And... Otherwise, we can't filter these at all because otherwise the default/remaining group will be different, even if it's a superset/subset relationship)
|
|
305
|
+
let targetUsedPrefixes = new Set(target.prefixes.map(x => x.prefix));
|
|
306
|
+
usedPrefixes = usedPrefixes.filter(x => targetUsedPrefixes.has(x));
|
|
307
|
+
}
|
|
308
|
+
let prefixHash = sha256(JSON.stringify(usedPrefixes.sort()));
|
|
309
|
+
let group = groupByPrefixHash.get(prefixHash);
|
|
310
|
+
if (!group) {
|
|
311
|
+
group = [];
|
|
312
|
+
groupByPrefixHash.set(prefixHash, group);
|
|
313
|
+
}
|
|
314
|
+
group.push(source.authoritySpec);
|
|
315
|
+
}
|
|
316
|
+
cohesiveGroups = Array.from(groupByPrefixHash.values());
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
if (cohesiveGroups.length > 4) {
|
|
320
|
+
console.error(`Authority prefixes in our topology have been inconsistent, with ${cohesiveGroups.length} different groups. This SHOULD be fixable by restarting the PathValueServers. If this doesn't fix it, it means there is an issue with how we are loading our data.`);
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
function hashesSameAsTarget(spec: AuthoritySpec): boolean {
|
|
324
|
+
let targetUsedPrefixes = new Set(target.prefixes.map(x => x.prefix));
|
|
325
|
+
if (target.excludeDefault) {
|
|
326
|
+
// If target is excluding default, then it doesn't matter if the spec is including it. We're only looking for matching values in target.
|
|
327
|
+
return spec.prefixes.every(x => targetUsedPrefixes.has(x.prefix));
|
|
328
|
+
}
|
|
329
|
+
// if !targetExcludeDefault, then !spec.excludeDefault, because we filtered out other cases earlier (so we don't need to handle spec.
|
|
330
|
+
|
|
331
|
+
if (spec.prefixes.length !== target.prefixes.length) return false;
|
|
332
|
+
// Otherwise, All of the prefixes have to be identical
|
|
333
|
+
for (let prefix of spec.prefixes) {
|
|
334
|
+
if (!targetUsedPrefixes.has(prefix.prefix)) return false;
|
|
335
|
+
}
|
|
336
|
+
return true;
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
shuffle(cohesiveGroups, Math.random());
|
|
340
|
+
sort(cohesiveGroups, x => hashesSameAsTarget(x[0]) ? -1 : 1);
|
|
341
|
+
|
|
342
|
+
let preferredNodeIds = new Set(config?.preferredNodeIds ?? []);
|
|
343
|
+
for (let group of cohesiveGroups) {
|
|
344
|
+
shuffle(group, Math.random());
|
|
345
|
+
sort(group, x => preferredNodeIds.has(x.nodeId) ? -1 : 1);
|
|
346
|
+
let targetStart = target.routeStart;
|
|
347
|
+
let targetEnd = target.routeEnd;
|
|
348
|
+
// If the group doesn't hash the same way we do, then we need to expand our range to be the full range of the group, so we don't miss anything.
|
|
349
|
+
let useFullPathHash = !hashesSameAsTarget(group[0]);
|
|
350
|
+
if (useFullPathHash) {
|
|
351
|
+
targetStart = 0;
|
|
352
|
+
targetEnd = 1;
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
let missingRanges: { start: number; end: number }[] = [{
|
|
356
|
+
start: targetStart,
|
|
357
|
+
end: targetEnd,
|
|
358
|
+
}];
|
|
359
|
+
let usedParts: (AuthoritySpec & { useFullPathHash?: boolean })[] = [];
|
|
360
|
+
for (let source of group) {
|
|
361
|
+
let s = source.routeStart;
|
|
362
|
+
let e = source.routeEnd;
|
|
363
|
+
// Find if it overlaps any missing range, and if so, cut out that part of the missing change, and add the nodeId to usedNodeIds
|
|
364
|
+
for (let i = missingRanges.length - 1; i >= 0; i--) {
|
|
365
|
+
let missingRange = missingRanges[i];
|
|
366
|
+
if (s >= missingRange.end || e <= missingRange.start) continue;
|
|
367
|
+
let startTaken = Math.max(missingRange.start, s);
|
|
368
|
+
let endTaken = Math.min(missingRange.end, e);
|
|
369
|
+
// NOTE: It's not ideal that we might have to fragment one node ID between multiple requests. However, in practice, there shouldn't be much fragmentation here. The ranges that our nodes are breaking down by should be consistent, so there's actually no overlap or subsets.
|
|
370
|
+
usedParts.push({
|
|
371
|
+
nodeId: source.nodeId,
|
|
372
|
+
routeStart: startTaken,
|
|
373
|
+
routeEnd: endTaken,
|
|
374
|
+
prefixes: target.prefixes,
|
|
375
|
+
excludeDefault: target.excludeDefault,
|
|
376
|
+
useFullPathHash,
|
|
377
|
+
});
|
|
378
|
+
missingRanges.splice(i, 1);
|
|
379
|
+
// Add back the parts we didn't overlap
|
|
380
|
+
if (missingRange.start < s) {
|
|
381
|
+
missingRanges.push({
|
|
382
|
+
start: missingRange.start,
|
|
383
|
+
end: s,
|
|
384
|
+
});
|
|
385
|
+
}
|
|
386
|
+
if (missingRange.end > e) {
|
|
387
|
+
missingRanges.push({
|
|
388
|
+
start: e,
|
|
389
|
+
end: missingRange.end,
|
|
390
|
+
});
|
|
391
|
+
}
|
|
392
|
+
}
|
|
393
|
+
if (missingRanges.length === 0) break;
|
|
394
|
+
}
|
|
395
|
+
if (missingRanges.length === 0) {
|
|
396
|
+
if (!hashesSameAsTarget(group[0])) {
|
|
397
|
+
console.warn(`Found a cohesive group that doesn't hash the same way we do. Expanding our range to be the full range of the group. This will be slower than it needs to.`);
|
|
398
|
+
}
|
|
399
|
+
return usedParts;
|
|
400
|
+
}
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
allSources = authorityLookup.getTopologySync().filter(x => !isOwnNodeId(x.nodeId));
|
|
404
|
+
console.error(`No consistent authority sources found for our entire range of ${target.routeStart} to ${target.routeEnd}. We tried ${cohesiveGroups.length} cohesive groups, from ${allSources.length} sources. Falling back to all sources (which will be slow).`);
|
|
405
|
+
return allSources.map(x => x.authoritySpec);
|
|
406
|
+
}
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
@measureFnc
|
|
410
|
+
public static getChildReadNodes(path: string, config?: {
|
|
411
|
+
preferredNodeIds?: string[];
|
|
412
|
+
}): {
|
|
413
|
+
// By default we hash the key directly under the path. However, If that is not how our nodes were sharded, as in they didn't use it as a prefix, then they will be sharded by hashing the full path, and so when we receive the data, we need to keep this in mind and filter the data based on the full path hash.
|
|
414
|
+
useFullPathHash?: boolean;
|
|
415
|
+
|
|
416
|
+
// NOTE: If at all possible, we will cover all ranges. Node of the returned nodes will be redundant.
|
|
417
|
+
// - Sorted by range.start
|
|
418
|
+
nodes: {
|
|
419
|
+
nodeId: string;
|
|
420
|
+
// The range of hashes this node owns, for the child keys of path
|
|
421
|
+
// (If the node doesn't restrict the range, it will just be { start: 0, end: 1 })
|
|
422
|
+
range: { start: number; end: number };
|
|
423
|
+
}[];
|
|
424
|
+
} {
|
|
425
|
+
if (this.isSelfAuthority(path)) {
|
|
426
|
+
return { nodes: [{ nodeId: getOwnNodeId(), range: { start: 0, end: 1 } }] };
|
|
427
|
+
}
|
|
428
|
+
|
|
429
|
+
// If a prefix is a parent of path, then it is the same as matching just the path directly
|
|
430
|
+
// (If our prefix directly equals one of the other matches, then it's more complicated, As then, the child keys of path are what is hashed, and so all the children will have different routes, so we might match multiple nodes. The same thing if we're matching the remaining case, in which case it's a full path hash, so the child key matters, and again, different routes).
|
|
431
|
+
// - The different route case is how the FuntionRunner works, and without it large databases couldn't run functions. However, most applications won't directly use it.
|
|
432
|
+
let allSources = authorityLookup.getTopologySync();
|
|
433
|
+
allSources = allSources.filter(x => !isOwnNodeId(x.nodeId));
|
|
434
|
+
let nestedMatches = allSources.filter(x =>
|
|
435
|
+
x.authoritySpec.prefixes.some(y => path.startsWith(y.prefix) && y.prefix !== path)
|
|
436
|
+
&& this.matchesAuthoritySpec(x.authoritySpec, path)
|
|
437
|
+
);
|
|
438
|
+
if (nestedMatches.length > 0) {
|
|
439
|
+
shuffle(nestedMatches, Math.random());
|
|
440
|
+
let preferredNodeIds = new Set(config?.preferredNodeIds ?? []);
|
|
441
|
+
sort(nestedMatches, x => preferredNodeIds.has(x.nodeId) ? -1 : 1);
|
|
442
|
+
return {
|
|
443
|
+
nodes: nestedMatches.map(x => ({
|
|
444
|
+
nodeId: x.nodeId,
|
|
445
|
+
// NOTE: Our path is picked by the prefix, and the prefix only hashes the direct child, and we're more deeply nested than that, which means... the route for all of our children will be identical, so this node matches all of our children.
|
|
446
|
+
range: { start: 0, end: 1 },
|
|
447
|
+
})),
|
|
448
|
+
};
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
let fullSources = this.getAuthoritySources({
|
|
452
|
+
target: {
|
|
453
|
+
nodeId: "",
|
|
454
|
+
prefixes: [{
|
|
455
|
+
prefix: path,
|
|
456
|
+
hashIndex: getPathDepth(path),
|
|
457
|
+
}],
|
|
458
|
+
routeStart: 0,
|
|
459
|
+
routeEnd: 1,
|
|
460
|
+
excludeDefault: true,
|
|
461
|
+
},
|
|
462
|
+
preferredNodeIds: config?.preferredNodeIds,
|
|
463
|
+
});
|
|
464
|
+
if (fullSources.length === 0) return { nodes: [] };
|
|
465
|
+
|
|
466
|
+
return {
|
|
467
|
+
useFullPathHash: fullSources.some(x => x.useFullPathHash),
|
|
468
|
+
nodes: fullSources.map(x => ({
|
|
469
|
+
nodeId: x.nodeId,
|
|
470
|
+
range: { start: x.routeStart, end: x.routeEnd },
|
|
471
|
+
})),
|
|
472
|
+
};
|
|
473
|
+
}
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
|
|
477
|
+
@measureFnc
|
|
478
|
+
public static getReadyAuthorities(path: string): AuthorityEntry[] {
|
|
479
|
+
return this.getAllAuthorities(path).filter(x => x.isReady);
|
|
480
|
+
}
|
|
481
|
+
@measureFnc
|
|
482
|
+
public static getReadyAuthority(path: string): AuthorityEntry | undefined {
|
|
483
|
+
let candidates = authorityLookup.getTopologySync();
|
|
484
|
+
shuffle(candidates, Math.random());
|
|
485
|
+
for (let candidate of candidates) {
|
|
486
|
+
if (!candidate.isReady) continue;
|
|
487
|
+
if (this.matchesAuthoritySpec(candidate.authoritySpec, path)) {
|
|
488
|
+
return candidate;
|
|
489
|
+
}
|
|
490
|
+
}
|
|
491
|
+
return undefined;
|
|
492
|
+
}
|
|
493
|
+
@measureFnc
public static getAllAuthorities(path: string): AuthorityEntry[] {
    // Every ready topology entry whose authority spec matches the given path.
    // (Entries that are not ready are skipped, despite the "All" in the name.)
    const topology = authorityLookup.getTopologySync();
    return topology.filter(candidate =>
        candidate.isReady
        && this.matchesAuthoritySpec(candidate.authoritySpec, path)
    );
}
|
|
505
|
+
|
|
506
|
+
@measureFnc
public static getAllAuthorityNodes(path: string): string[] {
    // Distinct nodeIds among the authorities matching this path.
    const nodeIds = this.getAllAuthorities(path).map(entry => entry.nodeId);
    return unique(nodeIds);
}
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
|
|
514
|
+
// Returns nodeId => T[] (each T might be mapped to many nodeIds)
|
|
515
|
+
@measureFnc
|
|
516
|
+
public static getAllAuthoritiesForValues<T extends { path: string }>(values: T[]): Map<string, T[]> {
|
|
517
|
+
let valuesPerOtherAuthority = new Map<string, T[]>();
|
|
518
|
+
for (let pathValue of values) {
|
|
519
|
+
let otherAuthorities = PathRouter.getAllAuthorityNodes(pathValue.path);
|
|
520
|
+
for (let otherAuthority of otherAuthorities) {
|
|
521
|
+
if (isOwnNodeId(otherAuthority)) continue;
|
|
522
|
+
|
|
523
|
+
let values = valuesPerOtherAuthority.get(otherAuthority);
|
|
524
|
+
if (!values) {
|
|
525
|
+
values = [];
|
|
526
|
+
valuesPerOtherAuthority.set(otherAuthority, values);
|
|
527
|
+
}
|
|
528
|
+
values.push(pathValue);
|
|
529
|
+
}
|
|
530
|
+
}
|
|
531
|
+
return valuesPerOtherAuthority;
|
|
532
|
+
}
|
|
533
|
+
|
|
534
|
+
}
|
|
535
|
+
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import { cacheLimited } from "socket-function/src/caching";
|
|
2
|
+
import { AuthoritySpec, PathRouter } from "./PathRouter";
|
|
3
|
+
import { sha256 } from "js-sha256";
|
|
4
|
+
import { getPathFromStr } from "../path";
|
|
5
|
+
|
|
6
|
+
function getPrefixHash(prefix: string): string {
|
|
7
|
+
return Buffer.from(sha256(prefix), "hex").toString("base64").slice(0, 12);
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
// So... should be use unicode characters?
|
|
11
|
+
// - Both to ensure all of our code supports them in keys, and because it allows us to us a lot fewer characters.
|
|
12
|
+
// NOTE: If we want, we could add code to prevent users from creating keys that use these. However, I think it's fine, because the querysub server is still the first node you talk to, so it can throttle your traffic. ALSO, ideally all of your traffic DOES go to the same PathValueServer. That's why prefix routing exists. And PathValueServers should be able to handle A LOT of traffic.
|
|
13
|
+
let keySpecialIdentifier = (
|
|
14
|
+
"ROUTE_"
|
|
15
|
+
+ String.fromCharCode(0xE1A4)
|
|
16
|
+
+ String.fromCharCode(0xE8B2)
|
|
17
|
+
+ String.fromCharCode(0xF0C9)
|
|
18
|
+
+ String.fromCharCode(0xF4D7)
|
|
19
|
+
);
|
|
20
|
+
|
|
21
|
+
// NOTE: ONLY works for direct accesses, not for child key accesses. Also, it only works if the prefix is matched. If nothing matches the prefix, then this will actually make it not match any authorities.
|
|
22
|
+
export function createRoutingOverrideKey(config: {
|
|
23
|
+
originalKey: string;
|
|
24
|
+
routeKey: string;
|
|
25
|
+
// This is not the prefix that it has to match. This is the prefix we remap it to. And so anything which matches this remap prefix will match this routing key.
|
|
26
|
+
remappedPrefix: string;
|
|
27
|
+
}) {
|
|
28
|
+
let { originalKey, routeKey, remappedPrefix } = config;
|
|
29
|
+
let route = PathRouter.getSingleKeyRoute(routeKey);
|
|
30
|
+
return keySpecialIdentifier + "!" + getPrefixHash(remappedPrefix) + "!" + route.toString().slice(0, 7) + "!" + originalKey;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
export function getRoutingOverride(path: string): {
|
|
34
|
+
route: number;
|
|
35
|
+
prefixHash: string;
|
|
36
|
+
} | undefined {
|
|
37
|
+
if (!path.includes(keySpecialIdentifier)) return undefined;
|
|
38
|
+
let parts = getPathFromStr(path);
|
|
39
|
+
for (let part of parts) {
|
|
40
|
+
let obj = getRoutingOverridePart(part);
|
|
41
|
+
if (obj) return obj;
|
|
42
|
+
}
|
|
43
|
+
return undefined;
|
|
44
|
+
}
|
|
45
|
+
export function getRoutingOverridePart(part: string): {
|
|
46
|
+
prefixHash: string;
|
|
47
|
+
route: number;
|
|
48
|
+
} | undefined {
|
|
49
|
+
if (!part.startsWith(keySpecialIdentifier)) return undefined;
|
|
50
|
+
let parts = part.split("!");
|
|
51
|
+
if (parts.length < 4) return undefined;
|
|
52
|
+
let prefixHash = parts[1];
|
|
53
|
+
let route = parseFloat(parts[2]);
|
|
54
|
+
if (isNaN(route)) return undefined;
|
|
55
|
+
return {
|
|
56
|
+
prefixHash,
|
|
57
|
+
route,
|
|
58
|
+
};
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
export const hasPrefixHash = cacheLimited(1000 * 10,
|
|
62
|
+
(config: { spec: AuthoritySpec, prefixHash: string }) => {
|
|
63
|
+
let { spec, prefixHash } = config;
|
|
64
|
+
for (let prefix of spec.prefixes) {
|
|
65
|
+
if (getPrefixHash(prefix.prefix) === prefixHash) {
|
|
66
|
+
return true;
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
return false;
|
|
70
|
+
}
|
|
71
|
+
);
|
|
72
|
+
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import path from "path";
|
|
2
|
+
import { AuthoritySpec } from "./PathRouter";
|
|
3
|
+
import { getPathDepth } from "../path";
|
|
4
|
+
import { getAuthorityRange, getAuthorityExcludeDefault, getAuthorityPrefix } from "../config";
|
|
5
|
+
import { getShardPrefixes } from "./ShardPrefixes";
|
|
6
|
+
import { getOwnNodeId } from "../-f-node-discovery/NodeDiscovery";
|
|
7
|
+
|
|
8
|
+
export async function getOurAuthoritySpec(defaultToAll: true): Promise<AuthoritySpec>;
|
|
9
|
+
export async function getOurAuthoritySpec(defaultToAll?: boolean): Promise<AuthoritySpec | undefined>;
|
|
10
|
+
export async function getOurAuthoritySpec(defaultToAll?: boolean): Promise<AuthoritySpec | undefined> {
|
|
11
|
+
let prefixes = await getShardPrefixes();
|
|
12
|
+
|
|
13
|
+
const range = getAuthorityRange();
|
|
14
|
+
const excludeDefault = getAuthorityExcludeDefault();
|
|
15
|
+
const cmdPrefixes = getAuthorityPrefix();
|
|
16
|
+
|
|
17
|
+
if (!range) {
|
|
18
|
+
if (defaultToAll) {
|
|
19
|
+
return getAllAuthoritySpec();
|
|
20
|
+
}
|
|
21
|
+
return undefined;
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
const [startStr, endStr] = range.split("-");
|
|
25
|
+
const rangeStart = parseFloat(startStr);
|
|
26
|
+
const rangeEnd = parseFloat(endStr);
|
|
27
|
+
|
|
28
|
+
if (isNaN(rangeStart) || isNaN(rangeEnd)) {
|
|
29
|
+
throw new Error(`Invalid authority range, should be in the format "0.5-1.0": ${range}`);
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
let usePrefixes = prefixes;
|
|
33
|
+
if (cmdPrefixes && cmdPrefixes.length > 0) {
|
|
34
|
+
usePrefixes = cmdPrefixes;
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
return {
|
|
38
|
+
nodeId: "",
|
|
39
|
+
routeStart: rangeStart,
|
|
40
|
+
routeEnd: rangeEnd,
|
|
41
|
+
prefixes: usePrefixes.map(prefix => ({
|
|
42
|
+
prefix,
|
|
43
|
+
hashIndex: getPathDepth(prefix),
|
|
44
|
+
})),
|
|
45
|
+
excludeDefault: excludeDefault || undefined,
|
|
46
|
+
};
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
export function getEmptyAuthoritySpec(): AuthoritySpec {
|
|
51
|
+
return {
|
|
52
|
+
nodeId: "",
|
|
53
|
+
routeStart: -1,
|
|
54
|
+
routeEnd: -1,
|
|
55
|
+
prefixes: [],
|
|
56
|
+
};
|
|
57
|
+
}
|
|
58
|
+
export function getAllAuthoritySpec(): AuthoritySpec {
|
|
59
|
+
return {
|
|
60
|
+
nodeId: "",
|
|
61
|
+
routeStart: 0,
|
|
62
|
+
routeEnd: 1,
|
|
63
|
+
prefixes: [],
|
|
64
|
+
};
|
|
65
|
+
}
|