querysub 0.437.0 → 0.439.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.js +50 -50
- package/bin/deploy.js +0 -0
- package/bin/function.js +0 -0
- package/bin/server.js +0 -0
- package/costsBenefits.txt +115 -115
- package/deploy.ts +2 -2
- package/package.json +2 -2
- package/spec.txt +1192 -1192
- package/src/-a-archives/archives.ts +202 -202
- package/src/-a-archives/archivesDisk.ts +454 -454
- package/src/-a-auth/certs.ts +540 -540
- package/src/-a-auth/node-forge-ed25519.d.ts +16 -16
- package/src/-b-authorities/dnsAuthority.ts +138 -138
- package/src/-c-identity/IdentityController.ts +258 -258
- package/src/-d-trust/NetworkTrust2.ts +180 -180
- package/src/-e-certs/EdgeCertController.ts +252 -252
- package/src/-e-certs/certAuthority.ts +201 -201
- package/src/-f-node-discovery/NodeDiscovery.ts +640 -640
- package/src/-g-core-values/NodeCapabilities.ts +200 -200
- package/src/-h-path-value-serialize/stringSerializer.ts +175 -175
- package/src/0-path-value-core/PathValueCommitter.ts +468 -468
- package/src/0-path-value-core/PathValueController.ts +0 -2
- package/src/0-path-value-core/archiveLocks/archiveSnapshots.ts +37 -1
- package/src/0-path-value-core/pathValueCore.ts +12 -0
- package/src/2-proxy/PathValueProxyWatcher.ts +2542 -2542
- package/src/2-proxy/TransactionDelayer.ts +94 -94
- package/src/2-proxy/pathDatabaseProxyBase.ts +36 -36
- package/src/2-proxy/pathValueProxy.ts +159 -159
- package/src/3-path-functions/PathFunctionRunner.ts +24 -13
- package/src/3-path-functions/PathFunctionRunnerMain.ts +87 -87
- package/src/3-path-functions/pathFunctionLoader.ts +516 -516
- package/src/3-path-functions/tests/rejectTest.ts +76 -76
- package/src/4-deploy/deployCheck.ts +6 -6
- package/src/4-dom/css.tsx +29 -29
- package/src/4-dom/cssTypes.d.ts +211 -211
- package/src/4-dom/qreact.tsx +2799 -2799
- package/src/4-dom/qreactTest.tsx +410 -410
- package/src/4-querysub/permissions.ts +335 -335
- package/src/4-querysub/querysubPrediction.ts +483 -483
- package/src/5-diagnostics/qreactDebug.tsx +400 -346
- package/src/TestController.ts +34 -34
- package/src/bits.ts +104 -104
- package/src/buffers.ts +69 -69
- package/src/diagnostics/ActionsHistory.ts +57 -57
- package/src/diagnostics/PathDistributionInfo.tsx +9 -1
- package/src/diagnostics/listenOnDebugger.ts +71 -71
- package/src/diagnostics/logs/IndexedLogs/BufferUnitIndex.ts +1 -1
- package/src/diagnostics/logs/diskLogger.ts +6 -0
- package/src/diagnostics/misc-pages/SnapshotViewer.tsx +78 -1
- package/src/diagnostics/periodic.ts +111 -111
- package/src/diagnostics/trackResources.ts +91 -91
- package/src/diagnostics/watchdog.ts +120 -120
- package/src/errors.ts +133 -133
- package/src/forceProduction.ts +2 -2
- package/src/fs.ts +80 -80
- package/src/functional/diff.ts +857 -857
- package/src/functional/promiseCache.ts +78 -78
- package/src/functional/random.ts +8 -8
- package/src/functional/stats.ts +60 -60
- package/src/heapDumps.ts +665 -665
- package/src/https.ts +1 -1
- package/src/library-components/AspectSizedComponent.tsx +87 -87
- package/src/library-components/ButtonSelector.tsx +64 -64
- package/src/library-components/DropdownCustom.tsx +150 -150
- package/src/library-components/DropdownSelector.tsx +31 -31
- package/src/library-components/InlinePopup.tsx +66 -66
- package/src/library-components/uncaughtToast.tsx +2 -0
- package/src/misc/color.ts +29 -29
- package/src/misc/hash.ts +83 -83
- package/src/misc/ipPong.js +13 -13
- package/src/misc/networking.ts +1 -1
- package/src/misc/random.ts +44 -44
- package/src/misc.ts +196 -196
- package/src/path.ts +255 -255
- package/src/persistentLocalStore.ts +41 -41
- package/src/promise.ts +14 -14
- package/src/storage/fileSystemPointer.ts +71 -71
- package/src/test/heapProcess.ts +35 -35
- package/src/zip.ts +15 -15
- package/tsconfig.json +26 -26
- package/yarnSpec.txt +56 -56
|
@@ -1,469 +1,469 @@
|
|
|
1
|
-
import { SocketFunction } from "socket-function/SocketFunction";
|
|
2
|
-
import { delay, batchFunction } from "socket-function/src/batching";
|
|
3
|
-
import { deepCloneJSON, isNode, timeInSecond } from "socket-function/src/misc";
|
|
4
|
-
import { measureBlock, measureFnc } from "socket-function/src/profiling/measure";
|
|
5
|
-
import { isTrustedByNode } from "../-d-trust/NetworkTrust2";
|
|
6
|
-
import { areNodeIdsEqual, isOwnNodeId } from "../-f-node-discovery/NodeDiscovery";
|
|
7
|
-
import { ActionsHistory } from "../diagnostics/ActionsHistory";
|
|
8
|
-
import { errorToUndefined, logErrors, timeoutToUndefined } from "../errors";
|
|
9
|
-
import { getPathFromStr, hack_stripPackedPath } from "../path";
|
|
10
|
-
import { PathValueControllerBase } from "./PathValueController";
|
|
11
|
-
import { PathRouter } from "./PathRouter";
|
|
12
|
-
import { PathValue, MAX_ACCEPTED_CHANGE_AGE, WriteState, debugPathValuePath, compareTime, epochTime } from "./pathValueCore";
|
|
13
|
-
import { validStateComputer } from "./ValidStateComputer";
|
|
14
|
-
import debugbreak from "debugbreak";
|
|
15
|
-
import { red } from "socket-function/src/formatting/logColors";
|
|
16
|
-
import { isClient } from "../config2";
|
|
17
|
-
import { auditLog, isDebugLogEnabled } from "./auditLogs";
|
|
18
|
-
import { AuthorityEntry, authorityLookup } from "./AuthorityLookup";
|
|
19
|
-
import { debugNodeId } from "../-c-identity/IdentityController";
|
|
20
|
-
import { decodeNodeId } from "../-a-auth/certs";
|
|
21
|
-
import { decodeParentFilter, encodeParentFilter } from "./hackedPackedPathParentFiltering";
|
|
22
|
-
import { deepCloneCborx } from "../misc/cloneHelpers";
|
|
23
|
-
import { removeRange } from "../rangeMath";
|
|
24
|
-
import { registerShutdownHandler } from "../diagnostics/periodic";
|
|
25
|
-
import { getCallFactory } from "socket-function/src/nodeCache";
|
|
26
|
-
setImmediate(() => import("../1-path-client/RemoteWatcher"));
|
|
27
|
-
setImmediate(() => import("../4-querysub/Querysub"));
|
|
28
|
-
|
|
29
|
-
const MAX_SEND_TRY_COUNT = 3;
|
|
30
|
-
|
|
31
|
-
// NOTE: This isn't very efficient, but it is safer. It's quadratic depending on the number of authorities for the server. Which shouldn't be so bad. I mean, why would we have four times redundancy? That's a lot. And even then, we're sending sixteen times the amount of traffic. That's fine, I guess. I think it's more reasonable for us to have two times redundancy, maybe even three times redundancy. With two times redundancy, it's only sending the value four times, which is reasonable. And the trade-off is there's basically no way that we can lose the data.
|
|
32
|
-
// - If we only send to one authority and then they rebroadcast it, it's possible that they die after receiving the value. If we send it to both value servers but they don't re-broadcast, it's possible that we die while broadcasting the values after having only sent a few. However, by sending it to both servers and having both servers send it to the other servers, it means that if we die while broadcasting it, it's fine. And if the server dies that we want to send it to, it's fine because we send it to both servers.
|
|
33
|
-
const BROADCAST_TO_ALL_AUTHORITIES = true;
|
|
34
|
-
|
|
35
|
-
export type BatchValues = {
|
|
36
|
-
pathValues: PathValue[],
|
|
37
|
-
parentsSynced?: string[];
|
|
38
|
-
sourceNodeId: string;
|
|
39
|
-
initialTrigger?: "initialTrigger";
|
|
40
|
-
authoritySync?: boolean;
|
|
41
|
-
};
|
|
42
|
-
|
|
43
|
-
export type RemoteValueAndValidState = {
|
|
44
|
-
sourceNodeId: string;
|
|
45
|
-
pathValues: PathValue[];
|
|
46
|
-
initialTriggers: { values: Set<string>; parentPaths: Set<string> };
|
|
47
|
-
};
|
|
48
|
-
|
|
49
|
-
class PathValueCommitter {
|
|
50
|
-
private pendingCommits = new Set<Promise<unknown>>();
|
|
51
|
-
private addCommitPromise(promise: Promise<unknown>) {
|
|
52
|
-
logErrors(promise);
|
|
53
|
-
this.pendingCommits.add(promise);
|
|
54
|
-
void promise.finally(() => this.pendingCommits.delete(promise));
|
|
55
|
-
}
|
|
56
|
-
public async waitForValuesToCommit() {
|
|
57
|
-
await this.broadcastValues({ values: new Set() });
|
|
58
|
-
await Promise.all(this.pendingCommits);
|
|
59
|
-
// HACK: Wait a bit more, for the websocket to send the values (and for some batching, etc, etc)
|
|
60
|
-
//await new Promise(resolve => setTimeout(resolve, 100));
|
|
61
|
-
await delay("afterio");
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
@measureFnc
|
|
65
|
-
// Returns true if all the writes are accepted by some node (this should almost always be the case)
|
|
66
|
-
public commitValues(values: PathValue[], predictWrites: "predictWrites" | undefined): void {
|
|
67
|
-
if (values.length === 0) return;
|
|
68
|
-
ActionsHistory.OnWrite(values);
|
|
69
|
-
|
|
70
|
-
let now = Date.now();
|
|
71
|
-
|
|
72
|
-
// Validate the writes are in order (and not too old), just in case some delay in our code
|
|
73
|
-
// causes them to be too old before we even get to send them! (Or if addWrites is called
|
|
74
|
-
// from an endpoint?)
|
|
75
|
-
{
|
|
76
|
-
let maxAge = now - MAX_ACCEPTED_CHANGE_AGE;
|
|
77
|
-
let prevTime = 0;
|
|
78
|
-
for (let pathValue of values) {
|
|
79
|
-
if (pathValue.time.time < maxAge) {
|
|
80
|
-
// NOTE: LIKELY caused by synchronizing taking too long, which is likely due to the PathValueServer massively lagging
|
|
81
|
-
// - Check the caller proxyWatcher startTime vs lastSyncTime, to see how long it took from start to sync
|
|
82
|
-
// (assuming the caller is a commitFunction, and not just a general watcher. If it is a general
|
|
83
|
-
// watcher it might have taken a really long time to run, in which case check the watchFunction).
|
|
84
|
-
let message = `MAX_CHANGE_AGE EXCEEDED! Cannot commit write, that is before the max age ${pathValue.time.time} < ${maxAge}. Acceping this write would result in changes in the past that wouldn't propagate correctly`;
|
|
85
|
-
console.error(red(message));
|
|
86
|
-
debugbreak(2);
|
|
87
|
-
debugger;
|
|
88
|
-
throw new Error(message);
|
|
89
|
-
}
|
|
90
|
-
if (pathValue.time.time < prevTime) {
|
|
91
|
-
debugbreak(2);
|
|
92
|
-
debugger;
|
|
93
|
-
throw new Error(`PathValues must be in increasing order, but they were not.`);
|
|
94
|
-
}
|
|
95
|
-
if (pathValue.time.version % 1 !== 0) {
|
|
96
|
-
debugbreak(2);
|
|
97
|
-
debugger;
|
|
98
|
-
throw new Error(`PathValues must have an integer version, but they did not. Path: ${pathValue.path}, Time: ${pathValue.time.time}, Version: ${pathValue.time.version}`);
|
|
99
|
-
}
|
|
100
|
-
prevTime = pathValue.time.time;
|
|
101
|
-
}
|
|
102
|
-
}
|
|
103
|
-
|
|
104
|
-
let pathValuesToIngest: PathValue[] = [];
|
|
105
|
-
if (predictWrites) {
|
|
106
|
-
pathValuesToIngest = values;
|
|
107
|
-
} else {
|
|
108
|
-
pathValuesToIngest = values.filter(pathValue => PathRouter.isSelfAuthority(pathValue.path));
|
|
109
|
-
}
|
|
110
|
-
|
|
111
|
-
// NOTE: Error out early on the client, so we get better errors AND so we can avoid even ingesting the values
|
|
112
|
-
// TODO: Move this error to PathValueProxyWatcher, so it can be thrown on the exact line that does the write.
|
|
113
|
-
// - This can be down by caching path (probably just the domain), and telling PathValueProxyWatcher
|
|
114
|
-
// to throw on any accesses to it.
|
|
115
|
-
// - PathValueProxyWatcher could even populate this cache itself on reads, so it will probably always
|
|
116
|
-
// thrown on the first write attempt.
|
|
117
|
-
// (It could even populate on the first domain, so it would always populate on the first write
|
|
118
|
-
// on the current domain).
|
|
119
|
-
if (!isNode()) {
|
|
120
|
-
let remoteWrites = values.filter(pathValue => !PathRouter.isSelfAuthority(pathValue.path));
|
|
121
|
-
if (remoteWrites.length > 0) {
|
|
122
|
-
throw new Error(`Cannot commit writes to paths that are not local to this node. Paths: ${remoteWrites.map(x => x.path).join(", ")}`);
|
|
123
|
-
}
|
|
124
|
-
}
|
|
125
|
-
|
|
126
|
-
// NOTE: This function will just ignore writes we aren't an authority on, so we can just give it everything.
|
|
127
|
-
validStateComputer.ingestValuesAndValidStates({
|
|
128
|
-
pathValues: pathValuesToIngest,
|
|
129
|
-
parentSyncs: [],
|
|
130
|
-
initialTriggers: { values: new Set(), parentPaths: new Set() },
|
|
131
|
-
});
|
|
132
|
-
|
|
133
|
-
let remoteValues = values.filter(x => !PathRouter.isSelfAuthority(x.path));
|
|
134
|
-
|
|
135
|
-
if (remoteValues.length > 0) {
|
|
136
|
-
this.addCommitPromise(this.broadcastValues({ values: new Set(remoteValues) }));
|
|
137
|
-
}
|
|
138
|
-
|
|
139
|
-
// If we're the authority on it, we should still share it with the other authorities.
|
|
140
|
-
let stillShareValues = values.filter(x => !PathRouter.isLocalPath(x.path) && PathRouter.isSelfAuthority(x.path));
|
|
141
|
-
if (stillShareValues.length > 0) {
|
|
142
|
-
this.addCommitPromise(PathValueControllerBase.authorityShareValues({ pathValues: stillShareValues }));
|
|
143
|
-
}
|
|
144
|
-
}
|
|
145
|
-
|
|
146
|
-
private broadcastValues = batchFunction(
|
|
147
|
-
{ delay: 10, throttleWindow: 500, noMeasure: true },
|
|
148
|
-
async function internal_forwardWrites(valuesBatched: {
|
|
149
|
-
values: Set<PathValue>;
|
|
150
|
-
tryCount?: number;
|
|
151
|
-
}[]) {
|
|
152
|
-
let values = new Set(valuesBatched.flatMap(x => Array.from(x.values)));
|
|
153
|
-
if (values.size === 0) return;
|
|
154
|
-
let tryCountPerValue = new Map<PathValue, number>();
|
|
155
|
-
for (let list of valuesBatched) {
|
|
156
|
-
for (let value of list.values) {
|
|
157
|
-
if (!list.tryCount) continue;
|
|
158
|
-
tryCountPerValue.set(value, list.tryCount);
|
|
159
|
-
}
|
|
160
|
-
}
|
|
161
|
-
if (tryCountPerValue.size > 0) {
|
|
162
|
-
console.info(`Syncing all authorities to ensure we have the latest values, due to failed writes.`);
|
|
163
|
-
await authorityLookup.syncAllNow();
|
|
164
|
-
}
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
let valuesPerOtherAuthority = new Map<string, PathValue[]>();
|
|
168
|
-
for (let pathValue of values) {
|
|
169
|
-
if (isDebugLogEnabled()) {
|
|
170
|
-
// let valueStr: string | undefined;
|
|
171
|
-
// if (typeof pathValue.value === "boolean" || typeof pathValue.value === "number") {
|
|
172
|
-
// valueStr = pathValue.value.toString();
|
|
173
|
-
// } else if (typeof pathValue.value === "string") {
|
|
174
|
-
// valueStr = pathValue.value.slice(0, 100);
|
|
175
|
-
// if (valueStr.length < pathValue.value.length) {
|
|
176
|
-
// valueStr += `[+${pathValue.value.length - valueStr.length}]`;
|
|
177
|
-
// }
|
|
178
|
-
// } else if (Buffer.isBuffer(pathValue.value)) {
|
|
179
|
-
// valueStr = `Buffer(${pathValue.value.length})`;
|
|
180
|
-
// } else if (pathValue.value === null) {
|
|
181
|
-
// valueStr = "{null}";
|
|
182
|
-
// } else {
|
|
183
|
-
// valueStr = `{${typeof pathValue.value}}`;
|
|
184
|
-
// }
|
|
185
|
-
auditLog("CREATE VALUE", {
|
|
186
|
-
path: pathValue.path,
|
|
187
|
-
timeId: pathValue.time.time,
|
|
188
|
-
timeIdFull: pathValue.time,
|
|
189
|
-
source: pathValue.source,
|
|
190
|
-
transparent: pathValue.isTransparent,
|
|
191
|
-
// value: valueStr,
|
|
192
|
-
event: pathValue.event,
|
|
193
|
-
// NOTE: I think it'd be too expensive to log all the locks. There's one thing to log every time we read a value or create a value, but to log all of the dependencies for each write path would be O(locksPerWrite * 2 * writes), which is just too much, and could easily result in ten thousand logs per write. I think it will be fine because if there's a rejection, then that will tell us the time ID that the rejected value was reading, Which we can infer to know at least one of the locks, as in the most important lock that that right had.
|
|
194
|
-
});
|
|
195
|
-
}
|
|
196
|
-
|
|
197
|
-
let otherAuthorities = PathRouter.getAllAuthorities(pathValue.path);
|
|
198
|
-
otherAuthorities = otherAuthorities.filter(x => !isOwnNodeId(x.nodeId));
|
|
199
|
-
if (otherAuthorities.length === 0) {
|
|
200
|
-
validStateComputer.ingestValuesAndValidStates({
|
|
201
|
-
pathValues: [{
|
|
202
|
-
...pathValue,
|
|
203
|
-
valid: false,
|
|
204
|
-
}],
|
|
205
|
-
parentSyncs: [],
|
|
206
|
-
initialTriggers: { values: new Set(), parentPaths: new Set() },
|
|
207
|
-
});
|
|
208
|
-
console.error(`There are no authorities for path ${pathValue.path}. The write will be lost.`, {
|
|
209
|
-
path: pathValue.path,
|
|
210
|
-
timeId: pathValue.time.time,
|
|
211
|
-
source: pathValue.source,
|
|
212
|
-
otherAuthorities,
|
|
213
|
-
});
|
|
214
|
-
continue;
|
|
215
|
-
}
|
|
216
|
-
function sendToAuthority(otherAuthority: AuthorityEntry) {
|
|
217
|
-
let values = valuesPerOtherAuthority.get(otherAuthority.nodeId);
|
|
218
|
-
if (!values) {
|
|
219
|
-
values = [];
|
|
220
|
-
valuesPerOtherAuthority.set(otherAuthority.nodeId, values);
|
|
221
|
-
}
|
|
222
|
-
values.push(pathValue);
|
|
223
|
-
}
|
|
224
|
-
if (BROADCAST_TO_ALL_AUTHORITIES) {
|
|
225
|
-
for (let otherAuthority of otherAuthorities) {
|
|
226
|
-
sendToAuthority(otherAuthority);
|
|
227
|
-
}
|
|
228
|
-
} else {
|
|
229
|
-
// NOTE: Path routing is pretty strict. Once we disconnect, it'll remove it from the list of authorities. And if we can't talk to it, the retry logic will call us again.
|
|
230
|
-
let otherAuthority = otherAuthorities[~~(Math.random() * otherAuthorities.length)];
|
|
231
|
-
sendToAuthority(otherAuthority);
|
|
232
|
-
}
|
|
233
|
-
}
|
|
234
|
-
|
|
235
|
-
// Don't send to bad nodes for 60 seconds
|
|
236
|
-
const nodeIgnoreTime = Date.now() - 1000 * 60;
|
|
237
|
-
|
|
238
|
-
let promises = Array.from(valuesPerOtherAuthority.entries()).map(async ([otherAuthority, values]) => {
|
|
239
|
-
|
|
240
|
-
let disconnected = SocketFunction.getLastDisconnectTime(otherAuthority);
|
|
241
|
-
if (!SocketFunction.isNodeConnected(otherAuthority) && disconnected && disconnected > nodeIgnoreTime) {
|
|
242
|
-
// If it disconnected recently... don't send to it for a little bit, so we don't spend
|
|
243
|
-
// all of our time spamming disconnected nodes
|
|
244
|
-
console.log(`disconnected at ${disconnected} > ${nodeIgnoreTime}`);
|
|
245
|
-
return;
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
let isTrusted = await isTrustedByNode(otherAuthority);
|
|
249
|
-
if (!isTrusted) {
|
|
250
|
-
console.log(`not trusted`);
|
|
251
|
-
throw new Error(`Tried to write to paths on authorities not trusted by us. You probably need to call a function instead of directly writing to the server schema. Authority: ${otherAuthority}, Paths: ${values.map(x => getPathFromStr(x.path).join(".")).join(", ")}`);
|
|
252
|
-
}
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
// NOTE: We don't retry on failure, because... we broadcasted it anyways, AND this is supposed
|
|
256
|
-
// to be on a trusted node (which should always be a server), so... either our network went
|
|
257
|
-
// down, or all nodes went down. Either way, it likely won't fix itself quickly, and so these
|
|
258
|
-
// writes are going to too old and therefore rejected by the time a server is up anyways.
|
|
259
|
-
|
|
260
|
-
let forwardPromise = PathValueControllerBase.createValues({
|
|
261
|
-
nodeId: otherAuthority,
|
|
262
|
-
pathValues: values,
|
|
263
|
-
});
|
|
264
|
-
logErrors(forwardPromise);
|
|
265
|
-
void forwardPromise.then(x => {
|
|
266
|
-
if (x === "refused") {
|
|
267
|
-
values = values.map(value => {
|
|
268
|
-
console.info(`Rejecting value that was refused: ${debugPathValuePath(value)}`, {
|
|
269
|
-
path: value.path,
|
|
270
|
-
timeId: value.time.time,
|
|
271
|
-
});
|
|
272
|
-
return {
|
|
273
|
-
...value,
|
|
274
|
-
valid: false,
|
|
275
|
-
};
|
|
276
|
-
});
|
|
277
|
-
|
|
278
|
-
validStateComputer.ingestValuesAndValidStates({
|
|
279
|
-
pathValues: values,
|
|
280
|
-
parentSyncs: [],
|
|
281
|
-
initialTriggers: { values: new Set(), parentPaths: new Set() },
|
|
282
|
-
});
|
|
283
|
-
}
|
|
284
|
-
});
|
|
285
|
-
// If we broadcast to all authorities, we can't also retry, as this means if one authority is down, we end up infinitely looping. And it breaks the servers and the logs.
|
|
286
|
-
if (!BROADCAST_TO_ALL_AUTHORITIES) {
|
|
287
|
-
void forwardPromise.catch(async error => {
|
|
288
|
-
let byTryCount = new Map<number, PathValue[]>();
|
|
289
|
-
for (let value of values) {
|
|
290
|
-
let tryCount = tryCountPerValue.get(value) ?? 0;
|
|
291
|
-
let arr = byTryCount.get(tryCount) ?? [];
|
|
292
|
-
arr.push(value);
|
|
293
|
-
byTryCount.set(tryCount, arr);
|
|
294
|
-
}
|
|
295
|
-
for (let [tryCount, values] of byTryCount.entries()) {
|
|
296
|
-
tryCount++;
|
|
297
|
-
if (tryCount > MAX_SEND_TRY_COUNT) {
|
|
298
|
-
console.error(`Failed to send values after ${MAX_SEND_TRY_COUNT} tries. Giving up.`, {
|
|
299
|
-
error: error.message,
|
|
300
|
-
otherAuthority,
|
|
301
|
-
count: values.length,
|
|
302
|
-
});
|
|
303
|
-
continue;
|
|
304
|
-
}
|
|
305
|
-
for (let value of values) {
|
|
306
|
-
console.error(`Retrying to send values after ${tryCount} tries.`, {
|
|
307
|
-
error: error.message,
|
|
308
|
-
otherAuthority,
|
|
309
|
-
path: value.path,
|
|
310
|
-
timeId: value.time.time,
|
|
311
|
-
});
|
|
312
|
-
}
|
|
313
|
-
void pathValueCommitter.broadcastValues({
|
|
314
|
-
values: new Set(values),
|
|
315
|
-
tryCount: tryCount,
|
|
316
|
-
});
|
|
317
|
-
}
|
|
318
|
-
});
|
|
319
|
-
}
|
|
320
|
-
|
|
321
|
-
pathValueCommitter.addCommitPromise(forwardPromise);
|
|
322
|
-
});
|
|
323
|
-
// await, so "waitForValuesToCommit" works correctly
|
|
324
|
-
await Promise.all(promises.map(x => timeoutToUndefined(timeInSecond * 30, errorToUndefined(x))));
|
|
325
|
-
}
|
|
326
|
-
);
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
public ingestRemoteValuesAndValidStates = batchFunction(
|
|
330
|
-
{ delay: 16, throttleWindow: 1000, name: "ingestRemoteValuesAndValidStates", noMeasure: true },
|
|
331
|
-
async (batched: RemoteValueAndValidState[]) => {
|
|
332
|
-
const { remoteWatcher } = await import("../1-path-client/RemoteWatcher");
|
|
333
|
-
|
|
334
|
-
// NOTE: We need to ignore values if they're not who we're watching. That way, if we change the watcher, it's smooth and we don't partial data that might clobber with the data from the authority we are really using.
|
|
335
|
-
if (!isClient()) {
|
|
336
|
-
measureBlock(function ignoreUnrequestedValues() {
|
|
337
|
-
for (let batch of batched) {
|
|
338
|
-
function isWrongAuthority(path: string, value?: PathValue, type?: string) {
|
|
339
|
-
let watchingAuthorityId = remoteWatcher.getExistingWatchRemoteNodeId(path);
|
|
340
|
-
// If we AREN'T watching it... it's actually fine, we can receive any values.
|
|
341
|
-
// When we start watching, those values will get clobbered.
|
|
342
|
-
if (watchingAuthorityId === undefined) return false;
|
|
343
|
-
if (!areNodeIdsEqual(watchingAuthorityId, batch.sourceNodeId)) {
|
|
344
|
-
let valueWatchNode = remoteWatcher.getValueWatchRemoteNodeId(path);
|
|
345
|
-
if (!valueWatchNode || !areNodeIdsEqual(valueWatchNode, batch.sourceNodeId)) {
|
|
346
|
-
let candidates = PathRouter.getAllAuthorities(path);
|
|
347
|
-
require("debugbreak")(2);
|
|
348
|
-
debugger;
|
|
349
|
-
remoteWatcher.getExistingWatchRemoteNodeId(path);
|
|
350
|
-
console.warn(`Ignoring value from wrong authority. Should have been ${debugNodeId(watchingAuthorityId)}, but was received from ${debugNodeId(batch.sourceNodeId)}.`, {
|
|
351
|
-
path,
|
|
352
|
-
type,
|
|
353
|
-
timeId: value?.time.time,
|
|
354
|
-
source: value?.source,
|
|
355
|
-
sourceNodeId: debugNodeId(batch.sourceNodeId),
|
|
356
|
-
sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
|
|
357
|
-
watchingAuthorityId: debugNodeId(watchingAuthorityId),
|
|
358
|
-
watchingAuthorityNodeThreadId: decodeNodeId(watchingAuthorityId)?.threadId,
|
|
359
|
-
isTransparent: value?.isTransparent,
|
|
360
|
-
});
|
|
361
|
-
}
|
|
362
|
-
return true;
|
|
363
|
-
}
|
|
364
|
-
return false;
|
|
365
|
-
}
|
|
366
|
-
batch.pathValues = batch.pathValues.filter(value => {
|
|
367
|
-
// NOTE: See the definition for lock count for why this check isn't checking all the possible cases. Essentially, locks is often empty, and that's intentional. However, the reverse should never be true, locks should never have values when lockCount is 0.
|
|
368
|
-
if (value.lockCount === 0 && value.locks.length > 0) {
|
|
369
|
-
console.error(red(`Ignoring value with invalid lockCount. Was ${value.lockCount}, but we have ${value.locks.length} locks. locks are optional, but lockCount isn't. We should never have locks without having lockCount set. ${debugPathValuePath(value)}`));
|
|
370
|
-
return false;
|
|
371
|
-
}
|
|
372
|
-
|
|
373
|
-
if (PathRouter.isSelfAuthority(value.path)) return true;
|
|
374
|
-
if (isWrongAuthority(value.path, value, "value")) {
|
|
375
|
-
return false;
|
|
376
|
-
}
|
|
377
|
-
return true;
|
|
378
|
-
});
|
|
379
|
-
|
|
380
|
-
// NOTE: If we are the authority on it, it means we're not going to have a remote watch node ID. And if we did, it would be ourselves. So this also implicitly filters out any initial triggers for values that we are the authority on.
|
|
381
|
-
for (let path of Array.from(batch.initialTriggers.values)) {
|
|
382
|
-
if (isWrongAuthority(path, undefined, "initialTrigger")) {
|
|
383
|
-
batch.initialTriggers.values.delete(path);
|
|
384
|
-
}
|
|
385
|
-
}
|
|
386
|
-
for (let parentPath of Array.from(batch.initialTriggers.parentPaths)) {
|
|
387
|
-
if (remoteWatcher.isFinalRemoteWatchPath({ parentPath, nodeId: batch.sourceNodeId })) {
|
|
388
|
-
continue;
|
|
389
|
-
}
|
|
390
|
-
|
|
391
|
-
console.warn(`Ignoring parent path which we aren't watching. From ${debugNodeId(batch.sourceNodeId)}.`, {
|
|
392
|
-
parentPath,
|
|
393
|
-
sourceNodeId: debugNodeId(batch.sourceNodeId),
|
|
394
|
-
sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
|
|
395
|
-
});
|
|
396
|
-
batch.initialTriggers.parentPaths.delete(parentPath);
|
|
397
|
-
}
|
|
398
|
-
}
|
|
399
|
-
});
|
|
400
|
-
}
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
// path => sourceNodeId
|
|
404
|
-
let parentSyncs = new Map<string, Set<string>>();
|
|
405
|
-
|
|
406
|
-
// We need to do a bit of work to properly clear path values that are from old initial triggers. As if we receive two initial triggers, they need to clobber each other. And if we collapse it, we lose that information. So we have to do that here.
|
|
407
|
-
let finalResults = new Map<string, {
|
|
408
|
-
path: string;
|
|
409
|
-
pathValues: PathValue[];
|
|
410
|
-
initialTrigger: boolean;
|
|
411
|
-
batchIndex: number;
|
|
412
|
-
}>();
|
|
413
|
-
|
|
414
|
-
for (let batchIndex = 0; batchIndex < batched.length; batchIndex++) {
|
|
415
|
-
let batch = batched[batchIndex];
|
|
416
|
-
for (let pathValue of batch.pathValues) {
|
|
417
|
-
let initialTrigger = batch.initialTriggers.values.has(pathValue.path);
|
|
418
|
-
let results = finalResults.get(pathValue.path);
|
|
419
|
-
if (!results) {
|
|
420
|
-
results = {
|
|
421
|
-
path: pathValue.path,
|
|
422
|
-
pathValues: [],
|
|
423
|
-
initialTrigger: false,
|
|
424
|
-
batchIndex: -1,
|
|
425
|
-
};
|
|
426
|
-
finalResults.set(pathValue.path, results);
|
|
427
|
-
}
|
|
428
|
-
let isContinuedBatch = results.batchIndex === batchIndex;
|
|
429
|
-
results.batchIndex = batchIndex;
|
|
430
|
-
if (initialTrigger && !isContinuedBatch) {
|
|
431
|
-
results.pathValues = [];
|
|
432
|
-
results.initialTrigger = true;
|
|
433
|
-
}
|
|
434
|
-
results.pathValues.push(pathValue);
|
|
435
|
-
}
|
|
436
|
-
for (let parentPath of batch.initialTriggers.parentPaths) {
|
|
437
|
-
let sourceNodeIds = parentSyncs.get(parentPath);
|
|
438
|
-
if (!sourceNodeIds) {
|
|
439
|
-
sourceNodeIds = new Set();
|
|
440
|
-
parentSyncs.set(parentPath, sourceNodeIds);
|
|
441
|
-
}
|
|
442
|
-
sourceNodeIds.add(batch.sourceNodeId);
|
|
443
|
-
}
|
|
444
|
-
}
|
|
445
|
-
|
|
446
|
-
let parentPaths = new Set(parentSyncs.keys());
|
|
447
|
-
let initialValues = new Set(Array.from(finalResults.values()).filter(x => x.initialTrigger).map(x => x.path));
|
|
448
|
-
|
|
449
|
-
let parentSyncsList: { parentPath: string; sourceNodeId: string }[] = [];
|
|
450
|
-
for (let [parentPath, sourceNodeIds] of parentSyncs.entries()) {
|
|
451
|
-
for (let sourceNodeId of sourceNodeIds) {
|
|
452
|
-
parentSyncsList.push({ parentPath, sourceNodeId });
|
|
453
|
-
}
|
|
454
|
-
}
|
|
455
|
-
|
|
456
|
-
validStateComputer.ingestValuesAndValidStates({
|
|
457
|
-
pathValues: Array.from(finalResults.values()).map(x => x.pathValues).flat(),
|
|
458
|
-
parentSyncs: parentSyncsList,
|
|
459
|
-
initialTriggers: { values: initialValues, parentPaths: parentPaths },
|
|
460
|
-
});
|
|
461
|
-
},
|
|
462
|
-
);
|
|
463
|
-
}
|
|
464
|
-
|
|
465
|
-
export const pathValueCommitter = new PathValueCommitter();
|
|
466
|
-
|
|
467
|
-
registerShutdownHandler(async () => {
|
|
468
|
-
await pathValueCommitter.waitForValuesToCommit();
|
|
1
|
+
import { SocketFunction } from "socket-function/SocketFunction";
|
|
2
|
+
import { delay, batchFunction } from "socket-function/src/batching";
|
|
3
|
+
import { deepCloneJSON, isNode, timeInSecond } from "socket-function/src/misc";
|
|
4
|
+
import { measureBlock, measureFnc } from "socket-function/src/profiling/measure";
|
|
5
|
+
import { isTrustedByNode } from "../-d-trust/NetworkTrust2";
|
|
6
|
+
import { areNodeIdsEqual, isOwnNodeId } from "../-f-node-discovery/NodeDiscovery";
|
|
7
|
+
import { ActionsHistory } from "../diagnostics/ActionsHistory";
|
|
8
|
+
import { errorToUndefined, logErrors, timeoutToUndefined } from "../errors";
|
|
9
|
+
import { getPathFromStr, hack_stripPackedPath } from "../path";
|
|
10
|
+
import { PathValueControllerBase } from "./PathValueController";
|
|
11
|
+
import { PathRouter } from "./PathRouter";
|
|
12
|
+
import { PathValue, MAX_ACCEPTED_CHANGE_AGE, WriteState, debugPathValuePath, compareTime, epochTime } from "./pathValueCore";
|
|
13
|
+
import { validStateComputer } from "./ValidStateComputer";
|
|
14
|
+
import debugbreak from "debugbreak";
|
|
15
|
+
import { red } from "socket-function/src/formatting/logColors";
|
|
16
|
+
import { isClient } from "../config2";
|
|
17
|
+
import { auditLog, isDebugLogEnabled } from "./auditLogs";
|
|
18
|
+
import { AuthorityEntry, authorityLookup } from "./AuthorityLookup";
|
|
19
|
+
import { debugNodeId } from "../-c-identity/IdentityController";
|
|
20
|
+
import { decodeNodeId } from "../-a-auth/certs";
|
|
21
|
+
import { decodeParentFilter, encodeParentFilter } from "./hackedPackedPathParentFiltering";
|
|
22
|
+
import { deepCloneCborx } from "../misc/cloneHelpers";
|
|
23
|
+
import { removeRange } from "../rangeMath";
|
|
24
|
+
import { registerShutdownHandler } from "../diagnostics/periodic";
|
|
25
|
+
import { getCallFactory } from "socket-function/src/nodeCache";
|
|
26
|
+
setImmediate(() => import("../1-path-client/RemoteWatcher"));
|
|
27
|
+
setImmediate(() => import("../4-querysub/Querysub"));
|
|
28
|
+
|
|
29
|
+
// Max attempts when re-sending values to an authority after a failed forward.
// Only relevant when BROADCAST_TO_ALL_AUTHORITIES is false (see the retry path
// in broadcastValues below — retries are disabled when broadcasting to all).
const MAX_SEND_TRY_COUNT = 3;

// NOTE: This isn't very efficient, but it is safer. It's quadratic depending on the number of authorities for the server. Which shouldn't be so bad. I mean, why would we have four times redundancy? That's a lot. And even then, we're sending sixteen times the amount of traffic. That's fine, I guess. I think it's more reasonable for us to have two times redundancy, maybe even three times redundancy. With two times redundancy, it's only sending the value four times, which is reasonable. And the trade-off is there's basically no way that we can lose the data.
// - If we only send to one authority and then they rebroadcast it, it's possible that they die after receiving the value. If we send it to both value servers but they don't re-broadcast, it's possible that we die while broadcasting the values after having only sent a few. However, by sending it to both servers and having both servers send it to the other servers, it means that if we die while broadcasting it, it's fine. And if the server dies that we want to send it to, it's fine because we send it to both servers.
const BROADCAST_TO_ALL_AUTHORITIES = true;
|
|
34
|
+
|
|
35
|
+
// A batch of path values sent between nodes, plus metadata describing the
// sender and how the batch should be interpreted.
export type BatchValues = {
    // The values carried by this batch.
    pathValues: PathValue[],
    // Parent paths the sender considers fully synced — presumably lets the
    // receiver treat children of these paths as complete. TODO confirm against
    // the consumers of this type.
    parentsSynced?: string[];
    // NodeId of the node that produced/sent this batch.
    sourceNodeId: string;
    // Present when this batch is the initial snapshot for a new watch (as
    // opposed to an incremental update).
    initialTrigger?: "initialTrigger";
    // NOTE(review): presumably marks authority-to-authority synchronization
    // batches — verify against callers before relying on this.
    authoritySync?: boolean;
};
|
|
42
|
+
|
|
43
|
+
// Values received from one remote node, queued for batched ingestion by
// PathValueCommitter.ingestRemoteValuesAndValidStates.
export type RemoteValueAndValidState = {
    // NodeId the values were received from. Used to reject values arriving
    // from an authority we aren't actually watching for a given path.
    sourceNodeId: string;
    // The received values (mutated in place during ingestion filtering).
    pathValues: PathValue[];
    // Paths ("values") and parent paths for which this batch is an initial,
    // complete snapshot — earlier values for those paths get clobbered.
    initialTriggers: { values: Set<string>; parentPaths: Set<string> };
};
|
|
48
|
+
|
|
49
|
+
// Commits PathValues created on this node and ingests PathValues received from
// remote nodes:
//  - commitValues: validates writes, ingests the ones we are an authority on,
//    and queues the rest for broadcast to their authorities.
//  - broadcastValues: batched forwarding of values to remote authorities, with
//    per-value retry bookkeeping (retries only when not broadcasting to all).
//  - ingestRemoteValuesAndValidStates: batched ingestion of remote values,
//    filtering out values from authorities we aren't watching and collapsing
//    repeated initial triggers so they clobber each other correctly.
// In-flight forwarding work is tracked in pendingCommits so the shutdown
// handler can flush it via waitForValuesToCommit.
class PathValueCommitter {
    // Commit work still in flight; awaited by waitForValuesToCommit.
    private pendingCommits = new Set<Promise<unknown>>();
    // Track a commit promise until it settles. Failures are logged via
    // logErrors rather than propagated — commits are fire-and-forget from the
    // caller's perspective.
    private addCommitPromise(promise: Promise<unknown>) {
        logErrors(promise);
        this.pendingCommits.add(promise);
        void promise.finally(() => this.pendingCommits.delete(promise));
    }
    // Flushes the broadcast batcher (calling it with an empty set forces any
    // queued batch to run), waits for all pending commits, then yields once
    // more so queued I/O can drain.
    public async waitForValuesToCommit() {
        await this.broadcastValues({ values: new Set() });
        await Promise.all(this.pendingCommits);
        // HACK: Wait a bit more, for the websocket to send the values (and for some batching, etc, etc)
        //await new Promise(resolve => setTimeout(resolve, 100));
        await delay("afterio");
    }

    @measureFnc
    // Validates and commits writes. Ingests locally (all values when
    // predictWrites is set, otherwise only paths we are self-authority on),
    // then asynchronously broadcasts non-authoritative values to their
    // authorities, and shares authoritative non-local values with peer
    // authorities. Throws synchronously when values are too old, out of
    // time-order, have non-integer versions, or (when not running on node)
    // target paths we are not an authority on.
    // NOTE(review): an older comment claimed this "returns true if all the
    // writes are accepted by some node" — it actually returns void; remote
    // acceptance happens asynchronously inside broadcastValues.
    public commitValues(values: PathValue[], predictWrites: "predictWrites" | undefined): void {
        if (values.length === 0) return;
        ActionsHistory.OnWrite(values);

        let now = Date.now();

        // Validate the writes are in order (and not too old), just in case some delay in our code
        // causes them to be too old before we even get to send them! (Or if addWrites is called
        // from an endpoint?)
        {
            let maxAge = now - MAX_ACCEPTED_CHANGE_AGE;
            let prevTime = 0;
            for (let pathValue of values) {
                if (pathValue.time.time < maxAge) {
                    // NOTE: LIKELY caused by synchronizing taking too long, which is likely due to the PathValueServer massively lagging
                    // - Check the caller proxyWatcher startTime vs lastSyncTime, to see how long it took from start to sync
                    //      (assuming the caller is a commitFunction, and not just a general watcher. If it is a general
                    //      watcher it might have taken a really long time to run, in which case check the watchFunction).
                    // (sic: "Acceping" — typo lives in the runtime error string; left as-is here.)
                    let message = `MAX_CHANGE_AGE EXCEEDED! Cannot commit write, that is before the max age ${pathValue.time.time} < ${maxAge}. Acceping this write would result in changes in the past that wouldn't propagate correctly`;
                    console.error(red(message));
                    debugbreak(2);
                    debugger;
                    throw new Error(message);
                }
                if (pathValue.time.time < prevTime) {
                    debugbreak(2);
                    debugger;
                    throw new Error(`PathValues must be in increasing order, but they were not.`);
                }
                if (pathValue.time.version % 1 !== 0) {
                    debugbreak(2);
                    debugger;
                    throw new Error(`PathValues must have an integer version, but they did not. Path: ${pathValue.path}, Time: ${pathValue.time.time}, Version: ${pathValue.time.version}`);
                }
                prevTime = pathValue.time.time;
            }
        }

        // predictWrites ingests everything optimistically; otherwise only the
        // paths this node is an authority for are ingested locally.
        let pathValuesToIngest: PathValue[] = [];
        if (predictWrites) {
            pathValuesToIngest = values;
        } else {
            pathValuesToIngest = values.filter(pathValue => PathRouter.isSelfAuthority(pathValue.path));
        }

        // NOTE: Error out early on the client, so we get better errors AND so we can avoid even ingesting the values
        // TODO: Move this error to PathValueProxyWatcher, so it can be thrown on the exact line that does the write.
        //  - This can be down by caching path (probably just the domain), and telling PathValueProxyWatcher
        //      to throw on any accesses to it.
        //  - PathValueProxyWatcher could even populate this cache itself on reads, so it will probably always
        //      thrown on the first write attempt.
        //      (It could even populate on the first domain, so it would always populate on the first write
        //      on the current domain).
        if (!isNode()) {
            let remoteWrites = values.filter(pathValue => !PathRouter.isSelfAuthority(pathValue.path));
            if (remoteWrites.length > 0) {
                throw new Error(`Cannot commit writes to paths that are not local to this node. Paths: ${remoteWrites.map(x => x.path).join(", ")}`);
            }
        }

        // NOTE: This function will just ignore writes we aren't an authority on, so we can just give it everything.
        validStateComputer.ingestValuesAndValidStates({
            pathValues: pathValuesToIngest,
            parentSyncs: [],
            initialTriggers: { values: new Set(), parentPaths: new Set() },
        });

        // Values whose authority is some other node: queue them for broadcast.
        let remoteValues = values.filter(x => !PathRouter.isSelfAuthority(x.path));

        if (remoteValues.length > 0) {
            this.addCommitPromise(this.broadcastValues({ values: new Set(remoteValues) }));
        }

        // If we're the authority on it, we should still share it with the other authorities.
        let stillShareValues = values.filter(x => !PathRouter.isLocalPath(x.path) && PathRouter.isSelfAuthority(x.path));
        if (stillShareValues.length > 0) {
            this.addCommitPromise(PathValueControllerBase.authorityShareValues({ pathValues: stillShareValues }));
        }
    }

    // Batched forwarding of values to remote authorities. Each queued call
    // contributes a set of values (and an optional tryCount carried over from
    // a previous failed attempt); the batch is merged, routed per authority,
    // and sent. With BROADCAST_TO_ALL_AUTHORITIES the values go to every
    // remote authority for their path; otherwise to one random authority with
    // retry-on-failure up to MAX_SEND_TRY_COUNT.
    private broadcastValues = batchFunction(
        { delay: 10, throttleWindow: 500, noMeasure: true },
        async function internal_forwardWrites(valuesBatched: {
            values: Set<PathValue>;
            tryCount?: number;
        }[]) {
            // Merge all queued sets; bail if this flush carries nothing.
            let values = new Set(valuesBatched.flatMap(x => Array.from(x.values)));
            if (values.size === 0) return;
            // Per-value retry counts, keyed on PathValue object identity — the
            // retry path re-queues the same objects, so identity is preserved.
            let tryCountPerValue = new Map<PathValue, number>();
            for (let list of valuesBatched) {
                for (let value of list.values) {
                    if (!list.tryCount) continue;
                    tryCountPerValue.set(value, list.tryCount);
                }
            }
            // Any retried values imply a previous failure; refresh the
            // authority list before routing again.
            if (tryCountPerValue.size > 0) {
                console.info(`Syncing all authorities to ensure we have the latest values, due to failed writes.`);
                await authorityLookup.syncAllNow();
            }


            // authority nodeId => values to forward to it
            let valuesPerOtherAuthority = new Map<string, PathValue[]>();
            for (let pathValue of values) {
                if (isDebugLogEnabled()) {
                    // let valueStr: string | undefined;
                    // if (typeof pathValue.value === "boolean" || typeof pathValue.value === "number") {
                    //     valueStr = pathValue.value.toString();
                    // } else if (typeof pathValue.value === "string") {
                    //     valueStr = pathValue.value.slice(0, 100);
                    //     if (valueStr.length < pathValue.value.length) {
                    //         valueStr += `[+${pathValue.value.length - valueStr.length}]`;
                    //     }
                    // } else if (Buffer.isBuffer(pathValue.value)) {
                    //     valueStr = `Buffer(${pathValue.value.length})`;
                    // } else if (pathValue.value === null) {
                    //     valueStr = "{null}";
                    // } else {
                    //     valueStr = `{${typeof pathValue.value}}`;
                    // }
                    auditLog("CREATE VALUE", {
                        path: pathValue.path,
                        timeId: pathValue.time.time,
                        timeIdFull: pathValue.time,
                        source: pathValue.source,
                        transparent: pathValue.isTransparent,
                        // value: valueStr,
                        event: pathValue.event,
                        // NOTE: I think it'd be too expensive to log all the locks. There's one thing to log every time we read a value or create a value, but to log all of the dependencies for each write path would be O(locksPerWrite * 2 * writes), which is just too much, and could easily result in ten thousand logs per write. I think it will be fine because if there's a rejection, then that will tell us the time ID that the rejected value was reading, Which we can infer to know at least one of the locks, as in the most important lock that that right had.
                    });
                }

                // Route to every authority for this path except ourselves.
                let otherAuthorities = PathRouter.getAllAuthorities(pathValue.path);
                otherAuthorities = otherAuthorities.filter(x => !isOwnNodeId(x.nodeId));
                if (otherAuthorities.length === 0) {
                    // No one can accept this write: mark it invalid locally so
                    // dependents see the rejection, then drop it.
                    validStateComputer.ingestValuesAndValidStates({
                        pathValues: [{
                            ...pathValue,
                            valid: false,
                        }],
                        parentSyncs: [],
                        initialTriggers: { values: new Set(), parentPaths: new Set() },
                    });
                    console.error(`There are no authorities for path ${pathValue.path}. The write will be lost.`, {
                        path: pathValue.path,
                        timeId: pathValue.time.time,
                        source: pathValue.source,
                        otherAuthorities,
                    });
                    continue;
                }
                // Accumulate this pathValue into the target authority's bucket.
                function sendToAuthority(otherAuthority: AuthorityEntry) {
                    let values = valuesPerOtherAuthority.get(otherAuthority.nodeId);
                    if (!values) {
                        values = [];
                        valuesPerOtherAuthority.set(otherAuthority.nodeId, values);
                    }
                    values.push(pathValue);
                }
                if (BROADCAST_TO_ALL_AUTHORITIES) {
                    for (let otherAuthority of otherAuthorities) {
                        sendToAuthority(otherAuthority);
                    }
                } else {
                    // NOTE: Path routing is pretty strict. Once we disconnect, it'll remove it from the list of authorities. And if we can't talk to it, the retry logic will call us again.
                    let otherAuthority = otherAuthorities[~~(Math.random() * otherAuthorities.length)];
                    sendToAuthority(otherAuthority);
                }
            }

            // Don't send to bad nodes for 60 seconds
            const nodeIgnoreTime = Date.now() - 1000 * 60;

            let promises = Array.from(valuesPerOtherAuthority.entries()).map(async ([otherAuthority, values]) => {

                let disconnected = SocketFunction.getLastDisconnectTime(otherAuthority);
                if (!SocketFunction.isNodeConnected(otherAuthority) && disconnected && disconnected > nodeIgnoreTime) {
                    // If it disconnected recently... don't send to it for a little bit, so we don't spend
                    // all of our time spamming disconnected nodes
                    console.log(`disconnected at ${disconnected} > ${nodeIgnoreTime}`);
                    return;
                }

                let isTrusted = await isTrustedByNode(otherAuthority);
                if (!isTrusted) {
                    console.log(`not trusted`);
                    throw new Error(`Tried to write to paths on authorities not trusted by us. You probably need to call a function instead of directly writing to the server schema. Authority: ${otherAuthority}, Paths: ${values.map(x => getPathFromStr(x.path).join(".")).join(", ")}`);
                }


                // NOTE: We don't retry on failure, because... we broadcasted it anyways, AND this is supposed
                // to be on a trusted node (which should always be a server), so... either our network went
                // down, or all nodes went down. Either way, it likely won't fix itself quickly, and so these
                // writes are going to too old and therefore rejected by the time a server is up anyways.

                let forwardPromise = PathValueControllerBase.createValues({
                    nodeId: otherAuthority,
                    pathValues: values,
                });
                logErrors(forwardPromise);
                void forwardPromise.then(x => {
                    if (x === "refused") {
                        // The authority explicitly refused the batch: mark every
                        // value invalid and re-ingest so the rejection propagates.
                        values = values.map(value => {
                            console.info(`Rejecting value that was refused: ${debugPathValuePath(value)}`, {
                                path: value.path,
                                timeId: value.time.time,
                            });
                            return {
                                ...value,
                                valid: false,
                            };
                        });

                        validStateComputer.ingestValuesAndValidStates({
                            pathValues: values,
                            parentSyncs: [],
                            initialTriggers: { values: new Set(), parentPaths: new Set() },
                        });
                    }
                });
                // If we broadcast to all authorities, we can't also retry, as this means if one authority is down, we end up infinitely looping. And it breaks the servers and the logs.
                if (!BROADCAST_TO_ALL_AUTHORITIES) {
                    void forwardPromise.catch(async error => {
                        // Group the failed values by how many times they've
                        // already been tried, so each group is re-queued with
                        // the correct incremented tryCount.
                        let byTryCount = new Map<number, PathValue[]>();
                        for (let value of values) {
                            let tryCount = tryCountPerValue.get(value) ?? 0;
                            let arr = byTryCount.get(tryCount) ?? [];
                            arr.push(value);
                            byTryCount.set(tryCount, arr);
                        }
                        for (let [tryCount, values] of byTryCount.entries()) {
                            tryCount++;
                            if (tryCount > MAX_SEND_TRY_COUNT) {
                                console.error(`Failed to send values after ${MAX_SEND_TRY_COUNT} tries. Giving up.`, {
                                    error: error.message,
                                    otherAuthority,
                                    count: values.length,
                                });
                                continue;
                            }
                            for (let value of values) {
                                console.error(`Retrying to send values after ${tryCount} tries.`, {
                                    error: error.message,
                                    otherAuthority,
                                    path: value.path,
                                    timeId: value.time.time,
                                });
                            }
                            // Re-queue through the batcher; the same PathValue
                            // objects carry the incremented tryCount forward.
                            void pathValueCommitter.broadcastValues({
                                values: new Set(values),
                                tryCount: tryCount,
                            });
                        }
                    });
                }

                pathValueCommitter.addCommitPromise(forwardPromise);
            });
            // await, so "waitForValuesToCommit" works correctly
            // (errors and >30s hangs are converted to undefined so one bad
            // authority can't wedge the whole flush)
            await Promise.all(promises.map(x => timeoutToUndefined(timeInSecond * 30, errorToUndefined(x))));
        }
    );


    // Batched ingestion of values received from remote nodes. Filters out
    // values/triggers arriving from an authority we aren't watching, collapses
    // initial triggers so a newer snapshot clobbers older values for the same
    // path, then hands everything to validStateComputer.
    public ingestRemoteValuesAndValidStates = batchFunction(
        { delay: 16, throttleWindow: 1000, name: "ingestRemoteValuesAndValidStates", noMeasure: true },
        async (batched: RemoteValueAndValidState[]) => {
            // Lazy import — RemoteWatcher is preloaded at module init via setImmediate.
            const { remoteWatcher } = await import("../1-path-client/RemoteWatcher");

            // NOTE: We need to ignore values if they're not who we're watching. That way, if we change the watcher, it's smooth and we don't partial data that might clobber with the data from the authority we are really using.
            // NOTE(review): this filter only runs when NOT a client (!isClient());
            // the comment above reads as if it should apply on clients too —
            // confirm the guard's polarity is intentional.
            if (!isClient()) {
                measureBlock(function ignoreUnrequestedValues() {
                    for (let batch of batched) {
                        // True when `path` is being watched on a different node
                        // than the one this batch came from (so the value should
                        // be ignored). Returns false when we aren't watching the
                        // path at all, or the sender matches the watch.
                        function isWrongAuthority(path: string, value?: PathValue, type?: string) {
                            let watchingAuthorityId = remoteWatcher.getExistingWatchRemoteNodeId(path);
                            // If we AREN'T watching it... it's actually fine, we can receive any values.
                            // When we start watching, those values will get clobbered.
                            if (watchingAuthorityId === undefined) return false;
                            if (!areNodeIdsEqual(watchingAuthorityId, batch.sourceNodeId)) {
                                let valueWatchNode = remoteWatcher.getValueWatchRemoteNodeId(path);
                                if (!valueWatchNode || !areNodeIdsEqual(valueWatchNode, batch.sourceNodeId)) {
                                    // Debug-only: candidates + re-lookup are for inspection in the debugger.
                                    let candidates = PathRouter.getAllAuthorities(path);
                                    require("debugbreak")(2);
                                    debugger;
                                    remoteWatcher.getExistingWatchRemoteNodeId(path);
                                    console.warn(`Ignoring value from wrong authority. Should have been ${debugNodeId(watchingAuthorityId)}, but was received from ${debugNodeId(batch.sourceNodeId)}.`, {
                                        path,
                                        type,
                                        timeId: value?.time.time,
                                        source: value?.source,
                                        sourceNodeId: debugNodeId(batch.sourceNodeId),
                                        sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
                                        watchingAuthorityId: debugNodeId(watchingAuthorityId),
                                        watchingAuthorityNodeThreadId: decodeNodeId(watchingAuthorityId)?.threadId,
                                        isTransparent: value?.isTransparent,
                                    });
                                }
                                return true;
                            }
                            return false;
                        }
                        batch.pathValues = batch.pathValues.filter(value => {
                            // NOTE: See the definition for lock count for why this check isn't checking all the possible cases. Essentially, locks is often empty, and that's intentional. However, the reverse should never be true, locks should never have values when lockCount is 0.
                            if (value.lockCount === 0 && value.locks.length > 0) {
                                console.error(red(`Ignoring value with invalid lockCount. Was ${value.lockCount}, but we have ${value.locks.length} locks. locks are optional, but lockCount isn't. We should never have locks without having lockCount set. ${debugPathValuePath(value)}`));
                                return false;
                            }

                            if (PathRouter.isSelfAuthority(value.path)) return true;
                            if (isWrongAuthority(value.path, value, "value")) {
                                return false;
                            }
                            return true;
                        });

                        // NOTE: If we are the authority on it, it means we're not going to have a remote watch node ID. And if we did, it would be ourselves. So this also implicitly filters out any initial triggers for values that we are the authority on.
                        // (Array.from snapshots the sets so deletion during iteration is safe.)
                        for (let path of Array.from(batch.initialTriggers.values)) {
                            if (isWrongAuthority(path, undefined, "initialTrigger")) {
                                batch.initialTriggers.values.delete(path);
                            }
                        }
                        for (let parentPath of Array.from(batch.initialTriggers.parentPaths)) {
                            if (remoteWatcher.isFinalRemoteWatchPath({ parentPath, nodeId: batch.sourceNodeId })) {
                                continue;
                            }

                            console.warn(`Ignoring parent path which we aren't watching. From ${debugNodeId(batch.sourceNodeId)}.`, {
                                parentPath,
                                sourceNodeId: debugNodeId(batch.sourceNodeId),
                                sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
                            });
                            batch.initialTriggers.parentPaths.delete(parentPath);
                        }
                    }
                });
            }


            // path => sourceNodeId
            let parentSyncs = new Map<string, Set<string>>();

            // We need to do a bit of work to properly clear path values that are from old initial triggers. As if we receive two initial triggers, they need to clobber each other. And if we collapse it, we lose that information. So we have to do that here.
            let finalResults = new Map<string, {
                path: string;
                pathValues: PathValue[];
                initialTrigger: boolean;
                batchIndex: number;
            }>();

            for (let batchIndex = 0; batchIndex < batched.length; batchIndex++) {
                let batch = batched[batchIndex];
                for (let pathValue of batch.pathValues) {
                    let initialTrigger = batch.initialTriggers.values.has(pathValue.path);
                    let results = finalResults.get(pathValue.path);
                    if (!results) {
                        results = {
                            path: pathValue.path,
                            pathValues: [],
                            initialTrigger: false,
                            batchIndex: -1,
                        };
                        finalResults.set(pathValue.path, results);
                    }
                    // An initial trigger from a NEW batch wipes values gathered
                    // so far; repeated values within the same batch accumulate.
                    let isContinuedBatch = results.batchIndex === batchIndex;
                    results.batchIndex = batchIndex;
                    if (initialTrigger && !isContinuedBatch) {
                        results.pathValues = [];
                        results.initialTrigger = true;
                    }
                    results.pathValues.push(pathValue);
                }
                for (let parentPath of batch.initialTriggers.parentPaths) {
                    let sourceNodeIds = parentSyncs.get(parentPath);
                    if (!sourceNodeIds) {
                        sourceNodeIds = new Set();
                        parentSyncs.set(parentPath, sourceNodeIds);
                    }
                    sourceNodeIds.add(batch.sourceNodeId);
                }
            }

            let parentPaths = new Set(parentSyncs.keys());
            let initialValues = new Set(Array.from(finalResults.values()).filter(x => x.initialTrigger).map(x => x.path));

            // Flatten parentSyncs into (parentPath, sourceNodeId) pairs.
            let parentSyncsList: { parentPath: string; sourceNodeId: string }[] = [];
            for (let [parentPath, sourceNodeIds] of parentSyncs.entries()) {
                for (let sourceNodeId of sourceNodeIds) {
                    parentSyncsList.push({ parentPath, sourceNodeId });
                }
            }

            validStateComputer.ingestValuesAndValidStates({
                pathValues: Array.from(finalResults.values()).map(x => x.pathValues).flat(),
                parentSyncs: parentSyncsList,
                initialTriggers: { values: initialValues, parentPaths: parentPaths },
            });
        },
    );
}
|
|
464
|
+
|
|
465
|
+
// Process-wide singleton committer (also referenced by name inside the class
// for retry re-queueing).
export const pathValueCommitter = new PathValueCommitter();

// On shutdown, flush in-flight commits so queued writes aren't lost.
registerShutdownHandler(async () => {
    await pathValueCommitter.waitForValuesToCommit();
});
|