querysub 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.dependency-cruiser.js +304 -0
- package/.eslintrc.js +51 -0
- package/.github/copilot-instructions.md +1 -0
- package/.vscode/settings.json +25 -0
- package/bin/deploy.js +4 -0
- package/bin/function.js +4 -0
- package/bin/server.js +4 -0
- package/costsBenefits.txt +112 -0
- package/deploy.ts +3 -0
- package/inject.ts +1 -0
- package/package.json +60 -0
- package/prompts.txt +54 -0
- package/spec.txt +820 -0
- package/src/-a-archives/archiveCache.ts +913 -0
- package/src/-a-archives/archives.ts +148 -0
- package/src/-a-archives/archivesBackBlaze.ts +792 -0
- package/src/-a-archives/archivesDisk.ts +418 -0
- package/src/-a-archives/copyLocalToBackblaze.ts +24 -0
- package/src/-a-auth/certs.ts +517 -0
- package/src/-a-auth/der.ts +122 -0
- package/src/-a-auth/ed25519.ts +1015 -0
- package/src/-a-auth/node-forge-ed25519.d.ts +17 -0
- package/src/-b-authorities/dnsAuthority.ts +203 -0
- package/src/-b-authorities/emailAuthority.ts +57 -0
- package/src/-c-identity/IdentityController.ts +200 -0
- package/src/-d-trust/NetworkTrust2.ts +150 -0
- package/src/-e-certs/EdgeCertController.ts +288 -0
- package/src/-e-certs/certAuthority.ts +192 -0
- package/src/-f-node-discovery/NodeDiscovery.ts +543 -0
- package/src/-g-core-values/NodeCapabilities.ts +134 -0
- package/src/-g-core-values/oneTimeForward.ts +91 -0
- package/src/-h-path-value-serialize/PathValueSerializer.ts +769 -0
- package/src/-h-path-value-serialize/stringSerializer.ts +176 -0
- package/src/0-path-value-core/LoggingClient.tsx +24 -0
- package/src/0-path-value-core/NodePathAuthorities.ts +978 -0
- package/src/0-path-value-core/PathController.ts +1 -0
- package/src/0-path-value-core/PathValueCommitter.ts +565 -0
- package/src/0-path-value-core/PathValueController.ts +231 -0
- package/src/0-path-value-core/archiveLocks/ArchiveLocks.ts +154 -0
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +820 -0
- package/src/0-path-value-core/archiveLocks/archiveSnapshots.ts +180 -0
- package/src/0-path-value-core/debugLogs.ts +90 -0
- package/src/0-path-value-core/pathValueArchives.ts +483 -0
- package/src/0-path-value-core/pathValueCore.ts +2217 -0
- package/src/1-path-client/RemoteWatcher.ts +558 -0
- package/src/1-path-client/pathValueClientWatcher.ts +702 -0
- package/src/2-proxy/PathValueProxyWatcher.ts +1857 -0
- package/src/2-proxy/archiveMoveHarness.ts +376 -0
- package/src/2-proxy/garbageCollection.ts +753 -0
- package/src/2-proxy/pathDatabaseProxyBase.ts +37 -0
- package/src/2-proxy/pathValueProxy.ts +139 -0
- package/src/2-proxy/schema2.ts +518 -0
- package/src/3-path-functions/PathFunctionHelpers.ts +129 -0
- package/src/3-path-functions/PathFunctionRunner.ts +619 -0
- package/src/3-path-functions/PathFunctionRunnerMain.ts +67 -0
- package/src/3-path-functions/deployBlock.ts +10 -0
- package/src/3-path-functions/deployCheck.ts +7 -0
- package/src/3-path-functions/deployMain.ts +160 -0
- package/src/3-path-functions/pathFunctionLoader.ts +282 -0
- package/src/3-path-functions/syncSchema.ts +475 -0
- package/src/3-path-functions/tests/functionsTest.ts +135 -0
- package/src/3-path-functions/tests/rejectTest.ts +77 -0
- package/src/4-dom/css.tsx +29 -0
- package/src/4-dom/cssTypes.d.ts +212 -0
- package/src/4-dom/qreact.tsx +2322 -0
- package/src/4-dom/qreactTest.tsx +417 -0
- package/src/4-querysub/Querysub.ts +877 -0
- package/src/4-querysub/QuerysubController.ts +620 -0
- package/src/4-querysub/copyEvent.ts +0 -0
- package/src/4-querysub/permissions.ts +289 -0
- package/src/4-querysub/permissionsShared.ts +1 -0
- package/src/4-querysub/querysubPrediction.ts +525 -0
- package/src/5-diagnostics/FullscreenModal.tsx +67 -0
- package/src/5-diagnostics/GenericFormat.tsx +165 -0
- package/src/5-diagnostics/Modal.tsx +79 -0
- package/src/5-diagnostics/Table.tsx +183 -0
- package/src/5-diagnostics/TimeGrouper.tsx +114 -0
- package/src/5-diagnostics/diskValueAudit.ts +216 -0
- package/src/5-diagnostics/memoryValueAudit.ts +442 -0
- package/src/5-diagnostics/nodeMetadata.ts +135 -0
- package/src/5-diagnostics/qreactDebug.tsx +309 -0
- package/src/5-diagnostics/shared.ts +26 -0
- package/src/5-diagnostics/synchronousLagTracking.ts +47 -0
- package/src/TestController.ts +35 -0
- package/src/allowclient.flag +0 -0
- package/src/bits.ts +86 -0
- package/src/buffers.ts +69 -0
- package/src/config.ts +53 -0
- package/src/config2.ts +48 -0
- package/src/diagnostics/ActionsHistory.ts +56 -0
- package/src/diagnostics/NodeViewer.tsx +503 -0
- package/src/diagnostics/SizeLimiter.ts +62 -0
- package/src/diagnostics/TimeDebug.tsx +18 -0
- package/src/diagnostics/benchmark.ts +139 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +515 -0
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +274 -0
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +302 -0
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +84 -0
- package/src/diagnostics/errorLogs/LogNotify.tsx +101 -0
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +724 -0
- package/src/diagnostics/errorLogs/LogViewer.tsx +757 -0
- package/src/diagnostics/errorLogs/hookErrors.ts +60 -0
- package/src/diagnostics/errorLogs/logFiltering.tsx +149 -0
- package/src/diagnostics/heapTag.ts +13 -0
- package/src/diagnostics/listenOnDebugger.ts +77 -0
- package/src/diagnostics/logs/DiskLoggerPage.tsx +572 -0
- package/src/diagnostics/logs/ObjectDisplay.tsx +165 -0
- package/src/diagnostics/logs/ansiFormat.ts +108 -0
- package/src/diagnostics/logs/diskLogGlobalContext.ts +38 -0
- package/src/diagnostics/logs/diskLogger.ts +305 -0
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +32 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +50 -0
- package/src/diagnostics/logs/logGitHashes.ts +30 -0
- package/src/diagnostics/managementPages.tsx +289 -0
- package/src/diagnostics/periodic.ts +89 -0
- package/src/diagnostics/runSaturationTest.ts +416 -0
- package/src/diagnostics/satSchema.ts +64 -0
- package/src/diagnostics/trackResources.ts +82 -0
- package/src/diagnostics/watchdog.ts +55 -0
- package/src/errors.ts +132 -0
- package/src/forceProduction.ts +3 -0
- package/src/fs.ts +72 -0
- package/src/heapDumps.ts +666 -0
- package/src/https.ts +2 -0
- package/src/inject.ts +1 -0
- package/src/library-components/ATag.tsx +84 -0
- package/src/library-components/Button.tsx +344 -0
- package/src/library-components/ButtonSelector.tsx +64 -0
- package/src/library-components/DropdownCustom.tsx +151 -0
- package/src/library-components/DropdownSelector.tsx +32 -0
- package/src/library-components/Input.tsx +334 -0
- package/src/library-components/InputLabel.tsx +198 -0
- package/src/library-components/InputPicker.tsx +125 -0
- package/src/library-components/LazyComponent.tsx +62 -0
- package/src/library-components/MeasureHeightCSS.tsx +48 -0
- package/src/library-components/MeasuredDiv.tsx +47 -0
- package/src/library-components/ShowMore.tsx +51 -0
- package/src/library-components/SyncedController.ts +171 -0
- package/src/library-components/TimeRangeSelector.tsx +407 -0
- package/src/library-components/URLParam.ts +263 -0
- package/src/library-components/colors.tsx +14 -0
- package/src/library-components/drag.ts +114 -0
- package/src/library-components/icons.tsx +692 -0
- package/src/library-components/niceStringify.ts +50 -0
- package/src/library-components/renderToString.ts +52 -0
- package/src/misc/PromiseRace.ts +101 -0
- package/src/misc/color.ts +30 -0
- package/src/misc/getParentProcessId.cs +53 -0
- package/src/misc/getParentProcessId.ts +53 -0
- package/src/misc/hash.ts +83 -0
- package/src/misc/ipPong.js +13 -0
- package/src/misc/networking.ts +2 -0
- package/src/misc/random.ts +45 -0
- package/src/misc.ts +19 -0
- package/src/noserverhotreload.flag +0 -0
- package/src/path.ts +226 -0
- package/src/persistentLocalStore.ts +37 -0
- package/src/promise.ts +15 -0
- package/src/server.ts +73 -0
- package/src/src.d.ts +1 -0
- package/src/test/heapProcess.ts +36 -0
- package/src/test/mongoSatTest.tsx +55 -0
- package/src/test/satTest.ts +193 -0
- package/src/test/test.tsx +552 -0
- package/src/zip.ts +92 -0
- package/src/zipThreaded.ts +106 -0
- package/src/zipThreadedWorker.js +19 -0
- package/tsconfig.json +27 -0
- package/yarnSpec.txt +56 -0
package/src/0-path-value-core/PathValueController.ts
@@ -0,0 +1,231 @@
+import { SocketFunction } from "socket-function/SocketFunction";
+import { errorToUndefined, logErrors } from "../errors";
+import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
+import { authorityStorage, ReadLock, pathWatcher, PathValue, PathValueSnapshot, WriteState, writeValidStorage, lockToCallback, isCoreQuiet, WatchConfig, MAX_ACCEPTED_CHANGE_AGE, MAX_CHANGE_AGE, debugPathValuePath } from "./pathValueCore";
+import { measureCodeSync, measureFnc } from "socket-function/src/profiling/measure";
+import { ActionsHistory } from "../diagnostics/ActionsHistory";
+import { isNode } from "socket-function/src/misc";
+import { magenta, red } from "socket-function/src/formatting/logColors";
+import { AuthorityPath, pathValueAuthority2 } from "./NodePathAuthorities";
+import { pathValueSerializer } from "../-h-path-value-serialize/PathValueSerializer";
+import { pathValueCommitter } from "./PathValueCommitter";
+import { Benchmark } from "../diagnostics/benchmark";
+import { formatNumber, formatTime } from "socket-function/src/formatting/format";
+import { sha256 } from "js-sha256";
+import debugbreak from "debugbreak";
+import { ClientWatcher } from "../1-path-client/pathValueClientWatcher";
+import { debugLog, isDebugLogEnabled } from "./debugLogs";
+import { debugNodeId } from "../-c-identity/IdentityController";
+import { diskLog } from "../diagnostics/logs/diskLogger";
+export { pathValueCommitter };
+
+class PathValueControllerBase {
+    /**
+    Use for writers to send writes to authorities, and for authorities to send writes to watchers.
+    - Values we are an authority on will have their valid states computed (recursively), otherwise
+        we just trust the remote valid states.
+
+    Triggers all watchers of the values, as well as all watches of any changed valid states.
+
+    NOTE: We don't check for max age in forwarded values, and instead just insert them, as
+        we trust our other authorities to not forward us bad values (and rejecting it would
+        cause inconsistencies between nodes, which is worse).
+    */
+    public async forwardWrites(
+        valuesBuffers: Buffer[],
+        parentsSynced?: string[],
+        watchLocks?: "watchLocks",
+        initialTrigger?: "initialTrigger"
+    ) {
+        let machineId = SocketFunction.getCaller().nodeId;
+        let callerId = SocketFunction.getCaller().nodeId;
+
+        const values = await errorToUndefined(pathValueSerializer.deserialize(valuesBuffers) as Promise<PathValue[]>);
+        if (!values) {
+            return;
+        }
+
+        Benchmark.onReceivedValues(values);
+        ActionsHistory.OnRead(values);
+
+        // NOTE: We don't check for age here, because this only matters for new writes, and this isn't the correct place to check for this.
+        // // IMPORTANT!
+        // // We DO NOT reject old values here. Only trusted servers can call this function, and... writes are
+        // // sent to multiple servers, so if we reject a write here it has a high chance of resulting in
+        // // different writes between servers. Because writes are usually accepted, and receiving an old
+        // // write is likely due to an issue on our end (our internet lagging, our server lagging, etc),
+        // // the write will likely be accepted (as most writes are accepted), so defaulting to accept
+        // // it will likely result in a good state.
+        // let threshold = Date.now() - MAX_CHANGE_AGE;
+        // for (let value of values) {
+        //     let pastThreshold = threshold - value.time.time;
+        //     if (pastThreshold > 0) {
+        //         console.error(red(`Received a value that is ${formatTime(pastThreshold)} past change time, which will cause it to be forcefully accepted (locks will be ignored): ${value.path}`));
+        //     }
+        // }
+
+        // Set up VALID watches for the caller. Because we are only adding the locks, this won't trigger
+        // a valid state send unless the initial valid state is rejected.
+        // IMPORTANT! This needs to be done here, and cannot be done in another call, as we NEED the valid
+        // states for created values to be watched immediately, otherwise the creator (such as the FunctionRunner),
+        // might miss the rejection (and not rerun the rejected function).
+        if (watchLocks) {
+            for (let value of values) {
+                lockToCallback.watchLock({
+                    path: value.path,
+                    startTime: value.time,
+                    endTime: value.time,
+                    readIsTranparent: value.canGCValue || false,
+                }, callerId);
+            }
+        }
+
+        if (isDebugLogEnabled()) {
+            let sourceNodeId = debugNodeId(callerId);
+            for (let value of values) {
+                debugLog("RECEIVE VALUE", { path: value.path, time: value.time.time, sourceNodeId });
+            }
+        }
+        diskLog(`Received PathValues via forwardWrites`, { valueCount: values.length, callerId, });
+        for (let value of values) {
+            diskLog("Received PathValue for path", { path: value.path, time: value.time.time, callerId });
+        }
+
+        if (isCoreQuiet) {
+            await pathValueCommitter.ingestRemoteValues({
+                pathValues: values,
+                parentsSynced,
+                sourceNodeId: callerId,
+                initialTrigger,
+            });
+        } else {
+            let sumAge = 0;
+            let now = Date.now();
+            for (let value of values) {
+                sumAge += now - value.time.time;
+            }
+            console.log(`(${now}) Received writes: ${values.length}, ${formatTime(sumAge / values.length)} AGE, parents: ${parentsSynced?.length} from ${machineId}`);
+            await measureCodeSync(function forwardWritesMeasureOverhead() {
+                return pathValueCommitter.ingestRemoteValues({
+                    pathValues: values,
+                    parentsSynced,
+                    sourceNodeId: callerId,
+                    initialTrigger,
+                });
+            });
+        }
+    }
+
+    /** Watches that the startTime is valid, and that there are no valid values between startTime/endTime
+     * - Puts the onus of syncing the lock, and its readLocks, recursively, on the callee.
+     *   Of course, the callee has to be an authority for this to work, and authorities are
+     *   automatically doing this. Due to authorities being sharded between large swathes of data,
+     *   as well as data (hopefully) being relatively localized to our shard lines, it is expected
+     *   there will be relatively few watchLockValid calls. Also, it is expected that relatively
+     *   few readLocks will become invalid (rejected).
+     * - Notifies of invalid states by calling PathValidWatcher.onValidChange (which the client watches
+     *   statically using createValidStateWatcher).
+     * - All the lock paths are assumed to be ours (we are the authority for them)
+     * - Triggers invalid range readLocks by passing the valid state of the value that conflicted.
+     *   As any watchers will be watching this range locally, this will trigger them to ingest the valid
+     *   state, and trigger the value that uses it, recalculate the value that uses it (as they are the
+     *   authority on that), and then realize it is invalid and reject it.
+     */
+    public async watchLockValid(locks: ReadLock[]) {
+        let callerId = SocketFunction.getCaller().nodeId;
+        for (let lock of locks) {
+            lockToCallback.watchLock(lock, callerId);
+        }
+        let now = Date.now();
+        let validStates: WriteState[] = locks.map(lock => writeValidStorage.getWriteState(lock, now));
+        logErrors(PathValueController.nodes[callerId].onValidChange(validStates));
+    }
+
+    // TODO: Batch these calls before processing them
+    public async onValidChange(config: WriteState[]) {
+        if (!isCoreQuiet || !isNode() || ClientWatcher.DEBUG_READS) {
+            let rejected = config.filter(x => !x.isValid);
+            if (rejected.length > 0) {
+                console.group(red(`Received rejection of paths`));
+                for (let value of rejected) {
+                    console.log(debugPathValuePath(value));
+                }
+                console.groupEnd();
+            }
+        }
+
+        // Ignore any remote changes for values that WE are an authority on!
+        config = config.filter(x => !pathValueAuthority2.isSelfAuthority(x.path));
+        pathValueCommitter.ingestValidStates(config);
+    }
+
+    /** Returns serialized PathValue[] */
+    public async getSnapshot(config: { authorityPath: AuthorityPath; }): Promise<Buffer[]> {
+        let snapshot = await authorityStorage.getSnapshot(config.authorityPath);
+        let values = Object.values(snapshot.values).flat();
+        let buffers = await pathValueSerializer.serialize(values);
+        let totalSize = buffers.reduce((a, b) => a + b.length, 0);
+        console.log(`Sending snapshot: ${formatNumber(values.length)} values, ${formatNumber(buffers.length)} buffers, ${formatNumber(totalSize)}B`);
+        return buffers;
+    }
+
+
+    // Returns the initial states of the watches as well
+    // If there is no value, we return { path, value: undefined, time: { time: 0, creatorId: 0 }, readLocks: [] }
+    // to indicate there is no value.
+    public async watchLatest(config: WatchConfig) {
+        let callerId = SocketFunction.getCaller().nodeId;
+        if (isDebugLogEnabled()) {
+            let sourceNodeId = debugNodeId(callerId);
+            for (let value of config.paths) {
+                debugLog("WATCH PATH", { path: value, sourceNodeId });
+            }
+            for (let value of config.parentPaths) {
+                debugLog("WATCH PARENT PATH", { path: value, sourceNodeId });
+            }
+        }
+        pathWatcher.watchPath({ paths: config.paths, parentPaths: config.parentPaths, callback: callerId, initialTrigger: true });
+    }
+    public async unwatchLatest(config: WatchConfig) {
+        let callerId = SocketFunction.getCaller().nodeId;
+        if (isDebugLogEnabled()) {
+            let sourceNodeId = debugNodeId(SocketFunction.getCaller().nodeId);
+            for (let value of config.paths) {
+                debugLog("UNWATCH PATH", { path: value, sourceNodeId });
+            }
+            for (let value of config.parentPaths) {
+                debugLog("UNWATCH PARENT PATH", { path: value, sourceNodeId });
+            }
+        }
+        pathWatcher.unwatchPath({ paths: config.paths, parentPaths: config.parentPaths, callback: callerId });
+    }
+
+    public async ping() { }
+
+    public async test(obj: unknown) {
+        return obj;
+    }
+}
+
+export const PathValueController = SocketFunction.register(
+    "PathValueController-1e062e2c-81c9-497b-b414-a46d0a4c2313",
+    new PathValueControllerBase(),
+    () => ({
+        forwardWrites: {},
+
+        watchLockValid: {},
+        onValidChange: {},
+
+        watchLatest: {},
+        unwatchLatest: {},
+
+        getSnapshot: {},
+
+        ping: {},
+        test: {},
+    }),
+    () => ({
+        hooks: [requiresNetworkTrustHook],
+    })
+);
+
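For orientation, here is a minimal, hypothetical sketch of how another node might call this controller. It is not part of the package; it only reuses pieces visible in the diff above (the pathValueSerializer.serialize call from getSnapshot, the PathValueController.nodes[...] calling pattern from watchLockValid, and the forwardWrites signature). The import paths and the pushWrites/authorityNodeId names are illustrative assumptions.

```ts
import { PathValue } from "./src/0-path-value-core/pathValueCore";
import { pathValueSerializer } from "./src/-h-path-value-serialize/PathValueSerializer";
import { PathValueController } from "./src/0-path-value-core/PathValueController";

// Hypothetical helper: serialize some PathValues and forward them to an authority node,
// asking it to watch the resulting locks so rejections are sent back to us.
async function pushWrites(authorityNodeId: string, values: PathValue[]) {
    // getSnapshot serializes the same way before sending buffers over the wire.
    const buffers = await pathValueSerializer.serialize(values);
    // forwardWrites(valuesBuffers, parentsSynced?, watchLocks?, initialTrigger?)
    await PathValueController.nodes[authorityNodeId].forwardWrites(buffers, undefined, "watchLocks");
}
```

Note that the controller is registered with requiresNetworkTrustHook, so, per the hook's name and the comment in forwardWrites, only nodes that have established network trust should be able to make these calls.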
package/src/0-path-value-core/archiveLocks/ArchiveLocks.ts
@@ -0,0 +1,154 @@
+// 4.2) Turn on read locking, so when we read paths we actually talk to the lock system
+// 5) Write merge script (otherwise we can't ACTUALLY test it...)
+//      - We need to use minTime/maxTime (throw if any non-genesis values are missing them),
+//          to determine if we are allowed to GC missing paths. We can only GC a block where
+//          its maxTime < anyBlock.minTime (STRICTLY less than, <= is not enough).
+//      - Our blocks will be sorted, so this isn't SO hard.
+//      - Have N blocks at each level, and when we exceed that limit, we merge. IF the size
+//          exceeds the current level limit, we add it to the next level, potentially
+//          merging on that level, etc, etc.
+//      - If a block has a minTime < blocks on the next level, it is considered on that level,
+//          even if its size isn't enough. (This prevents blocks from dropping levels,
+//          such as if we GC a lot of deletions).
+// 5.1) Merge stats in management page
+//      - Read bytes, read count, write bytes, write count, removed bytes, removed count
+// 5.3) Run merge script on our actual data. This should result in a lot of merging,
+//      and take a few minutes to reduce our files.
+//      - The merged files should either take significantly less time to load OR allow us to
+//          do unzipping on multiple threads (because now the files will be large
+//          enough that this will be feasible).
+//      - Reload our data to verify nothing breaks
+// 5.2) Merge test data generation
+//      - Write lots of new paths, and then delete the values, to verify we at least
+//          have deletion GCing.
+//      - We should see files at each level be created, and eventually progress to the last level,
+//          where the deletions finally disappear.
+//      - BECAUSE we can only remove values from the oldest file, we will mostly be ensuring
+//          a total file limit, merging small files to keep < 1000 files AND then ONLY merging
+//          when we READ the files and determine that the merge will reduce our size by enough.
+//      - This is because even for other gcs, such as app gc, anyone else can just
+//          read multiple files and merge them as well, just like we are doing.
+// 6) Run it. And then debug it if/when it fails
+// 7) Verify our spec.txt todo is done
+// 8) Next todo (fast app GC I believe)
+/*
+Proof
+    Invert transactions, and think of files as all having two "sheets" on them.
+    - A transaction removes one sheet
+    - A confirmed transaction removes the other sheet
+    - A conflicting transaction (time < this.time, same data) adds a red sheet
+    - We also add a sheet back by deleting the conflicting transaction, which can never be undone, and doesn't require keeping the transaction around forever.
+    - And by deleting the underlying files we prevent any new transactions from
+        removing their confirmation sheet, even after our transaction is deleted.
+    We remove one sheet, check all other sheets for conflicts, and if we find any,
+    we add a red sheet. If not, we remove the other sheet.
+    Of course, applying transactions (confirming them) requires a few steps
+    (breaking the transaction into parts, etc). The system itself has to make
+    sure these are safe, which it does by:
+    - Unique file names and every transaction requiring a delete means we can detect
+        if transactions conflict.
+    - Time stamping means we know which transaction should win.
+    - Transactions must be atomic before they are confirmed.
+    - Detaching transactions allows for better cleanup, while still blocking
+        new transactions.
+
+    OR, rather, think of it as boxes, with either letters in them OR crossed out letters
+    - We number these boxes so we know the order to apply them
+    - We are writing these boxes to paper, with pen, so we can't delete them.
+    - OR, lines on a piece of paper
+
+    ALSO, IMPORTANT! We get the time BEFORE we start reading. So that means any changes while
+    reading are ignored (deletions are fine, we would apply those anyways).
+
+    We always transition from valid => invalid/complete, NEVER invalid => valid.
+    (or we are always invalid)
+    - This is because any transaction that causes us to be invalid will be applied,
+        removing the conflicting files, meaning we are STILL invalid.
+
+    Using our state transition assurances, we can make sure every transaction we read is applied,
+    if it was ever valid. This makes our iteration consistent, even if our file reading is slow
+    and done in pieces.
+
+    By double reading the dir, and retrying if it changes, we ensure we get atomic dir states. Otherwise
+    reading might get some old state, and some new state, missing the intermediate state entirely.
+*/
+/*
+Steps
+    1) Create a transaction
+    2) Wait until transaction.time > 1 ready threshold
+    3) Create confirmation of transaction / delete invalid transaction
+        - Has a time indicator, overriding the create time so it is sorted
+            just before the base transaction
+    4) Wait until confirmation.time > 1 ready threshold
+    5) Create detached create, actually delete files, delete transaction
+    6) Wait until confirmation.time > 2 ready thresholds
+    7) Delete confirmation of transaction
+*/
+/*
+Hanging constraints
+    - ANY transaction can be created at any time (obviously)
+    - If a transaction has EVER been confirmed, the confirmation file might be created
+        multiple times, at any time.
+    - If a transaction is EVER invalid, it might be deleted.
+    - If a transaction is EVER confirmed, the create confirmation may be created at any time,
+        and the files might be deleted at any time, in any order.
+    - Pending confirmed transactions might be deleted at any time.
+    The main issue would be when creating confirmations and deleting, but... the confirmation
+    means that step can be restarted without too much issue.
+    Keeping our order strict is important. The confirmed transaction can't leave any room
+    for new transactions to sneak in, as the underlying transaction may be invalidated
+    before we apply the file changes.
+*/
+
+
+export type FileInfo = {
+    file: string;
+    createTime: number;
+    size: number;
+};
+
+export type ArchiveTransaction = {
+    createFiles: {
+        file: string;
+        data: Buffer;
+    }[];
+    deleteFiles: FileInfo[];
+};
+
+export interface ArchiveLocker {
+    /** Should resolve fairly quickly, as it doesn't mutate, it just reads files. */
+    getAllValidFiles(): Promise<FileInfo[]>;
+
+    /** Swaps createFiles for deleteFiles. Guarantees correctness when observed
+     * from getAllValidFiles.
+     * - Will take a while to return.
+     * - Might not change any files, you will have to call getAllValidFiles to see
+     *   if we might have changed something. If the created files are deleted
+     *   quickly, you might not see them, and if the deleted files are gone,
+     *   it could be someone else who deleted them (for another reason).
+     * - Basically, you shouldn't depend on this in any way, and every time
+     *   you read make the best single change, and don't rely on it for
+     *   the next set of changes.
+     * - Might create a file and delete only some or none of the requested delete files
+     *   (if we crash while deleting multiple files).
+     */
+    atomicSwapFiles(
+        config: {
+            // Runs without locking the existence of files, so duplicates can appear. Results in fewer rejections,
+            // but... then you might also have duplicate (or even redundant but slightly different) files.
+            allowDuplicates?: boolean;
+        },
+        code: (
+            validFiles: FileInfo[],
+            readFiles: (files: FileInfo[]) => Promise<(Buffer | undefined)[]>,
+        ) => Promise<ArchiveTransaction[]>
+    ): Promise<"accepted" | "rejected">;
+
+    /** Unsafely sets the current files. For loading snapshots.
+     * - Might fail, might break all your files, etc. BUT, if you are only running a single server,
+     *   this will probably work. AND if you are reverting to an old snapshot, something that will
+     *   probably work is better than a data state which is definitely broken.
+     */
+    unsafeSetFiles(files: string[]): Promise<void>;
+    unsafeGetFileLocation(file: string): Promise<"live" | "zombie" | "recycled" | "missing">;
+}
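To make the atomicSwapFiles contract concrete, here is a hypothetical merge pass of the kind the TODO list at the top of this file describes. It is not part of the package: the mergeSmallFiles name, the Buffer.concat merge, and the output file name are placeholders, and it assumes some ArchiveLocker implementation is available and that returning an empty transaction list is a no-op.

```ts
import { ArchiveLocker, ArchiveTransaction, FileInfo } from "./src/0-path-value-core/archiveLocks/ArchiveLocks";

// Hypothetical merge pass: combine small valid files into one file and delete the
// originals, expressed as a single transaction handed back to atomicSwapFiles.
async function mergeSmallFiles(locker: ArchiveLocker, maxSize: number): Promise<"accepted" | "rejected"> {
    return locker.atomicSwapFiles({}, async (validFiles, readFiles): Promise<ArchiveTransaction[]> => {
        // Pick candidates to merge (the level/minTime rules from the comments above are omitted here).
        const small: FileInfo[] = validFiles.filter(f => f.size < maxSize);
        if (small.length < 2) return [];
        const buffers = await readFiles(small);
        // A file may have disappeared between listing and reading; skip this pass if so.
        if (buffers.some(b => !b)) return [];
        // Placeholder merge: the real on-disk format is defined elsewhere in the package.
        const merged = Buffer.concat(buffers as Buffer[]);
        return [{
            createFiles: [{ file: `merged-${Date.now()}`, data: merged }],
            deleteFiles: small,
        }];
    });
}
```

Returning the change as an ArchiveTransaction, rather than deleting files directly, is what lets the locker run the confirmation/deletion protocol sketched in the Steps comment and report "accepted" or "rejected" for the swap as a whole.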