querysub 0.312.0 → 0.314.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +1 -1
- package/costsBenefits.txt +4 -1
- package/package.json +3 -2
- package/spec.txt +23 -18
- package/src/-0-hooks/hooks.ts +1 -1
- package/src/-a-archives/archives.ts +16 -3
- package/src/-a-archives/archivesBackBlaze.ts +51 -3
- package/src/-a-archives/archivesLimitedCache.ts +175 -0
- package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
- package/src/-a-auth/certs.ts +58 -31
- package/src/-b-authorities/cdnAuthority.ts +2 -2
- package/src/-b-authorities/dnsAuthority.ts +3 -2
- package/src/-c-identity/IdentityController.ts +3 -2
- package/src/-d-trust/NetworkTrust2.ts +17 -19
- package/src/-e-certs/EdgeCertController.ts +19 -81
- package/src/-e-certs/certAuthority.ts +7 -2
- package/src/-f-node-discovery/NodeDiscovery.ts +9 -7
- package/src/-g-core-values/NodeCapabilities.ts +6 -1
- package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
- package/src/0-path-value-core/PathValueCommitter.ts +3 -3
- package/src/0-path-value-core/PathValueController.ts +3 -3
- package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
- package/src/0-path-value-core/pathValueCore.ts +4 -3
- package/src/3-path-functions/PathFunctionRunner.ts +2 -2
- package/src/4-dom/qreact.tsx +4 -3
- package/src/4-querysub/Querysub.ts +2 -2
- package/src/4-querysub/QuerysubController.ts +2 -2
- package/src/5-diagnostics/GenericFormat.tsx +1 -0
- package/src/5-diagnostics/Table.tsx +3 -0
- package/src/5-diagnostics/diskValueAudit.ts +2 -1
- package/src/5-diagnostics/nodeMetadata.ts +0 -1
- package/src/deployManager/components/MachineDetailPage.tsx +9 -1
- package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
- package/src/deployManager/setupMachineMain.ts +8 -1
- package/src/diagnostics/NodeViewer.tsx +5 -6
- package/src/diagnostics/logs/FastArchiveAppendable.ts +757 -0
- package/src/diagnostics/logs/FastArchiveController.ts +524 -0
- package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
- package/src/diagnostics/logs/LogViewer2.tsx +349 -0
- package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
- package/src/diagnostics/logs/diskLogger.ts +135 -305
- package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
- package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
- package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
- package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
- package/src/diagnostics/logs/importLogsEntry.ts +38 -0
- package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
- package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +153 -0
- package/src/diagnostics/managementPages.tsx +7 -16
- package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
- package/src/diagnostics/periodic.ts +5 -0
- package/src/diagnostics/watchdog.ts +2 -2
- package/src/functional/SocketChannel.ts +67 -0
- package/src/library-components/Input.tsx +1 -1
- package/src/library-components/InputLabel.tsx +5 -2
- package/src/misc.ts +111 -0
- package/src/src.d.ts +34 -1
- package/src/user-implementation/userData.ts +4 -3
- package/test.ts +13 -0
- package/testEntry2.ts +29 -0
- package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
- package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
- package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
- package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
- package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
- package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
- package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
- package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
- package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
package/src/-a-archives/archivesPrivateFileSystem.ts
ADDED
@@ -0,0 +1,299 @@
+import { measureFnc } from "socket-function/src/profiling/measure";
+import { blue } from "socket-function/src/formatting/logColors";
+import { Archives } from "./archives";
+import { cache, lazy } from "socket-function/src/caching";
+import { delay } from "socket-function/src/batching";
+import { isDefined } from "../misc";
+
+let getRootDirectory = lazy(async () => {
+    await navigator.storage.persist();
+    return navigator.storage.getDirectory();
+});
+
+class ArchivesPrivateFileSystem {
+    public LAG = 0;
+    private rootDir: string;
+
+    private logging = false;
+    public enableLogging() {
+        this.logging = true;
+    }
+    private log(text: string) {
+        if (!this.logging) return;
+        console.log(text);
+    }
+
+    constructor(rootDir: string) {
+        this.rootDir = rootDir;
+        // Ensure rootDir ends with "/" for consistency
+        if (!this.rootDir.endsWith("/")) {
+            this.rootDir = this.rootDir + "/";
+        }
+    }
+
+    public getDebugName() {
+        return "privateFS/" + this.rootDir;
+    }
+
+    private async getOrCreateDirectory(path: string): Promise<FileSystemDirectoryHandle> {
+        const root = await getRootDirectory();
+        const pathParts = (this.rootDir + path).split("/").filter(part => part.length > 0);
+
+        let currentDir = root;
+        for (const part of pathParts) {
+            currentDir = await currentDir.getDirectoryHandle(part, { create: true });
+        }
+        return currentDir;
+    }
+
+    private async ensureDirsExist(path: string) {
+        const pathParts = path.split("/");
+        // Create directories up to the parent of the file (don't include the file itself)
+        if (pathParts.length > 1) {
+            const dirPath = pathParts.slice(0, -1).join("/");
+            await this.getOrCreateDirectory(dirPath);
+        }
+    }
+
+    @measureFnc
+    public async set(fileName: string, data: Buffer): Promise<void> {
+        this.log(blue(`Setting file ${fileName} = ${data.length} bytes`));
+
+        await this.ensureDirsExist(fileName);
+
+        const pathParts = fileName.split("/");
+        const filename = pathParts[pathParts.length - 1];
+        const dirPath = pathParts.length > 1 ? pathParts.slice(0, -1).join("/") : "";
+
+        const directory = await this.getOrCreateDirectory(dirPath);
+        const fileHandle = await directory.getFileHandle(filename, { create: true });
+        const writable = await fileHandle.createWritable();
+        await writable.write(data);
+        await writable.close();
+    }
+
+    @measureFnc
+    public async del(fileName: string): Promise<void> {
+        this.log(blue(`Deleting file ${fileName}`));
+
+        const pathParts = fileName.split("/");
+        const filename = pathParts[pathParts.length - 1];
+        const dirPath = pathParts.length > 1 ? pathParts.slice(0, -1).join("/") : "";
+
+        try {
+            const directory = await this.getOrCreateDirectory(dirPath);
+            await directory.removeEntry(filename);
+        } catch (e: any) {
+            // File might not exist, which is fine for delete operations
+            if (e.name !== "NotFoundError") {
+                throw e;
+            }
+        }
+    }
+
+    public async setLargeFile(config: { path: string; getNextData(): Promise<Buffer | undefined>; }): Promise<void> {
+        let { path } = config;
+        await this.ensureDirsExist(path);
+
+        const pathParts = path.split("/");
+        const filename = pathParts[pathParts.length - 1];
+        const dirPath = pathParts.length > 1 ? pathParts.slice(0, -1).join("/") : "";
+
+        const directory = await this.getOrCreateDirectory(dirPath);
+        const fileHandle = await directory.getFileHandle(filename, { create: true });
+        const writable = await fileHandle.createWritable();
+
+        try {
+            while (true) {
+                let data = await config.getNextData();
+                if (!data?.length) break;
+                await writable.write(data);
+            }
+        } finally {
+            await writable.close();
+        }
+    }
+
+    @measureFnc
+    public async get(fileName: string, config?: { range?: { start: number; end: number; }; retryCount?: number }): Promise<Buffer | undefined> {
+        this.log(blue(`Start read file ${fileName}`));
+
+        const pathParts = fileName.split("/");
+        const filename = pathParts[pathParts.length - 1];
+        const dirPath = pathParts.length > 1 ? pathParts.slice(0, -1).join("/") : "";
+
+        try {
+            const directory = await this.getOrCreateDirectory(dirPath);
+            const fileHandle = await directory.getFileHandle(filename, { create: false });
+            const file = await fileHandle.getFile();
+
+            if (config?.range) {
+                const start = config.range.start;
+                const end = config.range.end;
+                const slice = file.slice(start, end);
+                const arrayBuffer = await slice.arrayBuffer();
+                return Buffer.from(arrayBuffer);
+            } else {
+                const arrayBuffer = await file.arrayBuffer();
+                return Buffer.from(arrayBuffer);
+            }
+        } catch (e: any) {
+            if (e.name === "NotFoundError") {
+                return undefined;
+            }
+            throw e;
+        }
+    }
+
+    @measureFnc
+    public async find(prefix: string, config?: { shallow?: boolean; type: "files" | "folders" }): Promise<string[]> {
+        this.log(blue(`findFileNames ${prefix}`));
+
+        let fileNames: string[] = [];
+        let folderNames: string[] = [];
+
+        async function readDir(directory: FileSystemDirectoryHandle, currentPath: string) {
+            for await (const [name, handle] of directory) {
+                const fullPath = currentPath + (currentPath ? "/" : "") + name;
+
+                if (handle.kind === "directory") {
+                    folderNames.push(fullPath);
+                    if (!config?.shallow) {
+                        await readDir(handle, fullPath);
+                    }
+                } else {
+                    fileNames.push(fullPath);
+                }
+            }
+        }
+
+        try {
+            // Start from the root directory of our namespace
+            const rootDirectory = await this.getOrCreateDirectory("");
+            await readDir(rootDirectory, "");
+
+            let results = config?.type === "folders" ? folderNames : fileNames;
+            results = results.filter(name => name.startsWith(prefix));
+
+            if (config?.shallow) {
+                let targetDepth = prefix.split("/").length;
+                results = results.filter(name => name.split("/").length === targetDepth);
+            }
+
+            return results;
+        } catch (e: any) {
+            if (e.name === "NotFoundError") {
+                return [];
+            }
+            throw e;
+        }
+    }
+
+    @measureFnc
+    public async findInfo(prefix: string, config?: { shallow?: boolean; type: "files" | "folders" }): Promise<{ path: string; createTime: number; size: number; }[]> {
+        let files = await this.find(prefix, config);
+        return (await Promise.all(files.map(async file => {
+            try {
+                const info = await this.getInfo(file);
+                if (!info) return undefined;
+                return { path: file, createTime: info.writeTime, size: info.size };
+            } catch {
+                return undefined;
+            }
+        }))).filter(isDefined);
+    }
+
+    @measureFnc
+    public async getInfo(pathInput: string): Promise<{
+        writeTime: number;
+        size: number;
+    } | undefined> {
+        const pathParts = pathInput.split("/");
+        const filename = pathParts[pathParts.length - 1];
+        const dirPath = pathParts.length > 1 ? pathParts.slice(0, -1).join("/") : "";
+
+        try {
+            const directory = await this.getOrCreateDirectory(dirPath);
+            const fileHandle = await directory.getFileHandle(filename, { create: false });
+            const file = await fileHandle.getFile();
+
+            return {
+                writeTime: file.lastModified,
+                size: file.size,
+            };
+        } catch (e: any) {
+            if (e.name === "NotFoundError") {
+                return undefined;
+            }
+            throw e;
+        }
+    }
+
+    public async assertPathValid(path: string) {
+        // Private file system has fewer restrictions than regular file system
+        // Most characters are allowed, so we just check for obviously problematic patterns
+        if (path.includes("..")) {
+            throw new Error(`Invalid path contains '..': ${path}`);
+        }
+        if (path.startsWith("/")) {
+            throw new Error(`Invalid path starts with '/': ${path}`);
+        }
+    }
+
+    @measureFnc
+    public async move(config: {
+        path: string;
+        target: Archives;
+        targetPath: string;
+    }) {
+        let { path, target, targetPath } = config;
+
+        // Unwrap any nested archives
+        while (true) {
+            let targetUnwrapped = target.getBaseArchives?.();
+            if (!targetUnwrapped) break;
+            target = targetUnwrapped.archives;
+            targetPath = targetUnwrapped.parentPath + targetPath;
+        }
+
+        // For moves, we can optimize if both source and target are private file system
+        if (target instanceof ArchivesPrivateFileSystem) {
+            // Private File System API doesn't have a native move operation
+            // So we fall back to copy + delete
+            await this.copy({ path, target, targetPath });
+            await this.del(path);
+        } else {
+            // Moving to a different archive type - copy then delete
+            let data = await this.get(path);
+            if (!data) throw new Error(`File not found to move: ${path}`);
+            await target.set(targetPath, data);
+            await this.del(path);
+        }
+    }
+
+    @measureFnc
+    public async copy(config: {
+        path: string;
+        target: Archives;
+        targetPath: string;
+    }) {
+        let { path, target, targetPath } = config;
+
+        // Unwrap any nested archives
+        while (true) {
+            let targetUnwrapped = target.getBaseArchives?.();
+            if (!targetUnwrapped) break;
+            target = targetUnwrapped.archives;
+            targetPath = targetUnwrapped.parentPath + targetPath;
+        }
+
+        // Get the data and set it in the target
+        let data = await this.get(path);
+        if (!data) throw new Error(`File not found to copy: ${path}`);
+        await target.set(targetPath, data);
+    }
+}
+
+export const getArchivesPrivateFileSystem = cache((rootDir: string): Archives => {
+    return new ArchivesPrivateFileSystem(rootDir);
+});
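The new ArchivesPrivateFileSystem backs the Archives interface with the browser's Origin Private File System (navigator.storage.getDirectory). A minimal usage sketch, assuming only the methods shown above and a Buffer polyfill in the browser bundle; the root directory and file names here are invented for illustration:

import { getArchivesPrivateFileSystem } from "./archivesPrivateFileSystem";

async function demoPrivateFsArchive() {
    // Each rootDir becomes a namespaced directory tree inside the origin's private file system.
    const archives = getArchivesPrivateFileSystem("demoCache/");
    await archives.set("2024/06/app.log", Buffer.from("hello world"));

    const whole = await archives.get("2024/06/app.log");                                    // full read
    const head = await archives.get("2024/06/app.log", { range: { start: 0, end: 5 } });    // byte-range read
    const files = await archives.find("2024/", { shallow: false, type: "files" });

    console.log(whole?.toString(), head?.toString(), files);
}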
package/src/-a-auth/certs.ts
CHANGED
@@ -27,7 +27,7 @@ const timeInDay = 1000 * 60 * 60 * 24;

 export const CA_NOT_FOUND_ERROR = "18aa7318-f88f-4d2d-b41f-3daf4a433827";

-export const identityStorageKey = "
+export const identityStorageKey = "machineCA_9";
 export type IdentityStorageType = { domain: string; certB64: string; keyB64: string };

 let identityCACached = createKeyStore<IdentityStorageType>(identityStorageKey);
@@ -69,9 +69,14 @@ export function createX509(
     const commonNameAttrs = [{ name: "commonName", value: domain }];
     certObj.setSubject(commonNameAttrs);

+
+    let machineId: string;
     if (issuer === "self") {
         certObj.setIssuer(commonNameAttrs);
+        machineId = getDomainPartFromPublicKey(keyPair.publicKey);
     } else {
+        let certParsed = parseCert(issuer.cert.toString());
+        machineId = getDomainPartFromPublicKey(certParsed.publicKey);
         certObj.setIssuer(forge.pki.certificateFromPem(issuer.cert.toString()).subject.attributes);
     }

@@ -83,6 +88,18 @@ export function createX509(

     let localHostDomain = "127-0-0-1." + domain.split(".").slice(-2).join(".");

+    //todonext
+    // Wait, why is one of our alt names not the machine URL that includes the machine hash? Oh, I guess it can't be because we don't know the hash until we create the cert, but we kind of do. Or we can at least.
+    // I guess the real question is why was this not an issue before? Were we adding it before and then we stopped adding it? Were we adding the thread certificate before to trust?
+    // I think it's fine to change this behavior, although it is kind of annoying to get the public key here (I think we have to derive it from the private key. What a nightmare.
+    // Maybe we had just an issue where we weren't correctly verifying the certificate? No, but that doesn't make any sense because Node.js is verifying the certificate.
+    //todonext
+    // Our trust store hasn't changed, it's just our machine ID that's changed. And even if it has changed, that's fine. But anyway, we should check our trust store to see what the old certificate looked like.
+    //todonext;
+    // Huh, the certificates did used to have the machine ID in them. And actually the domain I'm looking at, I'm pretty sure we were adding, that doesn't make any sense, the thread IDs? I don't know, maybe it was just totally borked before, maybe explain some of the issues we were having. It could be why sometimes startup failed. Maybe what happened was it only works if... When a node starts, it tells all other nodes that it changed the trust cache. And in that way, if they start in order, even though they keep breaking the trust cache by clobbering the machine CA certificate, We end up with all the certificates.
+    // I do really like the Node.js error. It's extremely specific. It even gives us the full cert alt names. Very good error. Without this error it would be extremely difficult to debug this.
+
+
     extensions.push(...[
         { name: "keyUsage", keyCertSign: isCA, digitalSignature: true, nonRepudiation: true, keyEncipherment: true, dataEncipherment: true },
         { name: "subjectKeyIdentifier" },
@@ -92,6 +109,7 @@ export function createX509(
         { type: 2, value: domain },
         { type: 2, value: "*." + domain },
         { type: 2, value: localHostDomain },
+        { type: 2, value: `*.${machineId}.${domain}` },
         // NOTE: No longer allow 127.0.0.1, to make this more secure. We might enable this
         // behavior behind a flag, for development.
         //{ type: 7, ip: "127.0.0.1" }
@@ -123,10 +141,19 @@ export function createX509(
     });

     return measureBlock(function toPems() {
+        let cert = Buffer.from(forge.pki.certificateToPem(certObj));
+        let key = Buffer.from(privateKeyToPem(keyPair.privateKey));
+        if (issuer === "self") {
+            let certParsed = parseCert(cert);
+            let publicKeyPart = getDomainPartFromPublicKey(certParsed.publicKey);
+            if (publicKeyPart !== machineId) {
+                throw new Error(`PublicKey inside of certificate was different than expected. This means our altNames will be incorrect. Expected ${machineId}, got ${publicKeyPart}`);
+            }
+        }
         return {
             domain,
-            cert
-            key
+            cert,
+            key,
         };
     });
 });
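The practical effect of the createX509 change is one extra subject-alt-name entry that embeds the machine hash, checked against the key in toPems(). A small illustrative sketch (the domain and machineId values are invented, not taken from the package) of the SAN list the function now produces:

// Illustrative only: mirrors the altName construction shown in the hunks above.
function exampleAltNames(domain: string, machineId: string): string[] {
    const localHostDomain = "127-0-0-1." + domain.split(".").slice(-2).join(".");
    return [
        domain,
        "*." + domain,
        localHostDomain,
        `*.${machineId}.${domain}`, // new entry: per-machine wildcard, verified against the public key in toPems()
    ];
}

// exampleAltNames("example.querysub.com", "AbC123")
// -> ["example.querysub.com", "*.example.querysub.com", "127-0-0-1.querysub.com", "*.AbC123.example.querysub.com"]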
@@ -207,7 +234,8 @@ function getDomainPartFromPublicKey(publicKey: { publicKeyBytes: Buffer } | forg
     } else {
         bytes = Buffer.from(new Uint32Array((publicKey as any).n.data).buffer);
     }
-
+    // This used to prepend "b", but... why? It is annoying as it made hashes look more similar than they should. Random is much better.
+    return sha265.sha256(Buffer.from(bytes)).slice(0, 16).replaceAll("+", "-").replaceAll("/", "_");
 }

 export function validateCACert(cert: string | Buffer) {
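For readers wondering what the 16-character machine hash is: assuming sha265.sha256 returns a base64 string (which the "+" and "/" replacements suggest), the derivation is roughly equivalent to this standalone sketch using node:crypto; the function name and input are hypothetical, not part of the package:

import { createHash } from "node:crypto";

// Hypothetical stand-in for getDomainPartFromPublicKey's final step: hash the public key
// bytes, keep the first 16 base64 characters, and swap "+"/"/" for "-"/"_" so the result
// is safe to embed inside a hostname label.
function domainPartFromBytes(publicKeyBytes: Buffer): string {
    return createHash("sha256")
        .update(publicKeyBytes)
        .digest("base64")
        .slice(0, 16)
        .replaceAll("+", "-")
        .replaceAll("/", "_");
}

// The result is a stable 16-character label derived from the key, which is what the
// certificate altNames and the machineId comparison below rely on.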
@@ -219,16 +247,13 @@ export function validateCACert(cert: string | Buffer) {
     let domainParts = subject.split(".").reverse();

     let rootDomainParsed = [domainParts.shift(), domainParts.shift()].reverse().join(".");
-    if (
-
-
-
-
-    }
-    //
-    if (domainParts[0] === "noproxy") {
-        domainParts.shift();
-    }
+    // NOTE: This doesn't seem to be required. All the validation is doing is making sure the hashes match up, just for identification, not for trust (trust happens in NetworkTrust2, which trusts if the identity is in the archives). And to be extra sure, SocketFunction only allows connections (in either direction) if it's a real certificate, or we explicitly trusted it. SO, if it's a random domain, it won't be trusted (unless we trust that domain), but it SHOULD be identified!
+    // if (rootDomainParsed !== getDomain()) {
+    //     // This is important, as our trust store contains more then just OUR certificates,
+    //     // so if we allow any domains then real domains can impersonate anyone! It has to
+    //     // be one OUR domain to be trusted!
+    //     throw new Error(`Certificate root domain should be ${getDomain()}, but is ${rootDomainParsed}`);
+    // }

     let certExpectedPublicKeyPart = (domainParts.shift() || "").split("-").slice(-1)[0];
     let certActualPublicKeyPart = getDomainPartFromPublicKey(certParsed.publicKey);
@@ -266,13 +291,10 @@ export function validateCertificate(cert: Buffer | string, issuerCert: Buffer |
     let domainParts = subject.split(".").reverse();

     let rootDomainParsed = [domainParts.shift(), domainParts.shift()].reverse().join(".");
-    if (
-
-    }
-    //
-    if (domainParts[0] === "noproxy") {
-        domainParts.shift();
-    }
+    // NOTE: This doesn't seem to be required. All the validation is doing is making sure the hashes match up, just for identification, not for trust (trust happens in NetworkTrust2, which trusts if the identity is in the archives). And to be extra sure, SocketFunction only allows connections (in either direction) if it's a real certificate, or we explicitly trusted it. SO, if it's a random domain, it won't be trusted (unless we trust that domain), but it SHOULD be identified!
+    // if (rootDomainParsed !== getDomain()) {
+    //     throw new Error(`Certificate root domain should be ${getDomain()}, but is ${rootDomainParsed}`);
+    // }

     let issuerCertParsed = parseCert(issuerCert);

@@ -431,8 +453,8 @@ export function createCertFromCA(config: {
     });
 }

-export function getMachineId(
-    return
+export function getMachineId(domainNameOrNodeId: string) {
+    return decodeNodeIdAssert(domainNameOrNodeId, "allowMissingThreadId").machineId;
 }

 export type NodeIdParts = {
@@ -441,7 +463,7 @@ export type NodeIdParts = {
     domain: string;
     port: number;
 };
-export function decodeNodeId(nodeId: string): NodeIdParts | undefined {
+export function decodeNodeId(nodeId: string, allowMissingThreadId?: "allowMissingThreadId"): NodeIdParts | undefined {
     let locationObj = getNodeIdLocation(nodeId);
     if (!locationObj) {
         return undefined;
@@ -455,7 +477,8 @@ export function decodeNodeId(nodeId: string): NodeIdParts | undefined {
             port: locationObj.port,
         };
     }
-
+    let isValid = parts.length >= 4 || allowMissingThreadId && parts.length === 3;
+    if (!isValid) {
         return undefined;
     }
     return {
@@ -465,8 +488,8 @@ export function decodeNodeId(nodeId: string): NodeIdParts | undefined {
         port: locationObj.port,
     };
 }
-export function decodeNodeIdAssert(nodeId: string): NodeIdParts {
-    let result = decodeNodeId(nodeId);
+export function decodeNodeIdAssert(nodeId: string, allowMissingThreadId?: "allowMissingThreadId"): NodeIdParts {
+    let result = decodeNodeId(nodeId, allowMissingThreadId);
     if (!result) {
         throw new Error(`Invalid nodeId: ${nodeId}`);
     }
@@ -488,9 +511,13 @@ export function getIdentityCA() {
     return identityCA();
 }

-export
-    return getMachineId(getIdentityCA().domain);
-}
+export const getOwnMachineId = lazy(() => {
+    return getMachineId("threadId." + getIdentityCA().domain);
+});
+export const getOwnThreadId = lazy(() => {
+    let threadKeyCert = getThreadKeyCert();
+    return decodeNodeIdAssert(threadKeyCert.domain).threadId;
+});

 /** Part of the machineId comes from the publicKey, so we can use it to verify */
 export function verifyMachineIdForPublicKey(config: {
@@ -499,7 +526,7 @@ export function verifyMachineIdForPublicKey(config: {
 }): boolean {
     let { machineId, publicKey } = config;
     let domainPart = getDomainPartFromPublicKey(publicKey);
-    return machineId
+    return machineId === domainPart;
 }

 // NOTE: We don't have a cache per CA, as... the CA should be set first
package/src/-b-authorities/cdnAuthority.ts
CHANGED
@@ -29,7 +29,7 @@ export async function hostArchives(config: {
     }

     let archiveT = archives as ArchivesBackblaze;
-    let baseURL = await archiveT.
+    let baseURL = await archiveT.getURL!("");
     // Remove the trailing slash if it exists
     if (baseURL.endsWith("/")) {
         baseURL = baseURL.slice(0, -1);
@@ -43,7 +43,7 @@ export async function hostArchives(config: {
     if (path.startsWith("/")) {
         path = path.slice(1);
     }
-    let targetPath = await archiveT.
+    let targetPath = await archiveT.getURL!(parentPath + path);
     let url = new URL(targetPath);
     url.hostname = subdomain + "." + domain;
     return url.toString();
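To make the hostArchives change concrete: getURL! returns the raw storage URL, and the hostname is then swapped for the CDN subdomain. A tiny standalone sketch of that rewrite (the URL, subdomain, and domain are invented for illustration):

// Illustrative only: the same URL rewrite hostArchives performs on the value
// returned by archiveT.getURL!(...).
const targetPath = "https://f002.backblazeb2.com/file/my-bucket/assets/app.js";
const url = new URL(targetPath);
url.hostname = "cdn" + "." + "example.com"; // subdomain + "." + domain
console.log(url.toString()); // https://cdn.example.com/file/my-bucket/assets/app.js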
package/src/-b-authorities/dnsAuthority.ts
CHANGED
@@ -10,6 +10,7 @@ import debugbreak from "debugbreak";
 import { isClient } from "../config2";
 import { getArchives } from "../-a-archives/archives";
 import { cloudflareCall, cloudflareGETCall, cloudflarePOSTCall, getCloudflareCreds } from "./cloudflareHelpers";
+import { magenta } from "socket-function/src/formatting/logColors";

 const DNS_TTLSeconds = {
     "TXT": 60,
@@ -87,14 +88,14 @@ export async function setRecord(type: string, key: string, value: string, proxie
     // says "an identical record already exists", even though it doesn't, we changed the proxied value...
     if (prevValues.some(x => x.content === value)) return;

-    console.log(`Removing previous records of ${type} for ${key} ${JSON.stringify(prevValues.map(x => x.content))}`);
+    console.log(magenta(`Removing previous records of ${type} for ${key} ${JSON.stringify(prevValues.map(x => x.content))}`));
     let didDeletions = false;
     for (let value of prevValues) {
         didDeletions = true;
         await cloudflareCall(`/zones/${zoneId}/dns_records/${value.id}`, Buffer.from([]), "DELETE");
     }

-    console.log(`Setting ${type} record for ${key} to ${value} (previously had ${JSON.stringify(prevValues.map(x => x.content))})`);
+    console.log(magenta(`Setting ${type} record for ${key} to ${value} (previously had ${JSON.stringify(prevValues.map(x => x.content))})`));
     const ttl = DNS_TTLSeconds[type as "A"] || 60;
     await cloudflarePOSTCall(`/zones/${zoneId}/dns_records`, {
         type: type,
package/src/-c-identity/IdentityController.ts
CHANGED
@@ -148,10 +148,11 @@ class IdentityControllerBase {
         }

         let pubKey = getPublicIdentifier(payload.cert);
+        let machineId = getMachineId(getCommonName(payload.certIssuer));

         callerInfo.set(caller, {
             cert: { certPEM: payload.cert, issuerPEM: payload.certIssuer },
-            machineId
+            machineId,
             reconnectNodeId,
             pubKey,
             pubKeyShort: getShortNumber(pubKey),
@@ -197,7 +198,7 @@ const changeIdentityOnce = cacheWeak(async function changeIdentityOnce(connectio
 });
 SocketFunction.addGlobalClientHook(async function identityHook(context) {
     if (context.call.classGuid === IdentityController._classGuid) return;
-    //
+    // This is for US to tell them our identity. And if they established the connection the identity will come from their original connection url (that they used to connect to us), and they validated it either being a real certificate, or they added the cert from the trusted backblaze bucket. If it just from a real certificate it means we identified them, but they might not have network trust. But that's fine, as IdentityController is JUST for identification, and if it's a real certificate we know who they are! (Which doesn't mean we trust them).
     if (isClientNodeId(context.call.nodeId)) {
         return;
     }
package/src/-d-trust/NetworkTrust2.ts
CHANGED
@@ -1,5 +1,5 @@
 import { measureWrap } from "socket-function/src/profiling/measure";
-import {
+import { getIdentityCA, getMachineId, getOwnMachineId } from "../-a-auth/certs";
 import { getArchives } from "../-a-archives/archives";
 import { isNode, throttleFunction, timeInSecond } from "socket-function/src/misc";
 import { SocketFunctionHook } from "socket-function/SocketFunctionTypes";
@@ -14,12 +14,13 @@ import { devDebugbreak, getDomain, isDevDebugbreak, isPublic, isRecovery } from
 import { formatTime } from "socket-function/src/formatting/format";
 import { runInSerial } from "socket-function/src/batching";
 import { Querysub } from "../4-querysub/QuerysubController";
+import { magenta } from "socket-function/src/formatting/logColors";

 // Cache the untrust list, to prevent bugs from causing too many backend reads (while also allowing
 // bad servers which make request before their trust is verified from staying broken).
 const UNTRUST_CACHE_TIME = 30 * timeInSecond;

-const archives = lazy(() => getArchives("
+const archives = lazy(() => getArchives("trust2/"));

 export const requiresNetworkTrustHook: SocketFunctionHook = async config => {
     // HACK: On the clientside we strip the domain process and machine id, so we can no longer determine
@@ -39,10 +40,11 @@ export const requiresNetworkTrustHook: SocketFunctionHook = async config => {
     if (getNodeIdIP(caller.nodeId) === "127.0.0.1" && isRecovery()) {
         return;
     }
-    let machineId = IdentityController_getMachineId(
+    let machineId = IdentityController_getMachineId(caller);
     let trusted = await isTrusted(machineId);
     if (!trusted) {
         devDebugbreak();
+        let machineId = IdentityController_getMachineId(caller);
         throw new Error(`Calling machine is not trusted. Caller ${machineId} is not trusted by ${SocketFunction.mountedNodeId} to make call ${config.call.classGuid}.${config.call.functionName}. To gain trust add backblaze permissions (see hasBackblazePermissions) or set --nonetwork.`);
     }
 };
@@ -78,6 +80,7 @@ const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machi
         lastArchivesTrusted = trustedMachineIds.slice();
         for (let trustedMachineId of trustedMachineIds) {
             trustedCache.add(trustedMachineId);
+            // NOTE: We don't load trust certs here, as we need to load them on demand in case the trust changes after our initial startup.
         }
     } else {
         // Checking a single entry is a lot faster (as find is slow)
@@ -88,14 +91,10 @@ const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machi
     }
     // Always trust ourself
     trustedCache.add(getOwnMachineId());
-
-    //
-    // connected to by attackers (as dev machines might reveal unfinished content, or even
-    // have security vulnerabilities).
-    // - Don't trust this on public, as in theory an attacker MIGHT be able to connect
-    // from localhost (but not have disk read/write access)? Maybe...
+
+    // NOTE: The only happens in the case WE connected to it (ex, "127-0-0-1.querysubtest.com:15358"). It can't look like this if it connected to us, in which case the nodeId will be "client:...", being mostly random, and created by us (UNTIL they prove they have another id). So... I'm not even sure the isPublic check is required? We only connect to nodes we discover through node discovery, which requires backblaze write permissions. But I guess it's fine to be extra careful about it...
     if (!isPublic()) {
-        trustedCache.add("127-0-0-1
+        trustedCache.add("127-0-0-1");
     }

     if (!trustedCache.has(machineId)) {
@@ -121,15 +120,14 @@ const loadServerCert = cache(async (machineId: string) => {
         console.warn(`Could not find certificate in archives for ${machineId}`);
         return;
     }
+    console.log(magenta(`Loading certificate for ${machineId}`));
     trustCertificate(certFile);
 });

-const ensureWeAreTrusted = lazy(measureWrap(async () => {
+export const ensureWeAreTrusted = lazy(measureWrap(async () => {
     let machineKeyCert = getIdentityCA();
-    let machineId =
-
-    await isTrusted(machineId);
-    if (!lastArchivesTrusted?.includes(machineId)) {
+    let machineId = getOwnMachineId();
+    if (!await archives().get(machineId)) {
         await archives().set(machineId, machineKeyCert.cert);
     }
 }));
@@ -168,6 +166,7 @@ const TrustedController = SocketFunction.register(
 if (isNode()) {
     // We have to be trusted if we make calls to a trusted endpoint, OR our mounting
     // (really only if we are mounting a trusted endpoint, but we don't actually know that)
+    // ONLY done on received calls, not on calls we made. If we make a call we assume that the server we called is known to us through a trusted route, and therefore, trusted.
     requiresNetworkTrustHook.clientHook = async config => {
         await ensureWeAreTrusted();
     };
@@ -175,10 +174,9 @@ if (isNode()) {
     // Load the remote certificate, in the almost certain case it isn't a real certificate, and is just internal
     SocketFunction.addGlobalClientHook(async config => {
         await measureWrap(async function checkTrust() {
-
-
-
-            ]);
+            // IMPORTANT! We SHOULDN'T need to add our machineId before we connect, as we only check machineId on received calls, so we only need to set it before we make a call (so by the time anyone receives a call, they trust us!)
+            await ensureWeAreTrusted();
+            await loadTrustCerts(config.call.nodeId);
         })();
     });
 }
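Taken together, the NetworkTrust2 changes make trust a lookup against the "trust2/" archives, which each node populates before making outgoing calls. A simplified, self-contained model of that flow (an illustration of the logic above, not the package's API; the in-memory map stands in for getArchives("trust2/"), and caching, recovery, and localhost cases are omitted):

// Simplified in-memory model of the trust flow described by the hunks above.
type TrustStore = Map<string, Buffer>; // machineId -> machine CA cert (PEM)

// Before making an outgoing call: publish our cert under our machineId so the
// callee can look it up on demand (mirrors ensureWeAreTrusted).
function ensureWeAreTrustedModel(store: TrustStore, ownMachineId: string, ownCert: Buffer): void {
    if (!store.has(ownMachineId)) {
        store.set(ownMachineId, ownCert);
    }
}

// On a received call only: the caller is trusted iff its machineId is present in the
// store (mirrors isTrusted / requiresNetworkTrustHook).
function isTrustedModel(store: TrustStore, callerMachineId: string): boolean {
    return store.has(callerMachineId);
}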