querysub 0.312.0 → 0.313.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/.cursorrules +1 -1
  2. package/costsBenefits.txt +4 -1
  3. package/package.json +3 -2
  4. package/spec.txt +23 -18
  5. package/src/-0-hooks/hooks.ts +1 -1
  6. package/src/-a-archives/archives.ts +16 -3
  7. package/src/-a-archives/archivesBackBlaze.ts +51 -3
  8. package/src/-a-archives/archivesLimitedCache.ts +175 -0
  9. package/src/-a-archives/archivesPrivateFileSystem.ts +299 -0
  10. package/src/-a-auth/certs.ts +58 -31
  11. package/src/-b-authorities/cdnAuthority.ts +2 -2
  12. package/src/-b-authorities/dnsAuthority.ts +3 -2
  13. package/src/-c-identity/IdentityController.ts +3 -2
  14. package/src/-d-trust/NetworkTrust2.ts +17 -19
  15. package/src/-e-certs/EdgeCertController.ts +3 -4
  16. package/src/-e-certs/certAuthority.ts +1 -2
  17. package/src/-f-node-discovery/NodeDiscovery.ts +9 -7
  18. package/src/-g-core-values/NodeCapabilities.ts +6 -1
  19. package/src/0-path-value-core/NodePathAuthorities.ts +1 -1
  20. package/src/0-path-value-core/PathValueCommitter.ts +3 -3
  21. package/src/0-path-value-core/PathValueController.ts +3 -3
  22. package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +15 -37
  23. package/src/0-path-value-core/pathValueCore.ts +4 -3
  24. package/src/3-path-functions/PathFunctionRunner.ts +2 -2
  25. package/src/4-dom/qreact.tsx +4 -3
  26. package/src/4-querysub/Querysub.ts +2 -2
  27. package/src/4-querysub/QuerysubController.ts +2 -2
  28. package/src/5-diagnostics/GenericFormat.tsx +1 -0
  29. package/src/5-diagnostics/Table.tsx +3 -0
  30. package/src/5-diagnostics/diskValueAudit.ts +2 -1
  31. package/src/5-diagnostics/nodeMetadata.ts +0 -1
  32. package/src/deployManager/components/MachineDetailPage.tsx +9 -1
  33. package/src/deployManager/components/ServiceDetailPage.tsx +10 -1
  34. package/src/diagnostics/NodeViewer.tsx +3 -4
  35. package/src/diagnostics/logs/FastArchiveAppendable.ts +748 -0
  36. package/src/diagnostics/logs/FastArchiveController.ts +524 -0
  37. package/src/diagnostics/logs/FastArchiveViewer.tsx +863 -0
  38. package/src/diagnostics/logs/LogViewer2.tsx +349 -0
  39. package/src/diagnostics/logs/TimeRangeSelector.tsx +94 -0
  40. package/src/diagnostics/logs/diskLogger.ts +135 -305
  41. package/src/diagnostics/logs/diskShimConsoleLogs.ts +6 -29
  42. package/src/diagnostics/logs/errorNotifications/ErrorNotificationController.ts +577 -0
  43. package/src/diagnostics/logs/errorNotifications/ErrorSuppressionUI.tsx +225 -0
  44. package/src/diagnostics/logs/errorNotifications/ErrorWarning.tsx +207 -0
  45. package/src/diagnostics/logs/importLogsEntry.ts +38 -0
  46. package/src/diagnostics/logs/injectFileLocationToConsole.ts +7 -17
  47. package/src/diagnostics/logs/lifeCycleAnalysis/lifeCycles.tsx +0 -0
  48. package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +151 -0
  49. package/src/diagnostics/managementPages.tsx +7 -16
  50. package/src/diagnostics/misc-pages/ComponentSyncStats.tsx +0 -1
  51. package/src/diagnostics/periodic.ts +5 -0
  52. package/src/diagnostics/watchdog.ts +2 -2
  53. package/src/functional/SocketChannel.ts +67 -0
  54. package/src/library-components/Input.tsx +1 -1
  55. package/src/library-components/InputLabel.tsx +5 -2
  56. package/src/misc.ts +111 -0
  57. package/src/src.d.ts +34 -1
  58. package/src/user-implementation/userData.ts +4 -3
  59. package/test.ts +13 -0
  60. package/testEntry2.ts +29 -0
  61. package/src/diagnostics/errorLogs/ErrorLogController.ts +0 -535
  62. package/src/diagnostics/errorLogs/ErrorLogCore.ts +0 -274
  63. package/src/diagnostics/errorLogs/LogClassifiers.tsx +0 -308
  64. package/src/diagnostics/errorLogs/LogFilterUI.tsx +0 -84
  65. package/src/diagnostics/errorLogs/LogNotify.tsx +0 -101
  66. package/src/diagnostics/errorLogs/LogTimeSelector.tsx +0 -723
  67. package/src/diagnostics/errorLogs/LogViewer.tsx +0 -757
  68. package/src/diagnostics/errorLogs/logFiltering.tsx +0 -149
  69. package/src/diagnostics/logs/DiskLoggerPage.tsx +0 -613
package/src/diagnostics/logs/diskLogger.ts
@@ -1,88 +1,72 @@
-
- import { batchFunction, runInSerial, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
- import { nextId, timeInDay, timeInHour } from "socket-function/src/misc";
- import { getStorageDir, getStorageFolder } from "../../fs";
- import fs from "fs";
- import { canHaveChildren } from "socket-function/src/types";
- import { SizeLimiter } from "../SizeLimiter";
- import { SocketFunction } from "socket-function/SocketFunction";
  import { isNode } from "typesafecss";
- import { logGitHashes } from "./logGitHashes";
- import { Zip } from "../../zip";
- import { formatNumber } from "socket-function/src/formatting/format";
+ import { partialCopyObject } from "../../misc";
+ import { canHaveChildren } from "socket-function/src/types";
+ import { lazy } from "socket-function/src/caching";
+ import { timeInMinute } from "socket-function/src/misc";
+ import { formatTime } from "socket-function/src/formatting/format";
+ import { addEpsilons } from "../../bits";
+ import { FileMetadata } from "./FastArchiveController";
+ // IMPORTANT! We can't have any real imports here, because we are depended on so early in startup!

  if (isNode()) {
  // Delayed setup, as we depend on diskLogger early, and we don't want to force high level
  // modules to be required before their level
  setImmediate((async () => {
- await import("./DiskLoggerPage");
  const { addBuiltInContext } = await import("./diskLogGlobalContext");
  addBuiltInContext();
+ const { logGitHashes } = await import("./logGitHashes");
  await logGitHashes();
  }));
  }

- // TODO: If we run into size problems with our logs, we might want to use compression?
- // - We want chunks to be < 4096, but... we might still be able to compress a bit?
-
- let folder = getStorageFolder("disklogs");
-
- let SIZE_LIMIT = new SizeLimiter({
- diskRoot: getStorageDir(),
-
- maxBytes: 1024 * 1024 * 1024 * 1,
- minBytes: 1024 * 1024 * 10,
- maxDiskFraction: 0.02,
- maxTotalDiskFraction: 0.95,
- maxFiles: 1000,
- });
-
- const LOG_FILE_DURATION = timeInDay;
-
- export type LogObj = {
- [key: string]: unknown;
+ // NOTE: When logging we spread objects. If we encounter strings, we set the field `param${index}`
+ export type LogDatum = Record<string, unknown> & {
  time: number;
+ __LOG_TYPE: string;
+ __machineId?: string;
+ __threadId?: string;
+ __entry?: string;
+ param0?: string;
+ __DIR__?: string;
+ __NAME__?: string;
+ __LINE__?: string;
+ /** Dynamically set at runtime in the frontend. */
+ __metadata?: FileMetadata;
+ /** Dynamically set when matching recent errors only. */
+ __matchedOutdatedSuppressionKey?: string;
  };
-
- // NOTE: This is visible, otherwise it's easy to accidentally copy it, and not know why
- // the text is behaving strangely (not === other seemingly equal text, etc).
- // NOTE: Also hardcoded in measure.ts (in socket-function)
- export const noDiskLogPrefix = "█ ";
- export const diskLog = logDisk;
- export function logDisk(...args: unknown[]) {
- if (!isNode()) return;
- try {
- if (args.length === 0) return;
- // Move the first string argument to the front
- if (args.length > 0 && typeof args[0] !== "string" && args.some(x => typeof x === "string")) {
- let strIndex = args.findIndex(x => typeof x === "string");
- let str = args[strIndex];
- args.splice(strIndex, 1);
- args.unshift(str);
- }
- if (args.length > 0 && typeof args[0] === "string" && args[0].trim().length === 0) return;
- if (args.length > 0 && typeof args[0] === "string" && args[0].startsWith(noDiskLogPrefix)) return;
- let logObj = packageLogObj(args);
- logQueue(logObj);
- } catch (e: any) {
- process.stdout.write(`Error writing to disk logs: ${e.stack || e}\n\t${String(args[0])}\n`);
+ export const LOG_LIMIT_FLAG = String.fromCharCode(44533) + "LOGS_LIMITED_FLAG-9277640b-d709-4591-ab08-2bb29bbb94f4";
+
+ export const getLoggers = lazy(function () {
+ const { FastArchiveAppendable } = require("./FastArchiveAppendable") as typeof import("./FastArchiveAppendable");
+ if (!FastArchiveAppendable) {
+ setImmediate(() => {
+ getLoggers.reset();
+ });
+ return undefined;
  }
- }
- let logPending: LogObj[] = [];
- function logQueue(log: LogObj) {
- if (logPending.length === 0) {
- void Promise.resolve().finally(logClearQueue);
+ return {
+ logLogs: new FastArchiveAppendable<LogDatum>("logs-log/"),
+ warnLogs: new FastArchiveAppendable<LogDatum>("logs-warn/"),
+ infoLogs: new FastArchiveAppendable<LogDatum>("logs-info/"),
+ errorLogs: new FastArchiveAppendable<LogDatum>("logs-error/"),
+ };
+ });
+ const getNotifyErrors = lazy(function () {
+ const { notifyWatchersOfError: notifyErrors } = require("./errorNotifications/ErrorNotificationController") as typeof import("./errorNotifications/ErrorNotificationController");
+ if (typeof notifyErrors !== "function") {
+ setImmediate(() => {
+ getNotifyErrors.reset();
+ });
+ return undefined;
  }
- logPending.push(log);
- }
- function logClearQueue() {
- let commit = logPending;
- logPending = [];
- logBase(commit).catch(e => {
- process.stdout.write(`Error writing to disk logs: ${e.stack || e}\n\t${JSON.stringify(commit[0])}\n`);
- });
- }
+ return notifyErrors;
+ });

+ // NOTE: If any message (first param) starts with this, we don't log it to the disk. VERY useful for multi-line logging where it wouldn't make sense in the logs
+ // NOTE: This is visible, otherwise it's easy to accidentally copy it, and not know why the text is behaving strangely (not === other seemingly equal text, etc).
+ // NOTE: Also hardcoded in measure.ts (in socket-function)
+ export const noDiskLogPrefix = "█ ";

  let globalContextParts: (() => { [key: string]: unknown })[] = [];
  export function addGlobalContext(fnc: () => { [key: string]: unknown }) {
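
Both getLoggers and getNotifyErrors above use socket-function's lazy so they can be called before their dependencies have finished loading: if the required module isn't populated yet they return undefined and schedule a reset, so a later call retries the require. A minimal sketch of that retry pattern, using a hypothetical lazyWithReset helper (and a hypothetical getGreeter/someLateBoundFunction) rather than the real lazy from socket-function/src/caching:

    // Sketch only: a cached factory that can be reset and re-run, mirroring how
    // getLoggers()/getNotifyErrors() tolerate modules that are not loaded yet
    // (e.g. during startup, when requires can still be circular).
    function lazyWithReset<T>(factory: () => T): (() => T) & { reset(): void } {
        let cached: { value: T } | undefined;
        const get = (() => {
            if (!cached) cached = { value: factory() };
            return cached.value;
        }) as (() => T) & { reset(): void };
        get.reset = () => { cached = undefined; };
        return get;
    }

    // Usage in the same spirit as the diff: if the dependency isn't available yet,
    // return undefined and schedule a reset so the next call retries the factory.
    const getGreeter = lazyWithReset((): ((msg: string) => void) | undefined => {
        const fn = (globalThis as any).someLateBoundFunction as ((msg: string) => void) | undefined;
        if (typeof fn !== "function") {
            setImmediate(() => getGreeter.reset());
            return undefined;
        }
        return fn;
    });
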
@@ -92,267 +76,113 @@ export function addGlobalContext(fnc: () => { [key: string]: unknown }) {
  globalContextParts.push(fnc);
  }

+ let startupDone = false;
+ void Promise.resolve().then(() => {
+ startupDone = true;
+ });

+ let logLimitLookup: {
+ resetTime: number;
+ counts: Map<string, number>;
+ } | undefined;

- export type LogFile = {
- startTime: number;
- endTime: number;
- path: string;
- name: string;
-
- // Only set in getLogFiles
- size: number;
- };
+ const LIMIT_PERIOD = timeInMinute * 15;
+ const LIMIT_THRESHOLD = 1000;

- export async function getLogFiles(): Promise<LogFile[]> {
- let files = await fs.promises.readdir(folder);
- let paths = files.map(file => folder + file);
- paths = paths.filter(x => x.endsWith(".log") || x.endsWith(".zip"));
- let objs = paths.map(decodeLogFileName);
- for (let obj of objs) {
- try {
- let stat = await fs.promises.stat(obj.path);
- obj.size = stat.size;
- } catch { }
- }
- return objs;
- }
- export async function getLogBuffer(path: string): Promise<Buffer | undefined> {
- if (!path.startsWith(folder)) throw new Error(`Path must start with ${folder}`);
- let buffer: Buffer | undefined;
+ const logDiskDontShim = logDisk;
+ /** NOTE: Calling this directly means we lose __FILE__ tracking. But... that's probably fine... */
+ export function logDisk(type: "log" | "warn" | "info" | "error", ...args: unknown[]) {
+ if (!isNode()) return;
  try {
- buffer = await fs.promises.readFile(path);
- } catch { }
- return buffer;
- }
- export function parseLogBuffer(buffer: Buffer): LogObj[] {
- let time = Date.now();
- console.log(`Parsing buffer ${buffer.length}`);
- let pos = 0;
- let logs: LogObj[] = [];
- const newLine = "\n".charCodeAt(0);
- let lastLoggedPos = 0;
- while (pos < buffer.length) {
- let end = buffer.indexOf(newLine, pos);
- if (end === -1) {
- end = buffer.length;
- }
- let line = buffer.slice(pos, end).toString("utf8");
- try {
- logs.push(JSON.parse(line));
- } catch { }
- pos = end + 1;
- if (pos - lastLoggedPos > 1_000_000) {
- console.log(`Parsed ${pos} of ${buffer.length}`);
- lastLoggedPos = pos;
- }
- }
- time = Date.now() - time;
- console.log(`Parsed ${pos} of ${buffer.length} ${formatNumber(time)}`);
- return logs;
- }
+ if (args.length === 0) return;
+ let logType = args.find(x => typeof x === "string") as string | undefined;
+ if (!logType) return;
+ if (logType.startsWith(noDiskLogPrefix)) return;

- function safeCopyObject<T>(obj: T): T {
- try {
- const maxFields = 500;
- let fieldCount = 0;
- const seen = new WeakSet();
+ let logObj = packageLogObj(type, args);

- function copy(value: unknown): unknown {
- // Handle primitives
- if (!canHaveChildren(value)) {
- return value;
- }
+ if (logObj.__FILE__) {
+ logType = String(logObj.__FILE__);
+ }

- // Check for circular references
- if (seen.has(value as object)) {
- return null;
+ if (logLimitLookup) {
+ if (logObj.time > logLimitLookup.resetTime) {
+ logLimitLookup = undefined;
  }
- seen.add(value as object);
+ }
+ if (!logLimitLookup) {
+ logLimitLookup = {
+ resetTime: logObj.time + LIMIT_PERIOD,
+ counts: new Map(),
+ };
+ }

- // Check if we've hit the field limit
- if (fieldCount >= maxFields) {
- return null;
- }
+ let count = logLimitLookup.counts.get(logType) || 0;
+ count++;
+ logLimitLookup.counts.set(logType, count);
+ if (count > LIMIT_THRESHOLD) {
+ let timeUntilReset = logLimitLookup.resetTime - logObj.time;
+ process.stdout.write(`Log type hit limit, not writing log type to disk for ~${formatTime(timeUntilReset)}: ${logType}\n`);
+ return;
+ }
+ if (count === LIMIT_THRESHOLD) {
+ logObj[LOG_LIMIT_FLAG] = true;
+ }

- if (Array.isArray(value)) {
- const result: unknown[] = [];
- for (const item of value) {
- fieldCount++;
- if (fieldCount >= maxFields) break;
- result.push(copy(item));
- }
- seen.delete(value);
- return result;
- }
+ let loggers = startupDone ? getLoggers() : undefined;
+ if (!loggers) {
+ getLoggers.reset();
+ setImmediate(() => {
+ logDiskDontShim(type, ...args);
+ });
+ return;
+ }
+ const { logLogs, warnLogs, infoLogs, errorLogs } = loggers;
+ if (type === "log") {
+ logLogs.append(logObj);
+ } else if (type === "warn") {
+ warnLogs.append(logObj);
+ } else if (type === "info") {
+ infoLogs.append(logObj);
+ } else {
+ errorLogs.append(logObj);
+ }

- // Handle plain objects
- const result: Record<string, unknown> = {};
- for (const key of Object.keys(value as object)) {
- fieldCount++;
- if (fieldCount >= maxFields) break;
- result[key] = copy((value as Record<string, unknown>)[key]);
- }
- seen.delete(value);
- return result;
+ if (type === "warn" || type === "error") {
+ // Dropping notifies is fine, as long as they get added to the logs, we'll see them eventually...
+ void getNotifyErrors()?.(logObj);
  }

- return copy(obj) as any;
  } catch (e: any) {
- return { errorCopying: e.messsage } as any;
+ process.stdout.write(`Error writing to disk logs: ${e.stack || e}\n\t${String(args[0])}\n`);
  }
  }
- let __baseThreadId = nextId();
- function packageLogObj(args: unknown[]): LogObj {
- let logObj: LogObj = {
- time: Date.now(),
- __baseThreadId,
+
+ let lastLogTime = 0;
+
+ function packageLogObj(type: string, args: unknown[]): LogDatum {
+ let now = Date.now();
+ if (now < lastLogTime) {
+ now = addEpsilons(lastLogTime, 1);
+ }
+ lastLogTime = now;
+ let logObj: LogDatum = {
+ time: now,
+ __LOG_TYPE: type,
  };
  for (let part of globalContextParts) {
- Object.assign(logObj, safeCopyObject(part()));
+ Object.assign(logObj, partialCopyObject(part(), 500));
  }
- args = args.map(safeCopyObject);
+ args = args.map(x => partialCopyObject(x, 500));
+ let stringCount = 0;
  for (let i = 0; i < args.length; i++) {
  let param = args[i];
  if (canHaveChildren(param)) {
  Object.assign(logObj, param);
  } else {
- logObj["param" + i] = param;
+ logObj["param" + stringCount] = param;
+ stringCount++;
  }
  }
  return logObj;
- }
-
-
- function decodeLogFileName(path: string): LogFile {
- let name = path.split("/").pop()!;
- let withoutExt = name.split(".").slice(0, -1).join(".");
- let [start, end] = withoutExt.split("-").map(Number);
- return {
- startTime: start,
- endTime: end,
- name,
- path,
- size: 0,
- };
- }
-
- // NOTE: Very little delay, so that during shutdown/crashes we don't lose too much before
- // the crash. One of the biggest reasons for logs is to diagnose crashes, so this is important!
- // NOTE: Not async, so we can properly measure the synchronous overhead of logging (otherwise
- // we are just measuring disk latency...)
- const logBase = batchFunction({ delay: 0 }, function logBase(logObjList: LogObj[][]) {
- let logs = logObjList.flat();
-
- let byLogPath = new Map<string, LogObj[]>();
- for (let log of logs) {
- function createLogFileName(logObj: LogObj): LogFile {
- let start = Math.floor(logObj.time / LOG_FILE_DURATION) * LOG_FILE_DURATION;
- let startTime = start;
- let endTime = start + LOG_FILE_DURATION;
- let name = startTime + "-" + endTime + ".log";
- let path = folder + name;
- let logFile: LogFile = { startTime, endTime, name, path, size: 0, };
- return logFile;
- }
- let logFile = createLogFileName(log);
- let list = byLogPath.get(logFile.path);
- if (!list) {
- list = [];
- byLogPath.set(logFile.path, list);
- }
- list.push(log);
- }
-
- for (let [path, logList] of byLogPath) {
- // Apparently, anything more than this and our writes might not be atomic
- const WRITE_ATOMIC_LIMIT = 4096;
- let lines = logList.map(v => Buffer.from(JSON.stringify(v) + "\n"));
-
- // Group lines into WRITE_ATOMIC_LIMIT byte chunks
- let chunks: Buffer[][] = [];
- let currentChunk: Buffer[] = [];
- let currentSize = 0;
- for (let line of lines) {
- if (currentSize + line.length > WRITE_ATOMIC_LIMIT && currentChunk.length > 0) {
- chunks.push(currentChunk);
- currentChunk = [];
- currentSize = 0;
- }
- currentChunk.push(line);
- currentSize += line.length;
- }
- if (currentChunk.length > 0) {
- chunks.push(currentChunk);
- }
-
- for (let chunk of chunks) {
- void logStreamWrites(path, Buffer.concat(chunk));
- }
- }
- });
- const logStreamWrites = runInSerial(async function (path: string, chunk: Buffer) {
- await fs.promises.appendFile(path, chunk);
- });
-
-
- if (isNode()) {
- runInfinitePoll(timeInHour, async function compressLogs() {
- // Maintain our size restrictions
- let logFiles = await fs.promises.readdir(folder);
- let objs: { time: number; bytes: number; path: string; }[] = [];
- for (let file of logFiles) {
- let path = folder + file;
- let stat = await fs.promises.stat(path);
- objs.push({ time: stat.mtimeMs, bytes: stat.size, path });
- }
- let { remove } = await SIZE_LIMIT.limit(objs);
- for (let file of remove) {
- await fs.promises.unlink(file.path);
- }
- });
- // Wait a random time, so we hopefully don't synchronize with any other services on this machine
- runInfinitePoll(timeInHour * (1 + Math.random()), async function compressOldLogs() {
- let logFiles = await fs.promises.readdir(folder);
- let compressTime = Date.now() - LOG_FILE_DURATION * 2;
- let filesCompressed = 0;
- for (let file of logFiles) {
- if (!file.endsWith(".log")) continue;
- if (filesCompressed === 0) {
- console.log("Compressing old logs");
- }
- filesCompressed++;
- let path = folder + file;
- if (decodeLogFileName(path).endTime > compressTime) continue;
- let basePath = path.split(".").slice(0, -1).join(".");
- let buffer = await fs.promises.readFile(path);
- let beforeSize = buffer.length;
- buffer = await Zip.gzip(buffer);
- console.log(`Compressed ${file} from ${formatNumber(beforeSize)}B to ${formatNumber(buffer.length)}B`);
- let tempPath = basePath + Math.random() + ".temp";
- await fs.promises.writeFile(tempPath, buffer);
- await fs.promises.rename(tempPath, basePath + ".zip");
- await fs.promises.unlink(path);
- }
- if (filesCompressed > 0) {
- console.log(`Compressed ${filesCompressed} old log files`);
- }
- });
- }
-
- /*
- Append Benchmarks
- 10 processes
- Windows = 100K/S
- Linux Digital Ocean = 1M/S
- Linux PI SD Card = 300K/S
- Linux PI USB = 300K/S
- 1 process
- Windows = 40K/S
- Linux Digital Ocean = 685K/S
- Linux PI = 200K/S
-
- rm test.txt
- for i in {0..9}; do node -e 'const fs=require("fs");const id='$i';let i=0;const start=Date.now();while(Date.now()-start<5000){fs.appendFileSync("test.txt", `${id},${i++}\n`)}' & done; wait
- node -e 'const fs=require("fs");const seqs=new Map();fs.readFileSync("test.txt","utf8").trim().split("\n").forEach((l,i)=>{const[id,seq]=l.split(",").map(Number);if(!seqs.has(id))seqs.set(id,{last:-1,errs:0});const s=seqs.get(id);if(seq!==s.last+1){console.error(`Error for id ${id} at line ${i}: ${s.last}->${seq}`);s.errs++}s.last=seq});seqs.forEach((v,id)=>console.log(`ID ${id}: final seq ${v.last}, ${v.errs} gaps`))'
- */
+ }
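
The rewritten logDisk above also adds per-type rate limiting: entries are keyed by their first string argument (or __FILE__ when present) and counted within a rolling window; once a type exceeds the threshold it is dropped until the window resets, and the entry that hits the threshold is tagged with LOG_LIMIT_FLAG so the viewer can tell later entries were suppressed. A condensed sketch of that bookkeeping, with the window and threshold taken from the diff (15 minutes, 1000 entries); the checkLimit helper is illustrative, not part of the package:

    const LIMIT_PERIOD = 15 * 60 * 1000;   // timeInMinute * 15 in the diff
    const LIMIT_THRESHOLD = 1000;

    let limiter: { resetTime: number; counts: Map<string, number> } | undefined;

    // "write": store normally; "flag": store, but mark it (LOG_LIMIT_FLAG in the diff);
    // "drop": skip the disk write entirely.
    function checkLimit(logType: string, time: number): "write" | "flag" | "drop" {
        // The window is reset lazily, the first time an entry arrives past resetTime.
        if (limiter && time > limiter.resetTime) limiter = undefined;
        if (!limiter) limiter = { resetTime: time + LIMIT_PERIOD, counts: new Map() };
        const count = (limiter.counts.get(logType) || 0) + 1;
        limiter.counts.set(logType, count);
        if (count > LIMIT_THRESHOLD) return "drop";
        if (count === LIMIT_THRESHOLD) return "flag";
        return "write";
    }
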
package/src/diagnostics/logs/diskShimConsoleLogs.ts
@@ -1,24 +1,13 @@
  import { canHaveChildren } from "socket-function/src/types";
  import { isNode } from "typesafecss";
  import { red, yellow } from "socket-function/src/formatting/logColors";
+ import * as diskLogger from "./diskLogger";

- let addLocalLogModule: (typeof addLocalLogPromise extends Promise<infer T> ? T : never) | undefined;
- const addLocalLogPromise = import("../errorLogs/ErrorLogController").then(x => {
- (addLocalLogModule as any) = x;
- return x;
- });

- let diskLoggerModule: typeof diskLoggerPromise extends Promise<infer T> ? T : never;
- const diskLoggerPromise = import("./diskLogger").then(x => {
- (diskLoggerModule as any) = x;
- return x;
- });

-
- let shimmed = false;
  export function shimConsoleLogs() {
- if (shimmed) return;
- shimmed = true;
+ if ((globalThis as any).shimmedConsoleLogs) return;
+ (globalThis as any).shimmedConsoleLogs = true;
  if (isNode()) {
  process.on("unhandledRejection", async (reason: any, promise) => {
  console.error(`Uncaught promise rejection: ${String(reason.stack || reason)}`);
@@ -36,26 +25,14 @@ export function shimConsoleLogs() {
  args.length > 0
  && String(args[0]).trim().length > 0
  ) {
- if (typeof diskLoggerModule?.logDisk === "function") {
- // Don't call it directly, so we don't get extra line debug context added to this call
- // (as it wouldn't be useful, as we really want the caller)
- let stopDoubleShim = diskLoggerModule.logDisk;
- stopDoubleShim(...args, { type: fncName });
- }
+ // Assign to a variable, so it isn't shimmed by injectFileLocationToConsole
+ const logDiskThatIsntShimmed = diskLogger.logDisk;
+ logDiskThatIsntShimmed(fncName, ...args);
  }

- let fileObj = args.find(x => canHaveChildren(x) && x["__FILE__"]);
-
  // Filter out objects added by injectFileLocationToConsole
  args = args.filter(x => !(canHaveChildren(x) && x["__FILE__"]));

- if (typeof addLocalLogModule?.addLocalLog === "function") {
- if (fncName === "error" || fncName === "warn") {
- // ALSO, track the logs in a file, for error notifications, etc
- addLocalLogModule.addLocalLog({ message: args.join(" | ") + " | " + fileObj?.["__FILE__"], time: Date.now() }, fncName);
- }
- }
-
  if (
  // Make warns / errors use better colors
  (fncName === "warn" || fncName === "error")