querysub 0.325.0 → 0.327.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "querysub",
- "version": "0.325.0",
+ "version": "0.327.0",
  "main": "index.js",
  "license": "MIT",
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -12,10 +12,11 @@ import { Args } from "socket-function/src/types";
  import { getArchivesBackblaze } from "./archivesBackBlaze";
  import { formatNumber } from "socket-function/src/formatting/format";
  import { SizeLimiter } from "../diagnostics/SizeLimiter";
+ import { isPublic } from "../config";

  const SIZE_LIMIT = new SizeLimiter({
  diskRoot: getStorageDir(),
- maxBytes: 1024 * 1024 * 1024 * 50,
+ maxBytes: isPublic() ? 1024 * 1024 * 1024 * 250 : 1024 * 1024 * 1024 * 50,
  // Anything less than this and we can't even load enough weights models for a single task
  minBytes: 1024 * 1024 * 1024 * 8,
  maxDiskFraction: 0.3,
@@ -191,6 +191,7 @@ const changeIdentityOnce = cacheWeak(async function changeIdentityOnce(connectio
  };
  let signature = sign(threadKeyCert, payload);
  await timeoutToError(
+ // NOTE: This timeout has to be small: if we try to connect to a node to send it something time sensitive such as a PathValue and it takes too long, we might end up with a PathValue which is expired. The threshold is around 60 seconds, so we want to leave time to call a few different nodes before the PathValue expires.
  10 * 1000,
  IdentityController.nodes[nodeId].changeIdentity(signature, payload),
  () => new Error(`Timeout calling changeIdentity for ${nodeId}`)
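The NOTE above is a timing-budget argument: with PathValues expiring after roughly 60 seconds, a 10 second per-node timeout leaves room to try several nodes. A rough sketch of that budget (constants and the timeout helper are illustrative, not the socket-function API):

```ts
// Rough sketch of the timing budget described in the NOTE above (illustrative constants).
const PATH_VALUE_EXPIRY_MS = 60 * 1000; // PathValues expire after roughly a minute
const PER_NODE_TIMEOUT_MS = 10 * 1000;  // per-node changeIdentity timeout

// With a 10s cap per attempt we can try about 6 nodes before the PathValue expires.
const maxAttempts = Math.floor(PATH_VALUE_EXPIRY_MS / PER_NODE_TIMEOUT_MS);

// Minimal stand-in for a timeoutToError-style helper: reject if the call outlives its budget.
async function withTimeout<T>(ms: number, promise: Promise<T>, onTimeout: () => Error): Promise<T> {
    return await Promise.race([
        promise,
        new Promise<T>((_, reject) => setTimeout(() => reject(onTimeout()), ms)),
    ]);
}
```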
@@ -1,7 +1,7 @@
  import { measureWrap } from "socket-function/src/profiling/measure";
  import { getIdentityCA, getMachineId, getOwnMachineId } from "../-a-auth/certs";
  import { getArchives } from "../-a-archives/archives";
- import { isNode, throttleFunction, timeInSecond } from "socket-function/src/misc";
+ import { isNode, throttleFunction, timeInHour, timeInSecond } from "socket-function/src/misc";
  import { SocketFunctionHook } from "socket-function/SocketFunctionTypes";
  import { SocketFunction } from "socket-function/SocketFunction";
  import { IdentityController_getMachineId } from "../-c-identity/IdentityController";
@@ -19,6 +19,7 @@ import { magenta } from "socket-function/src/formatting/logColors";
  // Cache the untrust list, to prevent bugs from causing too many backend reads (while also allowing
  // bad servers which make request before their trust is verified from staying broken).
  const UNTRUST_CACHE_TIME = 30 * timeInSecond;
+ const TRUSTED_CACHE_RESET_INTERVAL = timeInHour;

  const archives = lazy(() => getArchives("trust2/"));

@@ -57,6 +58,8 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
  // See the comment in requiresNetworkTrustHook for why clients have to trust all callers.
  if (isClient()) return true;

+ await populateTrustedCache();
+
  if (trustedCache.has(machineId)) {
  return true;
  }
@@ -70,30 +73,11 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
  return false;
  }

- return await isTrustedBase(machineId);
- });
- let trustedCachePopulated = false;
- const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machineId: string) {
- if (!trustedCachePopulated) {
- trustedCachePopulated = true;
- let trustedMachineIds = await archives().find("");
- lastArchivesTrusted = trustedMachineIds.slice();
- for (let trustedMachineId of trustedMachineIds) {
- trustedCache.add(trustedMachineId);
- // NOTE: We don't load trust certs here, as we need to load them on demand in case the trust changes after our initial startup.
- }
- } else {
- // Checking a single entry is a lot faster (as find is slow)
- let trusted = await archives().get(machineId);
- if (trusted) {
- trustedCache.add(machineId);
- }
+ // Checking a single entry is fast and if we don't trust them they'll be added to untrusted cache so it shouldn't slow things down by too much.
+ let trusted = await archives().get(machineId);
+ if (trusted) {
+ trustedCache.add(machineId);
  }
- // Always trust ourself
- trustedCache.add(getOwnMachineId());
-
- // NOTE: This only happens to servers that we connect to. Also we only allow the machine ID to be this special ID in the case it's on our domain. And because we use HTTPS when connecting to domains, it means that it must be implicitly trusted if it has a certificate for our domain.
- trustedCache.add("127-0-0-1");

  if (!trustedCache.has(machineId)) {
  untrustedCache.set(machineId, Date.now() + UNTRUST_CACHE_TIME);
@@ -101,7 +85,24 @@ const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machi
  } else {
  return true;
  }
- }));
+ });
+ let populateTrustedCache = lazy(async () => {
+ let trustedMachineIds = await archives().find("");
+ lastArchivesTrusted = trustedMachineIds.slice();
+ for (let trustedMachineId of trustedMachineIds) {
+ trustedCache.add(trustedMachineId);
+ }
+ // Always trust ourself
+ trustedCache.add(getOwnMachineId());
+
+ // NOTE: This only happens to servers that we connect to. Also we only allow the machine ID to be this special ID in the case it's on our domain. And because we use HTTPS when connecting to domains, it means that it must be implicitly trusted if it has a certificate for our domain.
+ trustedCache.add("127-0-0-1");
+
+ setTimeout(() => {
+ trustedCache.clear();
+ populateTrustedCache.reset();
+ }, TRUSTED_CACHE_RESET_INTERVAL);
+ });

  export async function isNodeTrusted(nodeId: string) {
  let domainName = getNodeIdDomainMaybeUndefined(nodeId);
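The new populateTrustedCache follows a "lazy bulk load, then periodic full reset" pattern: the first isTrusted call loads every trusted machine ID in one find, later calls hit the in-memory Set (with a single-entry get as a fallback), and a timer clears everything so stale trust eventually drops out. A self-contained sketch of that pattern, with an illustrative lazyResettable helper standing in for socket-function's lazy():

```ts
// Generic sketch of the "lazy populate + periodic reset" caching pattern used above.
function lazyResettable<T>(factory: () => Promise<T>): { (): Promise<T>; reset(): void } {
    let pending: Promise<T> | undefined;
    const fn = () => (pending ??= factory());
    return Object.assign(fn, { reset: () => { pending = undefined; } });
}

declare function loadAllTrustedIds(): Promise<string[]>; // hypothetical bulk read (like archives().find(""))

const trustedIds = new Set<string>();
const RESET_INTERVAL_MS = 60 * 60 * 1000; // e.g. one hour

const populateCache = lazyResettable(async () => {
    // Bulk load once per interval; later lookups just hit the Set.
    for (const id of await loadAllTrustedIds()) trustedIds.add(id);
    setTimeout(() => {
        trustedIds.clear();
        populateCache.reset(); // the next caller triggers a fresh bulk load
    }, RESET_INTERVAL_MS);
});

async function isTrustedSketch(id: string): Promise<boolean> {
    await populateCache();
    return trustedIds.has(id);
}
```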
@@ -516,6 +516,7 @@ export class FastArchiveAppendable<Datum> {
  });

  const onDecompressedData = createLogScanner({
+ debugName: file.path,
  onParsedData,
  });
  let batchedData: Buffer[] = [];
@@ -697,14 +698,18 @@ export class FastArchiveAppendable<Datum> {


  export function createLogScanner(config: {
+ debugName: string;
  onParsedData: (posStart: number, posEnd: number, buffer: Buffer | "done") => MaybePromise<void>;
  }): (data: Buffer | "done") => Promise<void> {
  const { onParsedData } = config;
  let pendingData: Buffer[] = [];

+ let finished = false;
+
  let delimitterMatchIndex = 0;
- return runInSerial(async (data: Buffer | "done") => {
+ return (async (data: Buffer | "done") => {
  if (data === "done") {
+ finished = true;
  // Flush any pending data, even though we have no delimitter. It will probably fail to parse, but... maybe it will work?
  if (pendingData.length > 0) {
  let combinedBuffer = Buffer.concat(pendingData);
@@ -714,6 +719,9 @@ export function createLogScanner(config: {
  await onParsedData(0, 0, "done");
  return;
  }
+ if (finished) {
+ throw new Error(`Finished scan, but we received more data: ${data.length}, sample is: ${data.slice(0, 100).toString("hex")}, ${config.debugName}`);
+ }

  let lastStart = 0;
  await measureBlock(async () => {
@@ -734,6 +742,7 @@ export function createLogScanner(config: {
  ...pendingData,
  data.slice(lastStart, i + 1),
  ]).slice(0, -objectDelimitterBuffer.length);
+ pendingData = [];
  posStart = 0;
  posEnd = buffer.length;
  } else {
@@ -741,19 +750,12 @@ export function createLogScanner(config: {
  posStart = lastStart;
  posEnd = i + 1 - objectDelimitterBuffer.length;
  }
- // Delimitter was the start of the chunk, and it's the first chunk. Just skip it.
- if (posStart === posEnd && i === 0) {
- lastStart = i + 1;
- continue;
- }
-
  // Only sometimes awaiting here makes scanning almost 2X faster, in the normal case, somehow?
  let maybePromise = onParsedData(posStart, posEnd, buffer);
  if (maybePromise) {
  await maybePromise;
  }

- pendingData = [];
  lastStart = i + 1;
  }
  }
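createLogScanner above splits a chunked byte stream on a delimiter, carrying partial records across chunks and now rejecting data after "done". A simplified, self-contained sketch of that scanning pattern (it searches with Buffer.indexOf rather than the incremental delimitterMatchIndex tracking in the real code):

```ts
// Simplified sketch: split a chunked byte stream on a delimiter, keeping the
// unterminated tail around until the next chunk (or the final flush).
function makeScanner(delimiter: Buffer, onRecord: (record: Buffer) => void) {
    let pending: Buffer | undefined;
    let finished = false;
    return (chunk: Buffer | "done") => {
        if (chunk === "done") {
            finished = true;
            if (pending) onRecord(pending); // flush the trailing partial record
            return;
        }
        if (finished) throw new Error(`Received ${chunk.length} bytes after "done"`);
        let data = pending ? Buffer.concat([pending, chunk]) : chunk;
        pending = undefined;
        let start = 0;
        while (true) {
            const idx = data.indexOf(delimiter, start);
            if (idx === -1) break;
            onRecord(data.slice(start, idx)); // record excludes the delimiter
            start = idx + delimiter.length;
        }
        if (start < data.length) pending = data.slice(start); // keep the unterminated tail
    };
}
```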
@@ -15,7 +15,7 @@ import { ButtonSelector } from "../../library-components/ButtonSelector";
  import { Button } from "../../library-components/Button";
  import { lazy } from "socket-function/src/caching";
  import { LOG_LIMIT_FLAG } from "./diskLogger";
- import { canHaveChildren } from "socket-function/src/types";
+ import { MaybePromise, canHaveChildren } from "socket-function/src/types";
  import { niceParse } from "../../niceStringify";
  import { FileMetadata } from "./FastArchiveController";

@@ -30,7 +30,8 @@ const caseInsensitiveParam = new URLParam("caseInsensitive", false);

  export class FastArchiveViewer<T> extends qreact.Component<{
  fastArchives: FastArchiveAppendable<T>[];
- onStart: () => void;
+ runOnLoad?: boolean;
+ onStart: () => MaybePromise<void>;
  getWantData?: (file: FileMetadata) => Promise<((posStart: number, posEnd: number, data: Buffer) => boolean) | undefined>;
  onDatums: (source: FastArchiveAppendable<T>, datums: T[], metadata: FileMetadata) => void;
  // Called after onData
@@ -38,12 +39,13 @@ export class FastArchiveViewer<T> extends qreact.Component<{
  onFinish?: () => void;
  }> {
  state = t.state({
+ runCount: t.atomic<number>(0),
  // rootPath =>
  fileMetadata: t.atomic<({
  files: FileMetadata[];
  createTime?: number;
  } | undefined)[]>([]),
- finished: t.atomic(false),
+ finished: t.atomic(true),
  error: t.atomic<string | undefined>(undefined),
  pendingSyncInitializations: t.atomic<number>(0),

@@ -108,6 +110,11 @@ export class FastArchiveViewer<T> extends qreact.Component<{
  this.latestSequenceNumber = this.currentSequenceNumber;
  const mySequenceNumber = this.currentSequenceNumber;

+ // Increment run count for each new run
+ Querysub.commit(() => {
+ this.state.runCount++;
+ });
+
  // Helper function to check if this sequence number is still the latest
  const isLatestSync = () => mySequenceNumber === this.latestSequenceNumber;
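The sequence-number bookkeeping above is a "latest run wins" guard: every run bumps a counter, and slow async work checks the counter before applying results. A small sketch of the pattern (class and names are illustrative):

```ts
// Sketch of the "latest run wins" guard: stale runs notice they were superseded and drop their results.
class RunGuard {
    private latest = 0;
    start(): () => boolean {
        const mine = ++this.latest;
        return () => mine === this.latest;
    }
}

const guard = new RunGuard();
async function runSketch(load: () => Promise<string[]>, apply: (rows: string[]) => void) {
    const isLatest = guard.start();
    const rows = await load();
    if (!isLatest()) return; // a newer run started while we were loading
    apply(rows);
}
```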
@@ -150,7 +157,7 @@ export class FastArchiveViewer<T> extends qreact.Component<{

  this.histogramStartTime = timeRange.startTime - this.histogramBucketTime * 2;
  this.histogramEndTime = timeRange.endTime + this.histogramBucketTime * 2;
- const bucketCount = Math.ceil((this.histogramEndTime - this.histogramStartTime) / this.histogramBucketTime);
+ const bucketCount = clamp(Math.ceil((this.histogramEndTime - this.histogramStartTime) / this.histogramBucketTime), 1, 10000);

  this.histogramAllDataCounts = new Float64Array(bucketCount);
  this.histogramSelectedDataCounts = new Float64Array(bucketCount);
@@ -181,9 +188,7 @@ export class FastArchiveViewer<T> extends qreact.Component<{
  }
  };
  try {
- ifLatest(() => {
- onStart();
- });
+ await onStart();

  const caseInsensitive = Querysub.fastRead(() => caseInsensitiveParam.value);
  let caseInsensitiveMapping = new Uint8Array(256);
@@ -572,7 +577,9 @@ export class FastArchiveViewer<T> extends qreact.Component<{
  fillWidth
  onKeyUp={this.handleDownload}
  ref2={() => {
- void this.handleDownload();
+ if (this.props.runOnLoad) {
+ void this.handleDownload();
+ }
  }}
  noEnterKeyBlur
  placeholder="Filter terms, ex x | y & z"
@@ -587,17 +594,21 @@ export class FastArchiveViewer<T> extends qreact.Component<{
  flavor="small"
  />
  <div className={css.vbox(10)}>
- {this.state.fileMetadata
- && (
- <div
- className={infoDisplay(120)}
- title={this.state.fileMetadata.map(x => x?.files || []).flat().map(x =>
- `${x.path} (${formatNumber(x.size)})`
- ).join("\n")}
- >
- File count: {formatNumber(totalFileCount)}, Backblaze size: {formatNumber(totalBackblazeByteCount)}B (compressed), Disk size: {formatNumber(totalLocalByteCount)}B (uncompressed)
- </div>
- )}
+ {this.state.runCount > 0 && (
+ <div
+ className={infoDisplay(120)}
+ title={this.state.fileMetadata.map(x => x?.files || []).flat().map(x =>
+ `${x.path} (${formatNumber(x.size)})`
+ ).join("\n")}
+ >
+ File count: {formatNumber(totalFileCount)}, Backblaze size: {formatNumber(totalBackblazeByteCount)}B (compressed), Disk size: {formatNumber(totalLocalByteCount)}B (uncompressed)
+ </div>
+ )}
+ {this.state.runCount === 0 && (
+ <div className={infoDisplay(200)}>
+ No data downloaded yet. Click Run to download data.
+ </div>
+ )}
  {this.state.finished && <div
  className={infoDisplay(60).button}
  onClick={() => {
@@ -144,7 +144,8 @@ export class LogViewer2 extends qreact.Component {
  <FastArchiveViewer
  ref2={x => this.fastArchiveViewer = x}
  fastArchives={logs}
- onStart={() => {
+ runOnLoad={errorNotifyToggleURL.value}
+ onStart={async () => {
  this.datumCount = 0;
  this.notMatchedCount = 0;
  this.errors = 0;
@@ -167,6 +168,9 @@ export class LogViewer2 extends qreact.Component {
  });
  }
  })();
+ // ALWAYS update it, as the synchronous one might be out of date, and if we use an outdated one extra errors show up.
+ suppressionList = await suppressionController.getSuppressionList.promise();
+
  }}
  getWantData={async (file) => {
  if (!hasErrorNotifyToggle) return undefined;
@@ -224,22 +224,26 @@ class SuppressionList {
  suppressionUpdatedChannel.watch(() => {
  void this.updateEntriesNow();
  });
+ await runInfinitePollCallAtStart(SUPPRESSION_POLL_INTERVAL, async () => {
+ await this.updateEntriesNow();
+ });
  });
+ private cacheEntries: SuppressionListBase | undefined = undefined;
  private updateEntriesNow = async () => {
  let entries = await suppressionListArchive.get(suppressionListKey);
  if (!entries) {
  entries = { entries: {} };
  }
- this.getEntries.set(Promise.resolve(entries));
+ this.cacheEntries = entries;
  };
- private getEntries = lazy(async (): Promise<SuppressionListBase> => {
+ private async getEntries(): Promise<SuppressionListBase> {
  await this.init();
- await runInfinitePollCallAtStart(SUPPRESSION_POLL_INTERVAL, async () => {
- await this.updateEntriesNow();
- });
+ if (!this.cacheEntries) {
+ throw new Error("Cache entries not set? Should be impossible.");
+ }
  // Infinite poll will have set this, so we don't infinitely loop
- return await this.getEntries();
- });
+ return this.cacheEntries;
+ }

  public async filterObjsToNonSuppressed(objs: LogDatum[]): Promise<LogDatum[]> {
  // NOTE: Streamed data should be rare enough, that handling this inefficiently is okay.
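The refactor above replaces the recursive lazy getEntries with a simpler shape: init() subscribes to the update channel and starts a poll that keeps a plain cacheEntries snapshot fresh, and getEntries just awaits init and returns the snapshot. A generic sketch of that shape (helper names are illustrative, not the package's API):

```ts
// Sketch of the "init starts a poll, readers return the cached snapshot" pattern.
class PolledCache<T> {
    private snapshot: T | undefined;
    private initOnce: Promise<void> | undefined;

    constructor(private load: () => Promise<T>, private intervalMs: number) {}

    private init(): Promise<void> {
        return (this.initOnce ??= (async () => {
            await this.refresh(); // the first read completes before init() resolves
            setInterval(() => void this.refresh(), this.intervalMs);
        })());
    }

    private async refresh(): Promise<void> {
        this.snapshot = await this.load();
    }

    async get(): Promise<T> {
        await this.init();
        if (this.snapshot === undefined) throw new Error("Snapshot not set? Should be impossible.");
        return this.snapshot;
    }
}
```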
@@ -259,6 +263,7 @@ class SuppressionList {
  }
  let buffer = Buffer.concat(parts);
  let scanner = await this.scanForRecentErrors({
+ debugName: "filterObjsToNonSuppressed",
  startTime,
  endTime,
  });
@@ -266,6 +271,7 @@ class SuppressionList {
  return await scanner.finish();
  }
  public async scanForRecentErrors(config: {
+ debugName: string;
  startTime: number;
  endTime: number;
  }): Promise<{
@@ -285,6 +291,7 @@ class SuppressionList {
  // for the suppression key.
  let obj: { outdatedSuppressionKey?: string } = {};
  let callback = createLogScanner({
+ debugName: config.debugName,
  onParsedData: (posStart, posEnd, buffer) => {
  if (buffer === "done") {
  return;
@@ -297,7 +304,7 @@ class SuppressionList {
  try {
  datum = JSON.parse(buffer.slice(posStart, posEnd).toString()) as LogDatum;
  } catch (e: any) {
- process.stderr.write(`Failed to parse log datum in around ${buffer.slice(posStart, posEnd).slice(0, 100).toString("hex")}, error is:\n${e.stack}`);
+ process.stderr.write(`Failed to parse log datum in around ${buffer.slice(posStart, posEnd).slice(0, 100).toString("hex")}, in source ${config.debugName}, error is:\n${e.stack}`);
  return;
  }
  if (obj.outdatedSuppressionKey) {
@@ -307,18 +314,19 @@ class SuppressionList {
  },
  });
  let lastWaitTime = Date.now();
+ const stream = runInSerial(async (buffer: Buffer | "done") => {
+ // TODO: Maybe we should add this pattern to batching.ts? Basically, if we get called fast, we allow the calls through. BUT, if we called slowly OR we are doing a lot of processing (and so we are working for all of SELF_THROTTLE_INTERVAL), then we wait. This prevents this from taking over the machine. The back off is steep though, and if the machine is lagging we might reduce to a trickle, just getting 1 call in per SELF_THROTTLE_DELAY + synchronous lag from work in other parts of the program.
+ let now = Date.now();
+ if (now - lastWaitTime > SELF_THROTTLE_INTERVAL) {
+ await delay(SELF_THROTTLE_DELAY);
+ lastWaitTime = now;
+ }
+ await callback(buffer);
+ });
  return {
- onData: runInSerial(async (buffer) => {
- // TODO: Maybe we should add this pattern to batching.ts? Basically, if we get called fast, we allow the calls through. BUT, if we called slowly OR we are doing a lot of processing (and so we are working for all of SELF_THROTTLE_INTERVAL), then we wait. This prevents this from taking over the machine. The back off is steep though, and if the machine is lagging we might reduce to a trickle, just getting 1 call in per SELF_THROTTLE_DELAY + synchronous lag from work in other parts of the program.
- let now = Date.now();
- if (now - lastWaitTime > SELF_THROTTLE_INTERVAL) {
- await delay(SELF_THROTTLE_DELAY);
- lastWaitTime = now;
- }
- await callback(buffer);
- }),
+ onData: stream,
  finish: async () => {
- await callback("done");
+ await stream("done");
  // NOTE: We COULD limit as we run, however... how many errors are we really going to encounter that AREN'T suppressed? Suppression is supposed to prevent overload anyways. I guess worst case scenario, yes, we could get overloaded, but... if we are logging more NEW errors than we can store in memory, we have bigger problems...
  return limitRecentErrors(datums);
  },
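The TODO above describes a self-throttle on top of serial execution: calls stay strictly ordered, and once the scanner has been busy for longer than SELF_THROTTLE_INTERVAL it yields for SELF_THROTTLE_DELAY so it cannot monopolize the machine. A self-contained sketch of that pattern (runInSerial is replaced with a simple promise chain; the constants are illustrative):

```ts
const SELF_THROTTLE_INTERVAL = 200; // ms of continuous work before we back off
const SELF_THROTTLE_DELAY = 50;     // ms to yield when we back off

const delay = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

// Calls run strictly in order; if we've been working past the interval, insert a delay.
function throttledSerial<A>(handler: (arg: A) => Promise<void>): (arg: A) => Promise<void> {
    let tail: Promise<void> = Promise.resolve();
    let lastWaitTime = Date.now();
    return (arg: A) => {
        const result = tail.then(async () => {
            const now = Date.now();
            if (now - lastWaitTime > SELF_THROTTLE_INTERVAL) {
                await delay(SELF_THROTTLE_DELAY);
                lastWaitTime = now;
            }
            await handler(arg);
        });
        tail = result.catch(() => {}); // keep the chain alive; callers still see the rejection on `result`
        return result;
    };
}
```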
@@ -341,7 +349,8 @@ class SuppressionList {
  }

  public async getSuppressionList(): Promise<SuppressionEntry[]> {
- return Object.values((await this.getEntries()).entries);
+ let entries = Object.values((await this.getEntries()).entries);
+ return entries;
  }
  }
  const suppressionList = new SuppressionList();
@@ -597,6 +606,7 @@ class RecentErrors {
  path = await urlCache.getURLLocalPath(file.url, hash);
  if (!path) continue;
  let scanner = await suppressionList.scanForRecentErrors({
+ debugName: file.url,
  startTime: file.startTime,
  endTime: file.endTime,
  });
@@ -10,7 +10,7 @@ import { ATag } from "../../../library-components/ATag";
  import { managementPageURL, showingManagementURL } from "../../managementPages";
  import { errorNotifyToggleURL } from "../LogViewer2";
  import { Querysub } from "../../../4-querysub/QuerysubController";
- import { nextId, timeInDay, timeInHour } from "socket-function/src/misc";
+ import { deepCloneJSON, nextId, timeInDay, timeInHour } from "socket-function/src/misc";
  import { formatNumber } from "socket-function/src/formatting/format";
  import { Icon } from "../../../library-components/icons";
  import { filterParam } from "../FastArchiveViewer";
@@ -133,14 +133,15 @@ export class ErrorWarning extends qreact.Component {
  {topExpired &&
  <div className={css.hbox(8)}>
  <Button onClick={() => {
+ let newObj = deepCloneJSON({
+ ...topExpired!,
+ expiresAt: Date.now() + timeInDay,
+ });
  void Querysub.onCommitFinished(async () => {
- await suppressionController.setSuppressionEntry.promise({
- ...topExpired!,
- expiresAt: Date.now() + timeInDay,
- });
+ await suppressionController.setSuppressionEntry.promise(newObj);
  });
  }}>
- Ignore Again
+ Ignore Again ({formatDateJSX(topExpired.expiresAt)})
  </Button>
  <div>
  Match Pattern =
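The change above snapshots the suppression entry synchronously (via deepCloneJSON) before the async commit runs, so later state changes or reactive wrappers cannot alter what gets sent. A hypothetical sketch of that snapshot-before-async pattern (deepCloneJSONSketch is a plain JSON round-trip standing in for socket-function's deepCloneJSON):

```ts
// Plain JSON round-trip standing in for deepCloneJSON (illustrative only).
function deepCloneJSONSketch<T>(value: T): T {
    return JSON.parse(JSON.stringify(value)) as T;
}

const DAY_MS = 24 * 60 * 60 * 1000;

function onIgnoreAgain<T extends { expiresAt: number }>(entry: T, send: (entry: T) => Promise<void>): void {
    // Snapshot synchronously, inside the click handler.
    const snapshot = deepCloneJSONSketch({ ...entry, expiresAt: Date.now() + DAY_MS });
    void (async () => {
        await send(snapshot); // the async send uses the snapshot, not whatever `entry` points to by now
    })();
}
```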
@@ -4,23 +4,42 @@ import { RecentErrorsController, recentErrorsChannel, watchRecentErrors } from "
  import { timeInSecond } from "socket-function/src/misc";
  import { formatDateTime } from "socket-function/src/formatting/format";

-
  //todonext

+ // 1) Fix whatever is causing our memory usage to highly fluctuate. We seem to just go up and up and up from 100 megabytes all the way up to 300 megabytes and then we GC going back to 100. I turned on the socket function logging and I don't think we're sending network messages. And I ran a profile and it doesn't seem to be doing anything slow, but what are we even doing? We need to fix this.
+ // - It could be the FPS display. It's pretty easy to check.
+ // YEP! I think it's the FPS display. I disabled it. Now we can see if the error parsing lag goes away, and if it does and then comes back when we turn the FPS display back on, we'll know we have to fix that. I could understand lagging, but why would it be allocating so much? Is it the animation rendering? It definitely could be, because the animations are slow to render and can lag Chrome, the GPU, and other windows.
+
+ // 2) The error parsing page is clearly lagging. Even when loading just the error data for the last few days, which is less than a megabyte of total logs, the error loading animation is locking up. Stuttering, that is. We need to fix this.
+
+
+ // Deploy everything and then update the server runner itself.
+
+ // The constant error notifications might be fixed now. We'll see tomorrow after all the rolling updates finish.
+ // 4) fix whatever's causing constant error notifications. Something is broadcasting on the Recent Errors Change channel constantly.
+ // - I guess usually there's no server that's going to be listening on it. So it's... Not that big of a deal, but it's still annoying.

- // 5) Add a channel to watch suppression entries, as once we suppress it, we don't want notifications anymore anywhere, And we don't want to have to wait
- // - test by logging when we get errors and then having our script which automatically keeps adding errors to keep running so we can verify that we keep logging new errors and then ignore the errors and this suppression should propagate and cause us to no longer omit the errors.
- // - This will also cause the errors to be suppressed across different HTTP servers.

- // 5) Get IMs actually sending.
+
+
+ // Back to getting a Node.js watcher that works smoothly without receiving a whole bunch of extra errors etc.
+ // 4.1) update channel watching so you can specify that you want to watch only on a specific node ID and then update our code so we only watch it on the controller node ID that we're interfacing with.
+
+ // 5) Verify our suppression updates broadcast across the channel correctly, causing us to be able to suppress a notification and our watching script to then stop seeing the new updates. Realistically, it's the calling script that stops setting them, but same thing.
+
+ // 5) Set up the Instant Messaging Sending API.
+ // - Discord. With beeper it won't really matter what we're messaging. We could also do WhatsApp. It's really all the same.

  // 6) Set up all the code to properly rate limit IMs, batch them, link back to the log page, etc.
  // - Just link to the error page for the last week. We don't need to link to anything specific.
  // - properly getting the node ID that we're going to be watching, and if it goes down, getting a new one, and ignoring messages from the old node.
  // - And if no node exists, we need to warn and then wait.

+
  // 7) Write the digest script, which is very different, but will run in the same entry.
- // - Separate warnings and errors and also bucket by time bucket
+ // - Separate warnings and errors and also bucket by time bucket
+ // - suppressed errors by time bucket (but no type, as we definitely don't want to parse all suppressed errors...)
+ // - Time the entire thing, and put that, and the profile, in the digest too! That will give us a good gauge on if the errors/suppressions are getting slow (due to a lot of errors, or a lot of suppression checks!)
  // 8) Write a page that shows the results of the digest in tabs, writing the digest probably just to backblaze
  // - For now, just have two tabs, one for errors and one for warnings.
  // - If we're going to do a full scan, we might as well show time series data as well. It's trivial.
@@ -20,16 +20,6 @@ Very small amount of data
  - For now this will just be for:
  - non-suppressed errors
  - suppressed errors
- - Eventually the goal of this is to add our tracking charts to this. There are some really useful metrics we can track.
- - unique visit IPs.
- - Percent bounces.
- - average visit length and also median visit length top 95" bottom 5"
- - average stories read average story percentages read
- - percent of first page story views by subscribers percent of fifth page story view by subscribers
- - Number of users visiting the fifth page.
- - New subscriptions
- - Subscription cancellations
- - Total subscriptions



@@ -55,51 +45,9 @@ Very small amount of data
  2.1) Collections of life cycles so we can further reduce the complexity.
  - Uses FastArchiveViewer, but instead of showing a table, shows lifecycles (a derived concept)
  - We save them in backblaze, with a bit of cache for loading them
- - List of life cycles
- - Life cycle
- - Title
- - Operation list (each supports | / &, but having multiple is even better)
- - Match filter
- - Group key extractions (optional, if not set it becomes a singleton)
- - Just a field name
- - CAN have multiple, which adds us as multiple life cycles
- - With each one being namespaced using the key, so we can tell them apart
- - Global value setting (optional, if not set it has no global state impact)
- - A list of set values
- - Each one is an expression which can use fields in the object, ex:
- - `alivePathValueServers.$threadId = true`
- - Show AND SHOULDN'T include match filters!
- - So when we should the count matched, we can show stats for these, which will often be should have "finished", and shouldn't have "error", so we can see completed, and errors
- - Similar to error notifications, but... it's nice to also have this here, as we could miss the notification, or suppress it, but when we are looking at a life cycle it's relevant skyrockets.
- - ALSO for start, so we can see cutoff starts!
- OH! How do we handle cut off starts?
- - Maybe... we have a "read preload" duration, and... we read that, BUT, only include life cycles which are also in our main selected time. So we don't cut anything off in our main time, but don't add new values which also get cut off!
- - Same time/machine/thread selector as log viewer
- - Allow filtering to specific life cycles
- - After download, shows matches per life cycle
- - Button to reset to all
- - Download logs, and scan for selected life cycles
- - Immediately on load, showing progress and throttling, so it's not too bad
- - Result
- - List of life cycles, with count of each
- - Table of individual life cycles?
- - Can then filter within these life cycles by searching
- - BUT, importantly, if any log is matched in a life cycle, the entire life cycle is matched
- - AND, global settings for ALL life cycles are applied, not just filtered ones!
- - Table of result life cycles
- - Preview shows first matched line
- - ALSO, shows duration of life cycle!
- - And start date
- - Expand to see pin that specific life cycle above
- - Show list of logs in it, in another table, with searching on each of them
- - Start date AND duration of each line!
- - Can pin multiple life cycles (I guess might as well)
- - Show list of global value expressions as well (limited, but with filtering to search them easily)
- - Can expand a global value to see object (but again... limited?)
- - Can select a specific global value path, to have it injected into
- - a column for before and after each life cycle
- - A column for after each line within a life cycle
- - Can also select parent values, to show all paths under that (use ShowMore, to make them manageable...)
+ - show the life cycles and allow viewing just the specific life cycles and then drilling into those life cycles. Also allow viewing multiple at once so we can view a stream that has many life cycles (as in life cycle collections, which we can also save.)
+ - Show overlap when we're showing the list of life cycles by having it first sorted by start time and then have some kind of indicator for how many values after the value it overlaps with. So if it's serial there'll be no overlap. If everything is being queued up and then run in serial we'll see the overlap go from 1, 2, 3, 4, 5, 6, 7, 8, etc. If it's just in parallel, it'll count up to a number and then go up and down a little bit as values are added and removed. If everything's added at once and it's in parallel, then the values will go up, but then they'll very quickly go down.
+ - Maybe we should have some indication for how much parallel overlap there is? Or kind of like how much gap there is to the start time of the next thing and to the end time of the next thing, something like that.
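The overlap indicator sketched in the bullets above amounts to: sort life cycles by start time and, for each one, count how many of the following life cycles start before it ends. A small illustrative sketch:

```ts
interface LifeCycle { start: number; end: number; }

// For each life cycle (sorted by start), count how many later ones overlap it.
function overlapCounts(lifeCycles: LifeCycle[]): number[] {
    const sorted = [...lifeCycles].sort((a, b) => a.start - b.start);
    return sorted.map((lc, i) => {
        let count = 0;
        // Later entries are sorted by start, so stop at the first one starting after our end.
        for (let j = i + 1; j < sorted.length && sorted[j].start < lc.end; j++) count++;
        return count;
    });
}
// Serial work yields all zeros; queued-then-serial work ramps 1, 2, 3, ...;
// parallel bursts spike and then fall back as values finish.
```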

  6) Add life cycles for
  - Node discovery life cycle (new node, check for alive, check for paths, etc)
@@ -112,6 +60,7 @@ Very small amount of data
  - Use different keys for threadId, and, triggeredNodeId, so we can track how a node discovers other nodes, vs how a node is discovered
  - Mark as dead, dead count increases, remove node as dead
  - MAYBE there should be 2 lifecycles, one for all, and one for just creation type stuff (no discovery)
+ - There's a life cycle for something dying which starts when we first see it's dead and ends when the count reaches the maximum. We want to use the capability to change the ending state so we can have it as partially dead but not fully dead or fully dead so we can see if the dead life cycles keep starting but not finishing which would be bad.
  - Trusted machine lifecycle
  - Check if we need to add trust
  - Add trust to archives
@@ -138,6 +87,10 @@ Very small amount of data
  - Re-enable all of our services as well.
  - Just logs MIGHT be able to do it, but... life cycles should make it a lot easier to correlate logs, which is something we need to do anyways to solve it...

+ Make sure we check our life cycles for nodes being added and removed to make sure that life cycle path is pretty empty and there aren't nodes constantly being lost and re-added.
+
+ Check the startup lifecycle to make sure we can detect the nodes pretty fast and in parallel, instead of serial
+
  10) Verify old user/fast-log-cache machine folders are deleted

@@ -309,11 +309,21 @@ export function getSyncedController<T extends SocketRegistered>(
  obj.promise = undefined;
  obj.invalidated = true;
  call(...args);
+ // Assign to itself, to reset the type assumptions typescript makes (otherwise we get an error below)
+ obj = obj as any;
  let promise = atomic(obj.promise);
  if (!promise) {
  debugger;
  throw new Error(`Impossible, called function, but promise is not found for ${fncName}`);
  }
+ // Don't cache promise calls
+ void promise.finally(() => {
+ Querysub.fastRead(() => {
+ if (obj.promise === promise) {
+ obj.promise = undefined;
+ }
+ });
+ });
  return promise;
  });
  };
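The "Don't cache promise calls" block above keeps the in-flight promise around only until it settles, then clears it (if no newer call replaced it) so the next call re-invokes instead of reusing a stale result. A minimal sketch of that clear-on-settle pattern (the cache shape is illustrative):

```ts
type CacheEntry<T> = { promise?: Promise<T> };

// Remember the in-flight promise, and clear it once it settles so the next call re-invokes.
function callUncached<T>(entry: CacheEntry<T>, invoke: () => Promise<T>): Promise<T> {
    const promise = invoke();
    entry.promise = promise;
    void promise.finally(() => {
        // Only clear if a newer call hasn't already replaced the promise.
        if (entry.promise === promise) entry.promise = undefined;
    });
    return promise;
}
```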
@@ -9,6 +9,9 @@ import { renderToString } from "./renderToString";
  import { debugTime } from "../../src/0-path-value-core/pathValueCore";
  import { measureBlock } from "socket-function/src/profiling/measure";

+ // TODO: Create more icons with: https://www.recraft.ai/project/e2a2200f-bed2-4426-b8f1-e5120b9dc990
+ // - This should also make creating animated SVGs a lot easier. If the base is nice, and we add comments, we can probably get the AI to help animate it
+
  // Most icons are from https://www.figma.com/file/eVpKKmt8uOKmSYKW4LyusF/Free-Icon-Pack-1600%2B-icons-(Community)?node-id=1654-9894&t=0bDbK0bA9KGpswRE-0

  // TODO: Add a build step that does this (storing the .svgs in a file), so we don't
package/src/server.ts CHANGED
@@ -33,7 +33,8 @@ async function main() {

  Error.stackTraceLimit = 20;

- //SocketFunction.logMessages = true;
+ // SocketFunction.logMessages = true;
+ // SocketFunction.silent = false;

  // ClientWatcher.DEBUG_READS = true;
  // ClientWatcher.DEBUG_WRITES = true;