querysub 0.391.0 → 0.393.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "querysub",
3
- "version": "0.391.0",
3
+ "version": "0.393.0",
4
4
  "main": "index.js",
5
5
  "license": "MIT",
6
6
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -57,7 +57,7 @@
57
57
  "pako": "^2.1.0",
58
58
  "peggy": "^5.0.6",
59
59
  "querysub": "^0.357.0",
60
- "socket-function": "^1.1.2",
60
+ "socket-function": "^1.1.5",
61
61
  "terser": "^5.31.0",
62
62
  "typesafecss": "^0.28.0",
63
63
  "yaml": "^2.5.0",
@@ -17,6 +17,8 @@ setFlag(require, "cbor-x", "allowclient", true);
17
17
 
18
18
  import * as pako from "pako";
19
19
  import { delay } from "socket-function/src/batching";
20
+ import { LZ4 } from "../storage/LZ4";
21
+ import { unblockLoop } from "socket-function/src/batching";
20
22
  setFlag(require, "pako", "allowclient", true);
21
23
 
22
24
  const DEFAULT_BUFFER_SIZE = 1024 * 16;
@@ -32,6 +34,7 @@ const cborEncoder = lazy(() => new cbor.Encoder({ structuredClone: true }));
32
34
 
33
35
  const SERIALIZE_LOCK_COUNT = true;
34
36
 
37
+
35
38
  /*
36
39
  Memory Layout
37
40
  DataSettings
@@ -67,7 +70,7 @@ interface Reader {
67
70
 
68
71
  // JSON.stringify, length prefixed
69
72
  interface DataSettings {
70
- compression?: "gzip";
73
+ compression?: "gzip" | "lz4";
71
74
  valueCount: number;
72
75
  noLocks?: boolean;
73
76
  stripSource?: boolean;
@@ -390,7 +393,7 @@ class PathValueSerializer {
390
393
  let settings: DataSettings = {
391
394
  valueCount: values.length,
392
395
  noLocks: config?.noLocks,
393
- compression: config?.compress ? "gzip" : undefined,
396
+ compression: config?.compress ? "lz4" : undefined,
394
397
  stripSource: config?.stripSource,
395
398
  version,
396
399
  };
@@ -510,7 +513,20 @@ class PathValueSerializer {
510
513
  stringBuffers.reverse();
511
514
  outputBuffers.push(...stringBuffers);
512
515
 
513
- if (settings.compression === "gzip") {
516
+ if (settings.compression === "lz4") {
517
+ let compressedBuffers = await unblockLoop(outputBuffers.slice(1), x => LZ4.compress(x));
518
+ let compressedOutputBuffers = [outputBuffers[0], ...compressedBuffers];
519
+
520
+ // If the compression ratio (compressed / uncompressed) is below the threshold, keep the compressed buffers; otherwise fall back to the uncompressed ones
521
+ let uncompressedSize = outputBuffers.reduce((total, x) => total + x.length, 0);
522
+ let compressedSize = compressedOutputBuffers.reduce((total, x) => total + x.length, 0);
523
+ if (compressedSize / uncompressedSize < MIN_COMPRESS_FACTOR) {
524
+ outputBuffers = compressedOutputBuffers;
525
+ } else {
526
+ settings.compression = undefined;
527
+ outputBuffers[0] = Buffer.from(JSON.stringify(settings));
528
+ }
529
+ } else if (settings.compression === "gzip") {
514
530
  // NOTE: Due to how the LZ77 window works merging buffers probably won't reduce the size by that much.
515
531
  let compressedBuffers = await Promise.all(outputBuffers.slice(1).map(x => Zip.gzip(x, 1)));
516
532
  let compressedOutputBuffers = [outputBuffers[0], ...compressedBuffers];
@@ -574,7 +590,9 @@ class PathValueSerializer {
574
590
 
575
591
  let settings = JSON.parse(buffers.shift()!.toString("utf8")) as DataSettings;
576
592
  let version = settings.version || 0;
577
- if (settings.compression === "gzip") {
593
+ if (settings.compression === "lz4") {
594
+ buffers = await unblockLoop(buffers, x => LZ4.decompress(x));
595
+ } else if (settings.compression === "gzip") {
578
596
  buffers = await Zip.gunzipBatch(buffers);
579
597
  }
580
598
 
@@ -2,12 +2,12 @@ import { SocketFunction } from "socket-function/SocketFunction";
2
2
  import { measureBlock, measureFnc, measureWrap } from "socket-function/src/profiling/measure";
3
3
  import { errorToUndefined, errorToUndefinedSilent, ignoreErrors, logErrors, timeoutToUndefined, timeoutToUndefinedSilent } from "../errors";
4
4
  import { PromiseObj } from "../promise";
5
- import { getAllNodeIds, getBrowserUrlNode, getOwnNodeId, isNodeDiscoveryLogging, isOwnNodeId, onNodeDiscoveryReady, triggerNodeChange, watchDeltaNodeIds, watchNodeIds } from "../-f-node-discovery/NodeDiscovery";
5
+ import { getAllNodeIds, getBrowserUrlNode, getOwnNodeId, isNodeDiscoveryLogging, isNodeIdLocal, isOwnNodeId, onNodeDiscoveryReady, triggerNodeChange, watchDeltaNodeIds, watchNodeIds } from "../-f-node-discovery/NodeDiscovery";
6
6
  import { PathValueController } from "./PathValueController";
7
7
  import { MAX_ACCEPTED_AUTHORITY_STARTUP_TIME, PathValueSnapshot, STARTUP_CUTOFF_TIME, authorityStorage, matchesParentRangeFilterPart } from "./pathValueCore";
8
8
  import { pathValueArchives } from "./pathValueArchives";
9
9
  import { deepCloneJSON, isNode, sha256Hash, sort, timeInMinute, timeInSecond } from "socket-function/src/misc";
10
- import { delay, runInfinitePoll } from "socket-function/src/batching";
10
+ import { delay, runInSerial, runInfinitePoll } from "socket-function/src/batching";
11
11
  import { blue, green, magenta, red, yellow } from "socket-function/src/formatting/logColors";
12
12
  import debugbreak from "debugbreak";
13
13
  import { getNodeIdFromLocation, getNodeIdIP, getNodeIdLocation } from "socket-function/src/nodeCache";
@@ -22,6 +22,8 @@ import { IdentityController_getCurrentReconnectNodeIdAssert, IdentityController_
22
22
  import { getBufferFraction, getBufferInt, getShortNumber } from "../bits";
23
23
  import { devDebugbreak, getDomain, isDevDebugbreak } from "../config";
24
24
  import { waitForFirstTimeSync } from "socket-function/time/trueTimeShim";
25
+ import { testTCPIsListening } from "socket-function/src/networking";
26
+ import { getMachineId } from "../-a-auth/certs";
25
27
 
26
28
  export const LOCAL_DOMAIN = "LOCAL";
27
29
  export const LOCAL_DOMAIN_PATH = getPathStr1(LOCAL_DOMAIN);
@@ -36,6 +38,7 @@ const MAX_RECONNECT_TIME = timeInMinute * 15;
36
38
  const RECONNECT_POLL_INTERVAL = 10000;
37
39
 
38
40
  // Poll nodes that appear dead. Without this, if the internet goes down, we might forever ignore nodes.
41
+ // NOTE: During development this might seem excessive, however, after about an hour this should ramp up to the full poll interval of a minute, which should be fairly reasonable.
39
42
  const INITIAL_RECOVERY_POLL_INTERVAL = timeInSecond * 5;
40
43
  const INITIAL_RECOVERY_RUNS = 100;
41
44
  const RECOVERY_POLL_INTERVAL = timeInMinute;
@@ -85,6 +88,8 @@ class NodePathAuthorities {
85
88
 
86
89
  private selfAuthorities: AuthorityPath[] = [];
87
90
 
91
+ private outdatedNodeIds = new Set<string>();
92
+
88
93
  public debug_getAuthority(nodeId: string) {
89
94
  return this.authorities.get(nodeId);
90
95
  }
@@ -213,7 +218,7 @@ class NodePathAuthorities {
213
218
  let obj = this.authorities.get(nodeId);
214
219
  if (!obj) {
215
220
  // Might as well use this to add the node, if we don't know about it yet.
216
- ingestNewNodeIds([nodeId], []);
221
+ void ingestNewNodeIds([nodeId], []);
217
222
  return;
218
223
  }
219
224
  obj.authorityPaths = await PathController.nodes[nodeId].getAuthorityPaths();
@@ -225,7 +230,7 @@ class NodePathAuthorities {
225
230
  let firstPromise = new PromiseObj<unknown>();
226
231
  logErrors(firstPromise.promise);
227
232
 
228
- const ingestNewNodeIds = (newNodeIds: string[], removedNodeIds: string[]) => {
233
+ const ingestNewNodeIds = runInSerial(async (newNodeIds: string[], removedNodeIds: string[]) => {
229
234
  for (let nodeId of removedNodeIds) {
230
235
  this.authorities.delete(nodeId);
231
236
  }
@@ -249,15 +254,44 @@ class NodePathAuthorities {
249
254
  return;
250
255
  }
251
256
 
257
+ let nodeMachineId = getMachineId(nodeId);
258
+ let onSameMachine = Array.from(this.authorities.keys()).filter(x => getMachineId(x) === nodeMachineId && x !== nodeId);
259
+ if (this.outdatedNodeIds.has(nodeId) && onSameMachine.length > 0) {
260
+ // It's just an old thread that's dead, and once threads die, they don't come back. This should be fairly safe, as we confirm that both a new thread is running on the same port, and that the old thread doesn't respond.
261
+ return;
262
+ }
263
+
264
+ // Do an initial test by just opening a TCP connection. This will help filter out a lot of dead nodes immediately. Which, in turn, avoids having to create our initial connection signature, which saves a lot of time.
265
+ if (isNode()) {
266
+ let nodeIdObj = getNodeIdLocation(nodeId);
267
+ if (!nodeIdObj) {
268
+ console.error(`Bad nodeId ${nodeId}`);
269
+ return;
270
+ }
271
+ let isListening = await testTCPIsListening(nodeIdObj.address, nodeIdObj.port);
272
+ if (!isListening) return;
273
+ }
274
+
275
+ console.log(blue(`Checking for status of node ${nodeId}`));
252
276
  let time = Date.now();
253
277
  let createTime = await timeoutToUndefinedSilent(POLL_RATE, PathController.nodes[nodeId].getCreateTime());
254
278
  if (createTime === undefined) {
279
+ if (!isNodeIdLocal(nodeId)) {
280
+ let aliveNodes = await Promise.all(onSameMachine.map(x => errorToUndefinedSilent(PathController.nodes[x].isReadReady())));
281
+ if (aliveNodes.some(x => x)) {
282
+ console.warn(`Node is on the same machine and port as another alive node, AND it is not responding. Assuming it is just an older version of a dead node`, { nodeId, aliveNodes });
283
+ this.outdatedNodeIds.add(nodeId);
284
+ return;
285
+ }
286
+ }
287
+
255
288
  // Don't log for 127-0-0-1, as it usually fails, and is mostly a development optimization
256
289
  if (!nodeId.includes("127-0-0-1")) {
257
290
  if (!this.previouslyNotAvailableNodes.has(nodeId)) {
258
291
  console.log(yellow(`Node didn't respond to getCreateTime`), { nodeId });
259
292
  }
260
293
  }
294
+ console.log(yellow(`Node ${nodeId} is not available`));
261
295
  this.previouslyNotAvailableNodes.add(nodeId);
262
296
  return;
263
297
  }
@@ -311,7 +345,7 @@ class NodePathAuthorities {
311
345
  if (isCurrentFirst) {
312
346
  firstPromise.resolve(promise);
313
347
  }
314
- };
348
+ });
315
349
 
316
350
  let time = Date.now();
317
351
  watchDeltaNodeIds(obj => ingestNewNodeIds(obj.newNodeIds, obj.removedNodeIds));
@@ -340,7 +374,7 @@ class NodePathAuthorities {
340
374
  nonExistentNodes.push(node);
341
375
  }
342
376
  // Pretend all dead nodes are new
343
- ingestNewNodeIds(nonExistentNodes, []);
377
+ await ingestNewNodeIds(nonExistentNodes, []);
344
378
  });
345
379
 
346
380
  let readyCount = 0;
@@ -23,6 +23,8 @@ import { TypedConfigEditor } from "../../library-components/TypedConfigEditor";
23
23
  import { managementPageURL } from "../../diagnostics/managementPages";
24
24
  import { getLogViewerParams } from "../../diagnostics/logs/IndexedLogs/LogViewerParams";
25
25
  import { getScreenName } from "../machineApplyMainCode";
26
+ import { getOwnThreadId } from "../../-f-node-discovery/NodeDiscovery";
27
+ import { decodeNodeId } from "../../-a-auth/certs";
26
28
 
27
29
 
28
30
 
@@ -338,12 +340,12 @@ export class ServiceDetailPage extends qreact.Component {
338
340
  <div
339
341
  className={css.hbox(12)}
340
342
  >
341
- <div className={css.vbox(5)}>
343
+ <div className={css.hbox(5)}>
342
344
  <div>
343
345
  {screenName}
344
346
  </div>
345
347
  <div>
346
- {machineId} ({machineInfo.info["getExternalIP"]})
348
+ {serviceInfo?.nodeId || machineId} ({machineInfo.info["getExternalIP"]})
347
349
  </div>
348
350
  </div>
349
351
  {isDisabled && (
@@ -59,6 +59,8 @@ export type MachineInfo = {
59
59
  // Only times launched for the current applyNodeId, but... still very useful.
60
60
  totalTimesLaunched: number;
61
61
  // Might take a while to set (15 minutes or more). It's better to look in the nodes list and find the one that seems to match (maybe looking at the startup path to find it?)
62
+ // TODO: Make this work correctly when we deploy multiple instances (the node needs to know which index it is so it can write to a unique file).
63
+ // - We could probably set environment variables in the screen?
62
64
  nodeId: string;
63
65
  }>;
64
66
  };
@@ -3,7 +3,7 @@ import { nestArchives } from "../../../-a-archives/archives";
3
3
  import { getArchivesBackblaze } from "../../../-a-archives/archivesBackBlaze";
4
4
  import { archiveJSONT } from "../../../-a-archives/archivesJSONT";
5
5
  import { Querysub } from "../../../4-querysub/QuerysubController";
6
- import { getDomain } from "../../../config";
6
+ import { getDomain, isPublic } from "../../../config";
7
7
  import { MachineInfo } from "../../../deployManager/machineSchema";
8
8
  import { createMatchesPattern } from "../IndexedLogs/bufferSearchFindMatcher";
9
9
  import { LogDatum, getErrorLogs } from "../diskLogger";
@@ -482,7 +482,7 @@ const ErrorNotificationServiceBase = SocketFunction.register(
482
482
 
483
483
  class ErrorNotificationData {
484
484
  public async getData() {
485
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
485
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
486
486
  if (!controllerNodeId) {
487
487
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
488
488
  }
@@ -490,7 +490,7 @@ class ErrorNotificationData {
490
490
  }
491
491
 
492
492
  public async getUnmatchedErrorsLimited() {
493
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
493
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
494
494
  if (!controllerNodeId) {
495
495
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
496
496
  }
@@ -536,7 +536,7 @@ class ErrorNotificationData {
536
536
  }
537
537
 
538
538
  private static ensureWatchingErrorsHTTP = lazy(async () => {
539
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
539
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
540
540
  if (!controllerNodeId) {
541
541
  ErrorNotificationData.ensureWatchingErrorsHTTP.reset();
542
542
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
@@ -572,7 +572,7 @@ class ErrorNotificationData {
572
572
  }
573
573
 
574
574
  public async getSuppressionEntries() {
575
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
575
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
576
576
  if (!controllerNodeId) {
577
577
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
578
578
  }
@@ -580,7 +580,7 @@ class ErrorNotificationData {
580
580
  }
581
581
 
582
582
  public async setSuppressionEntry(entry: SuppressionEntry) {
583
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
583
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
584
584
  if (!controllerNodeId) {
585
585
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
586
586
  }
@@ -588,7 +588,7 @@ class ErrorNotificationData {
588
588
  }
589
589
 
590
590
  public async deleteSuppressionEntry(id: string) {
591
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
591
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
592
592
  if (!controllerNodeId) {
593
593
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
594
594
  }
@@ -600,7 +600,7 @@ class ErrorNotificationData {
600
600
  }
601
601
 
602
602
  public async updateSuppressionNotes(id: string, notes: string | undefined) {
603
- let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase);
603
+ let controllerNodeId = await getControllerNodeId(ErrorNotificationServiceBase, !isPublic());
604
604
  if (!controllerNodeId) {
605
605
  throw new Error(`Could not find node exposing controller ErrorNotificationServiceBase`);
606
606
  }
@@ -1,64 +1,14 @@
1
1
  /*
2
2
  todonext
3
3
 
4
- OKAY! CORE CONCEPTS
5
- - Index needs to be fast searched, so it's uncompressed form has to be small
6
- - Nesting compression is fine, as long as it's small enough that each search decompresses very little
7
- - The enemy is storing every single position, we get around this by:
8
- - In-exact matches, only storing block indexes.
9
- - Requires full search to verify, but... in practice has low false hit rate
10
- - Input key (unit) is only inexact matched.
11
- - Input query almost always has more than 1 byte of uniqueness. This means if even part is only 50% accurate, we can still get very high hit rates (8 bytes means 255/256 hit rate!)
4
+ -1) Deploy everything again!
12
5
 
6
+ -2) Fix synchronization speed from remote to local.
7
+ - I think we just have a lot of data, which our code will detect and log now.
8
+ - And we'll see how much data we are even sending
13
9
 
14
- todonext
15
-
16
-
17
-
18
- IMPORTANT! Now I am properly calling shutdown, so none of the streamed logs should ever break. The code should be waiting until everything's fully flushed before it allows the shutdown handler to finish running. If we see any more errors, we need to investigate them.
19
-
20
-
21
-
22
-
23
- //todonext
24
-
25
-
26
-
27
- 3) "guessed node id" is never working? At least, on the machine's page it is ALWAYS empty, even though it should be almost always detected...
28
-
29
- 4) Verify it works again, when we have multiple servers
30
-
31
- Invalid addition, you tried to add 1 + Infinity, operands must be valid numbers
32
-
33
-
34
-
35
- 0) Add LZ4 compression to socket-function by default
36
- - Setup local tester for it to start
37
- - with non-synced endpoint, just surely socket-function
38
- - purely nodejs, just talking to itself
39
- - Allow setting "compress" to "none" or "lz4" or "zip" or "zip0" or "zip3", etc, for levels.
40
- - default is "lz4"
41
- - REQUIRES feature checking the remote, to make sure it is new enough to accept this.
42
- - A generic thing which gets the version is probably fine.
43
- - When decoding, I think we should just check the first byte. It should be a magic number which tells us which compression it's using.
44
- - LZ4 compression is fast enough that this should cause basically no overhead, and in many cases greatly reduce the bandwidth (which will increase the speed).
45
- - We're gonna have to investigate how we're sending buffers anyway. I think this should be easy, but we
46
- 0.1) Verify the size difference with some local testing
47
- - ALSO, verify the processing overhead is acceptable.
48
- 1) Deploy, which SHOULD be backwards compatible with everything?
49
-
50
-
51
- -1) Change PathValueSerialize to use LZ4
52
- - I think we already have a compression flag here, so it should be easy enough to just support it to change it to support LZ4.
53
- - I think we want to disable the server, and then when we launch the server locally, it should have to read from disk.
54
- -2) It would probably be valuable to test the synchronization speed between two different servers. It seems like when the remote is running and we start a local server, it really takes a long time for it to get ready. As in minutes.
55
- - This might have already been fixed by us improving the compression speed to not use gzip, maybe, but we should test it again, and if not, we should see what's slow
56
- - We might need to jump into the server and look at its profiles there by forcing them to omit, which isn't too hard, but it is kind of annoying.
10
+ Write Monthly Summary
57
11
 
58
- -3) During server startup (when there is a remote server), we spend a lot of time in identityHook.
59
- - BUT... Our check to see if the async call that we're doing underneath is taking more than 200 milliseconds, is never firing. Even though apparently plenty of calls are taking over a second in total, so what why is it slow? Are some of the synchronous functions we're calling slow?
60
- - Basically, there's nowhere that could be slow, and we're timing it, and it times as being fast. However, in the profiles, it shows up as being slow.
61
- - It could also be an asynchronous issue, so some other timing is getting alias to it. That is feasible...
62
12
  */
63
13
 
64
14
 
@@ -73,6 +23,9 @@ Invalid addition, you tried to add 1 + Infinity, operands must be valid numbers
73
23
  // - I mean, we have to implement it without caching anyways to start, so we can just do that, and then cache it later if we see the need...
74
24
  // - WELL, we need SOME kind of caching... Maybe... we DO use a strict time range, and then... if we've scanned a pending file in the past, we can cache that, because we know we've got all of it's values. Hmm...
75
25
  // - Use lifecycles to debug rejections. To a point, until it is likely we are out of sync, then we should write the sync verification code.
26
+ // - Maybe it isn't necessarily rejections, but definitely if we're running stuff locally and remotely it breaks. Could it be the case that we can't run the path value server both locally and remotely? And if so, we really should prevent this in some way.
27
+ // - Annoyingly enough, we're actually going to have to add a case where we can scan the local logs as well as the remote logs to debug running it locally...
28
+ // - ALTHOUGH, It does fail just if we have the local running server, But we're on the remote site. So we might be able to just debug it from that life cycle, from the remote life cycles.
76
29
 
77
30
 
78
31
  // todonext;
@@ -1,32 +1,2 @@
1
- // NOTE: Even if we wanted to use the production version, we couldn't because it's not compatible with the client-side code, because they decided to do a file read to load in their WebAssembly.
2
- import lz4_stream from "../misc/lz4_wasm_nodejs";
3
- import { measureFnc } from "socket-function/src/profiling/measure";
4
- export class LZ4 {
5
- @measureFnc
6
- static compress(data: Buffer): Buffer {
7
- return this.compressUntracked(data);
8
- }
9
- static compressUntracked(data: Buffer): Buffer {
10
- try {
11
- return Buffer.from(lz4_stream.compress(data));
12
- } catch (e) {
13
- // Rethrow non errors as properly wrapped errors
14
- if (!(e && e instanceof Error)) {
15
- throw new Error(`Error compressing LZ4: ${e}`);
16
- }
17
- throw e;
18
- }
19
- }
20
- @measureFnc
21
- static decompress(data: Buffer): Buffer {
22
- try {
23
- return Buffer.from(lz4_stream.decompress(data));
24
- } catch (e) {
25
- // Rethrow non errors as properly wrapped errors
26
- if (!(e && e instanceof Error)) {
27
- throw new Error(`Error decompressing LZ4: ${e}`);
28
- }
29
- throw e;
30
- }
31
- }
32
- }
1
+ import { LZ4 } from "socket-function/src/lz4/LZ4";
2
+ export { LZ4 };
package/test.ts CHANGED
@@ -6,12 +6,105 @@ import "./inject";
6
6
  import { Querysub } from "./src/4-querysub/QuerysubController";
7
7
  import { getErrorLogs, getLoggers2Async } from "./src/diagnostics/logs/diskLogger";
8
8
  import { watchAllValues } from "./src/diagnostics/logs/errorNotifications2/logWatcher";
9
+ import { SocketFunction } from "socket-function/SocketFunction";
10
+ import fs from "fs";
11
+ import { getControllerNodeId, getControllerNodeIdList } from "./src/-g-core-values/NodeCapabilities";
12
+ import { delay } from "socket-function/src/batching";
13
+ import { list, timeInSecond } from "socket-function/src/misc";
14
+ import { lazy } from "socket-function/src/caching";
15
+ import { formatNumber } from "socket-function/src/formatting/format";
16
+ import { LZ4 } from "socket-function/src/lz4/LZ4";
9
17
 
18
+ const paths = [
19
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756166400000-1756252800000.log",
20
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-0-dply-1756252800000-1756339200000.log",
21
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756425600000-1756512000000.log",
22
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756425600000-1756512000000.log",
23
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756684800000-1756771200000.log",
24
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756684800000-1756771200000.log",
25
+ "D:/repos/qs-cyoa/database-storage/disklogs/bootstrapper-0-dply-1756425600000-1756512000000.log",
26
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756598400000-1756684800000.log",
27
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756598400000-1756684800000.log",
28
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756252800000-1756339200000.log",
29
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756252800000-1756339200000.log",
30
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756512000000-1756598400000.log",
31
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756512000000-1756598400000.log",
32
+ "D:/repos/qs-cyoa/database-storage/disklogs/bootstrapper-0-dply-1756339200000-1756425600000.log",
33
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756339200000-1756425600000.log",
34
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756339200000-1756425600000.log",
35
+ "D:/repos/qs-cyoa/database-storage/disklogs/bootstrapper-0-dply-1756684800000-1756771200000.log",
36
+ "D:/repos/qs-cyoa/database-storage/disklogs/function-0-dply-1756425600000-1756512000000.log",
37
+ "D:/repos/qs-cyoa/database-storage/disklogs/bootstrapper-0-dply-1756598400000-1756684800000.log",
38
+ "D:/repos/qs-cyoa/database-storage/disklogs/bootstrapper-0-dply-1756512000000-1756598400000.log",
39
+ "D:/repos/qs-cyoa/database-storage/disklogs/function-0-dply-1756684800000-1756771200000.log",
40
+ "D:/repos/qs-cyoa/database-storage/disklogs/function-0-dply-1756598400000-1756684800000.log",
41
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-0-dply-1756598400000-1756684800000.log",
42
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-1-dply-1756598400000-1756684800000.log",
43
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-0-dply-1756512000000-1756598400000.log",
44
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-1-dply-1756512000000-1756598400000.log",
45
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756166400000-1756252800000.log",
46
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756166400000-1756252800000.log",
47
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756252800000-1756339200000.log",
48
+ "D:/repos/qs-cyoa/database-storage/disklogs/function-0-dply-1756252800000-1756339200000.log",
49
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756425600000-1756512000000.log",
50
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-0-dply-1756080000000-1756166400000.log",
51
+ "D:/repos/qs-cyoa/database-storage/disklogs/gc-0-dply-1756425600000-1756512000000.log",
52
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756339200000-1756425600000.log",
53
+ "D:/repos/qs-cyoa/database-storage/disklogs/gc-0-dply-1756252800000-1756339200000.log",
54
+ "D:/repos/qs-cyoa/database-storage/disklogs/server-1-dply-1756080000000-1756166400000.log",
55
+ "D:/repos/qs-cyoa/database-storage/disklogs/gc-0-dply-1756339200000-1756425600000.log",
56
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756598400000-1756684800000.log",
57
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756684800000-1756771200000.log",
58
+ "D:/repos/qs-cyoa/database-storage/disklogs/cyoa-1-dply-1756252800000-1756339200000.log",
59
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756512000000-1756598400000.log",
60
+ "D:/repos/qs-cyoa/database-storage/disklogs/join-0-dply-1756080000000-1756166400000.log",
61
+ ];
62
+
63
+ const FACTOR = 1;
64
+
65
+
66
+ let getData = lazy(async () => {
67
+ let buffers: Buffer[] = [];
68
+ for (let path of paths) {
69
+ let data = await fs.promises.readFile(path, "utf8");
70
+ buffers.push(Buffer.from(data));
71
+ }
72
+ return list(FACTOR).map(i => buffers.map(b => Buffer.from(b))).flat();
73
+ });
74
+
75
+ class TestControllerBase {
76
+ async test() {
77
+ return (await getData()).slice();
78
+ }
79
+ }
80
+
81
+ const TestController = SocketFunction.register("TestController-019ca5fa-52ef-73bf-b918-4d9cde51b618", new TestControllerBase(), () => ({
82
+ test: {
83
+ //compress: false
84
+ },
85
+ }));
86
+ function compareBuffers(result: Buffer[], correct: Buffer[]) {
87
+ if (result.length !== correct.length) throw new Error(`Result does not match correct: ${result.length} !== ${correct.length}`);
88
+ for (let i = 0; i < result.length; i++) {
89
+ if (!result[i].equals(correct[i])) throw new Error(`Result does not match correct: ${result[i].length} !== ${correct[i].length}, at index ${i}`);
90
+ }
91
+ }
10
92
  async function main() {
11
93
  await Querysub.hostService("testwatcher");
12
- let errorLogs = await getErrorLogs();
13
- for await (let error of watchAllValues(errorLogs)) {
14
- process.stdout.write(JSON.stringify(error) + "\n");
94
+ //SocketFunction.logMessages = true;
95
+
96
+ while (true) {
97
+ try {
98
+ let otherNode = await getControllerNodeId(TestController);
99
+ if (!otherNode) throw new Error("No other node found");
100
+ let result = (await TestController.nodes[otherNode].test());
101
+ let correct = (await getData());
102
+ compareBuffers(result, correct);
103
+ console.log(`Received ${formatNumber(result.reduce((a, b) => a + b.length, 0))} bytes from ${otherNode}`);
104
+ } catch (e: any) {
105
+ console.error(e);
106
+ }
107
+ await delay(timeInSecond * 1);
15
108
  }
16
109
  }
17
110