querysub 0.328.0 → 0.329.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,8 @@
+ #!/usr/bin/env node
+
+ // public, so we can connect to it from any node (for diagnostics, etc)
+ process.argv.push("--public");
+ process.argv.push("--nobreak");
+
+ require("typenode");
+ require("../src/diagnostics/logs/errorNotifications/errorDigestEmail.tsx");
@@ -0,0 +1,8 @@
+ #!/usr/bin/env node
+
+ // public, so we can connect to it from any node (for diagnostics, etc)
+ process.argv.push("--public");
+ process.argv.push("--nobreak");
+
+ require("typenode");
+ require("../src/diagnostics/logs/errorNotifications/errorDigests.tsx");
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "querysub",
- "version": "0.328.0",
+ "version": "0.329.0",
  "main": "index.js",
  "license": "MIT",
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -22,7 +22,7 @@
  "js-sha512": "^0.9.0",
  "node-forge": "https://github.com/sliftist/forge#e618181b469b07bdc70b968b0391beb8ef5fecd6",
  "pako": "^2.1.0",
- "socket-function": "^0.138.0",
+ "socket-function": "^0.140.0",
  "terser": "^5.31.0",
  "typesafecss": "^0.22.0",
  "yaml": "^2.5.0",
@@ -55,7 +55,9 @@
  "join": "./bin/join.js",
  "join-public": "./bin/join-public.js",
  "merge": "./bin/merge.js",
- "addsuperuser": "./bin/addsuperuser.js"
+ "addsuperuser": "./bin/addsuperuser.js",
+ "error-email": "./bin/error-email.js",
+ "error-im": "./bin/error-im.js"
  },
  "devDependencies": {
  "dependency-cruiser": "^12.11.0",
@@ -0,0 +1,52 @@
+ import { lazy } from "socket-function/src/caching";
+ import { Archives } from "./archives";
+ import { decodeCborx, encodeCborx } from "../misc/cloneHelpers";
+ import { ArchiveT } from "./archivesJSONT";
+
+ export function archiveCborT<T>(archives: () => Archives): ArchiveT<T> {
+ archives = lazy(archives);
+ async function get(key: string) {
+ let buffer = await archives().get(key);
+ if (!buffer) return undefined;
+ return decodeCborx(buffer) as T;
+ }
+ async function set(key: string, value: T) {
+ await archives().set(key, encodeCborx(value));
+ }
+ async function deleteFnc(key: string) {
+ await archives().del(key);
+ }
+ async function keys() {
+ return (await archives().find("")).map(value => value.toString());
+ }
+ async function values() {
+ let keysArray = await keys();
+ let results: T[] = [];
+ await Promise.all(keysArray.map(async key => {
+ let value = await get(key);
+ if (value) {
+ results.push(value);
+ }
+ }));
+ return results;
+ }
+ async function entries(): Promise<[string, T][]> {
+ let keysArray = await keys();
+ let results: [string, T][] = [];
+ await Promise.all(keysArray.map(async key => {
+ let value = await get(key);
+ if (value) {
+ results.push([key, value]);
+ }
+ }));
+ return results;
+ }
+ async function* asyncIterator(): AsyncIterator<[string, T]> {
+ for (let [key, value] of await entries()) {
+ yield [key, value];
+ }
+ }
+ return {
+ get, set, delete: deleteFnc, keys, values, entries, [Symbol.asyncIterator]: asyncIterator
+ };
+ }
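Note on the new file above: archiveCborT mirrors the existing archiveJSONT helper but encodes values with CBOR (encodeCborx/decodeCborx) instead of JSON, and its values()/entries() skip keys whose value is missing. A minimal usage sketch, assuming some () => Archives factory is available; the openArchives helper and Session type below are illustrative, not part of the package:

```ts
import { archiveCborT } from "./archiveCborT";
import { Archives } from "./archives";

type Session = { userId: string; lastSeen: number };

// Hypothetical factory; anything returning an Archives works (it is wrapped in lazy() internally).
declare function openArchives(name: string): Archives;

async function example() {
    const sessions = archiveCborT<Session>(() => openArchives("sessions"));

    await sessions.set("session-1", { userId: "u1", lastSeen: Date.now() });

    const one = await sessions.get("session-1"); // Session | undefined
    if (one) console.log(one.userId);

    // The store is async-iterable over [key, value] pairs.
    for await (const [key, session] of sessions) {
        console.log(key, session.lastSeen);
    }
}
```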
@@ -1,7 +1,7 @@
  import { lazy } from "socket-function/src/caching";
  import { Archives } from "./archives";

- export type ArchiveJSONT<T> = {
+ export type ArchiveT<T> = {
  get(key: string): Promise<T | undefined>;
  set(key: string, value: T): Promise<void>;
  delete(key: string): Promise<void>;
@@ -11,12 +11,12 @@ export type ArchiveJSONT<T> = {
  [Symbol.asyncIterator](): AsyncIterator<[string, T]>;
  };

- export function archiveJSONT<T>(archives: () => Archives): ArchiveJSONT<T> {
+ export function archiveJSONT<T>(archives: () => Archives): ArchiveT<T> {
  archives = lazy(archives);
  async function get(key: string) {
  let buffer = await archives().get(key);
  if (!buffer) return undefined;
- return JSON.parse(buffer.toString());
+ return JSON.parse(buffer.toString()) as T;
  }
  async function set(key: string, value: T) {
  await archives().set(key, Buffer.from(JSON.stringify(value)));
@@ -29,11 +29,25 @@ export function archiveJSONT<T>(archives: () => Archives): ArchiveJSONT<T> {
  }
  async function values() {
  let keysArray = await keys();
- return Promise.all(keysArray.map(key => get(key)));
+ let results: T[] = [];
+ await Promise.all(keysArray.map(async key => {
+ let value = await get(key);
+ if (value) {
+ results.push(value);
+ }
+ }));
+ return results;
  }
  async function entries(): Promise<[string, T][]> {
  let keysArray = await keys();
- return Promise.all(keysArray.map(async key => [key, await get(key)]));
+ let results: [string, T][] = [];
+ await Promise.all(keysArray.map(async key => {
+ let value = await get(key);
+ if (value) {
+ results.push([key, value]);
+ }
+ }));
+ return results;
  }
  async function* asyncIterator(): AsyncIterator<[string, T]> {
  for (let [key, value] of await entries()) {
@@ -35,6 +35,7 @@ import { deepCloneCborx } from "../misc/cloneHelpers";
  import { formatPercent, formatTime } from "socket-function/src/formatting/format";
  import { addStatPeriodic, interceptCalls, onAllPredictionsFinished, onTimeProfile } from "../-0-hooks/hooks";
  import { onNextPaint } from "../functional/onNextPaint";
+ import { isAsyncFunction } from "../misc";

  // TODO: Break this into two parts:
  // 1) Run and get accesses
@@ -975,6 +976,9 @@ export class PathValueProxyWatcher {
  public createWatcher<Result = void>(
  options: WatcherOptions<Result>
  ): SyncWatcher {
+ if (isAsyncFunction(options.watchFunction)) {
+ throw new Error(`Async functions are not supported in watchers. They must run the caller synchronously. You are likely not using Await anyway, so just remove the async and make it a synchronous function. The caller will be called again whenever the data you access changes, And if you are running this to return a result, it will be rerun until all the data you want is synchronized. Watch function: ${options.watchFunction.toString()}`);
+ }
  // NOTE: Setting an order is needed for rendering, so parents render before children. I believe
  // it is generally what we want, so event triggering is consistent, and fits with any tree based
  // watching system. If this causes problems we COULD remove it from here and have just qreact.tsx set it.
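The new createWatcher guard rejects async watch functions because the watcher must invoke its callback synchronously and simply reruns it when the data it accessed changes. The package's isAsyncFunction comes from ../misc and is not shown in this diff; the check below is only a common, illustrative way to implement it:

```ts
// Illustrative detection of async functions; not necessarily the package's implementation.
// (Transpiled or bound functions may not be detected this way.)
function isAsyncFunction(fn: Function): boolean {
    return fn.constructor.name === "AsyncFunction";
}

// Would be rejected by createWatcher: async functions cannot be run synchronously.
const badWatch = async () => { /* read watched state */ };

// Accepted: synchronous, and rerun whenever the accessed data changes.
const goodWatch = () => { /* read watched state */ };

console.log(isAsyncFunction(badWatch));  // true
console.log(isAsyncFunction(goodWatch)); // false
```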
package/src/config.ts CHANGED
@@ -11,6 +11,7 @@ export const serverPort = 11748;
  let yargObj = parseArgsFactory()
  .option("nonetwork", { type: "boolean", desc: `Disables all network requirements. Reduces security, as this means we cannot use real certificates.` })
  .option("domain", { type: "string", desc: `Sets the domain` })
+ .option("emaildomain", { type: "string", desc: `Sets the domain to use for email` })
  .option("client", { type: "boolean", desc: `Drops permissions, acting as an unauthenticated node` })
  .option("authority", { type: "string", desc: `Defines the base paths we are an authority on (the domain is prepended to them). Either a file path to a JSON(AuthorityPath[]), or a base64 representation of the JSON(AuthorityPath[]).` })
  .option("nobreak", { type: "boolean", desc: "Do not break on errors. Safer to set this than to just not set debugbreak, as some places might break without checking debugbreak, but nobreak works at a level where it is always used." })
@@ -20,12 +21,15 @@ let yargObj = parseArgsFactory()
  .option("recovery", { type: "boolean", desc: `Allows any localhost connections to act like a superuser (and a trusted node), to help recover the database (as you need permission to access the snapshot page).` })
  // TODO: The bootstrapper is a single file. Maybe we shouldn't run the entire service just for that. Although... maybe it's fine, as services are light?
  .option("bootstraponly", { type: "boolean", desc: "Don't register as an edge node, so we serve the bootstrap files, but we don't need up to date code because we are not used for endpoints or the UI." })
+ .option("notifyemails", { type: "array", desc: "The emails to notify when errors occur." })
  .argv
  ;
-
- let querysubConfig = lazy((): {
+ type QuerysubConfig = {
  domain?: string;
- } => {
+ emaildomain?: string;
+ notifyemails?: string[];
+ };
+ let querysubConfig = lazy((): QuerysubConfig => {
  if (!isNode()) throw new Error("querysubConfig is only available on the server");
  const path = "./querysub.json";
  if (!fs.existsSync(path)) {
@@ -54,6 +58,14 @@ export function getDomain() {
  return yargObj.domain || querysubConfig().domain || "querysub.com";
  }

+ export function getEmailDomain() {
+ return yargObj.emaildomain || querysubConfig().emaildomain || getDomain();
+ }
+
+ export function getNotifyEmails() {
+ return Array.from(new Set([...(yargObj.notifyemails || []), ...(querysubConfig().notifyemails || [])]));
+ }
+
  export function baseIsClient() {
  return !isNode() || yargObj.client;
  }
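The two new config getters layer the CLI flags over ./querysub.json: getEmailDomain falls back from --emaildomain to the config file's emaildomain and finally to getDomain(), and getNotifyEmails is the deduplicated union of --notifyemails and the file's notifyemails array. A small sketch of that union, with example addresses that are not package defaults:

```ts
// Mirrors the Set-based union in getNotifyEmails(); addresses are purely illustrative.
const fromCli = ["ops@example.com", "dev@example.com"]; // --notifyemails ops@example.com dev@example.com
const fromConfigFile = ["dev@example.com"];             // "notifyemails" in ./querysub.json
const notifyEmails = Array.from(new Set([...fromCli, ...fromConfigFile]));
console.log(notifyEmails); // ["ops@example.com", "dev@example.com"]
```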
@@ -5,7 +5,7 @@ import { getMachineId, getOwnMachineId } from "../../-a-auth/certs";
  import { isDefined, parseFileNameKVP, parsePath, partialCopyObject, streamToIteratable, sum, toFileNameKVP } from "../../misc";
  import { registerShutdownHandler } from "../periodic";
  import { batchFunction, delay, runInSerial, runInfinitePoll, runInfinitePollCallAtStart } from "socket-function/src/batching";
- import { PromiseObj, isNode, keyByArray, nextId, sort, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
+ import { PromiseObj, isNode, keyByArray, list, nextId, sort, timeInDay, timeInHour, timeInMinute } from "socket-function/src/misc";
  import os from "os";
  import { getOwnThreadId } from "../../-f-node-discovery/NodeDiscovery";
  import fs from "fs";
@@ -310,7 +310,10 @@ export class FastArchiveAppendable<Datum> {
  let compressed = await measureBlock(async () => Zip.gzip(data), "FastArchiveAppendable|compress");
  console.log(`Uploading ${formatNumber(data.length)}B (compressed to ${formatNumber(compressed.length)}B) logs to ${backblazePath} from ${fullPath}`);
  await archives.set(backblazePath, compressed);
- await fs.promises.unlink(fullPath);
+ // Ignore unlink errors to reduce excess logging. This races on startup, so it is likely we'll hit this a fair amount (especially because archives.set is so slow)
+ try {
+ await fs.promises.unlink(fullPath);
+ } catch { }
  } catch (e: any) {
  // Just skip it, if the first file in the directory is broken we don't want to never move any files
  console.error(`Error moving log file ${fullPath}: ${e.stack}`);
@@ -689,11 +692,17 @@ export class FastArchiveAppendable<Datum> {
  if (stoppedPromise.resolveCalled) return;
  await fileProgress(1, 0, true);
  }
- for (let file of syncResult.files) {
- await downloadFileWrapper(file);
+ let remaining = syncResult.files.slice();
+ async function runThread() {
+ while (true) {
+ let file = remaining.shift();
+ if (!file) {
+ return;
+ }
+ await downloadFileWrapper(file);
+ }
  }
- await runInSerial(async () => {
- });
+ await Promise.all(list(32).map(() => runThread()));

  await (await createProgress("Done", 0))(1, 1, true);
  } catch (e: any) {
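The download loop above replaces the old per-file serial await with a fixed pool of 32 workers pulling from a shared queue, so one slow file no longer blocks the rest. The same pattern in isolation, assuming the queued items are truthy (as the file metadata objects are); the diff builds its worker array with list(32) from socket-function/src/misc, while Array.from below is just a dependency-free stand-in:

```ts
// Bounded-concurrency helper in the same shape as the diff: N workers shift items
// off a shared queue until it is empty, each awaiting its handler in turn.
async function forEachWithConcurrency<T>(
    items: T[],
    concurrency: number,
    handle: (item: T) => Promise<void>,
): Promise<void> {
    const remaining = items.slice();
    async function runThread(): Promise<void> {
        while (true) {
            const item = remaining.shift();
            if (!item) return; // assumes no falsy items, matching the diff's file objects
            await handle(item);
        }
    }
    await Promise.all(Array.from({ length: concurrency }, () => runThread()));
}

// Usage sketch: await forEachWithConcurrency(files, 32, downloadFileWrapper);
```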
@@ -36,6 +36,7 @@ import { blue, magenta } from "socket-function/src/formatting/logColors";
  import { FastArchiveAppendable, getFileTimeStamp } from "./FastArchiveAppendable";
  import { IdentityController_getMachineId, IdentityController_getReconnectNodeId } from "../../-c-identity/IdentityController";
  import { fsExistsAsync } from "../../fs";
+ import { Querysub } from "../../4-querysub/QuerysubController";

  export type FileMetadata = {
  nodeId?: string;
@@ -275,6 +276,9 @@ export class FastArchiveAppendableControllerBase {
  }): Promise<{
  files: FileMetadata[];
  }> {
+ if (!SocketFunction.mountedNodeId) {
+ throw new Error(`Cannot use FastArchiveAppendableController before SocketFunction is mounted`);
+ }
  let syncId = config.syncId ?? "";

  // Define inline functions for parallel execution
@@ -421,6 +425,7 @@ export class FastArchiveAppendableControllerBase {
  let urlObj = new URL(url);
  urlObj.hostname = ipDomain;
  url = urlObj.toString();
+
  let timeStamp = getFileTimeStamp(file.path);
  let startTime = timeStamp.startTime;
  let endTime = timeStamp.endTime;
@@ -446,6 +451,7 @@ export class FastArchiveAppendableControllerBase {

  let allFilesList = await Promise.all(filePromises);
  let allFiles = allFilesList.flat();
+ // Newest first, so recent errors are found quickly
  sort(allFiles, x => -x.startTime);

  return {
@@ -6,11 +6,12 @@ import { getOwnNodeId } from "../../-f-node-discovery/NodeDiscovery";
  import { logErrors } from "../../errors";
  import { addGlobalContext } from "./diskLogger";
  import child_process from "child_process";
+ import { getNodeIdLocation } from "socket-function/src/nodeCache";

  export function addBuiltInContext() {
  addGlobalContext(() => {
  let nodeId = getOwnNodeId();
- let nodeParts = decodeNodeId(nodeId);
+ let nodeParts = getNodeIdLocation(nodeId);
  return {
  __machineId: getOwnMachineId(),
  __mountId: SocketFunction.mountedNodeId,