querysub 0.372.0 → 0.374.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "querysub",
3
- "version": "0.372.0",
3
+ "version": "0.374.0",
4
4
  "main": "index.js",
5
5
  "license": "MIT",
6
6
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -185,7 +185,7 @@ function addNodeIdBase(nodeId: string) {
185
185
  function setNodeIds(nodeIds: string[]) {
186
186
  nodeIds = nodeIds.filter(x => x !== SPECIAL_NODE_ID_FOR_UNMOUNTED_NODE);
187
187
 
188
- logDisk("log", "setNodeIds", { nodeIds });
188
+ console.info("setNodeIds", { nodeIds });
189
189
  // Also try all localhost ports, if we are developing and not in public mode
190
190
  if (isNode() && !isPublic() && isDevDebugbreak()) {
191
191
  let ports = new Set(nodeIds.map(nodeId => decodeNodeId(nodeId)?.port).filter(isDefined));
@@ -350,7 +350,7 @@ async function runHeartbeatAuditLoop() {
350
350
  }
351
351
  } else {
352
352
  deadCount.delete(nodeId);
353
- logDisk("log", "Read node heartbeat", { nodeId, lastTime });
353
+ console.info("Read node heartbeat", { nodeId, lastTime });
354
354
  }
355
355
  }
356
356
  if (pendingDeadCount) {
@@ -177,7 +177,7 @@ class PathValueCommitter {
177
177
  markArrayAsSplitable(values);
178
178
  const { Querysub } = await import("../4-querysub/Querysub");
179
179
  let serializedValues = await pathValueSerializer.serialize(values, { compress: Querysub.COMPRESS_NETWORK });
180
- logDisk("log", "Send PathValues to server", { valueCount: values.length, targetId: otherAuthority, });
180
+ console.info("Send PathValues to server", { valueCount: values.length, targetId: otherAuthority, });
181
181
  let forwardPromise = PathValueController.nodes[otherAuthority].forwardWrites(
182
182
  serializedValues,
183
183
  undefined,
@@ -86,9 +86,9 @@ class PathValueControllerBase {
86
86
  auditLog("RECEIVE VALUE", { path: value.path, time: value.time.time, sourceNodeId });
87
87
  }
88
88
  }
89
- logDisk("log", "Received PathValues via forwardWrites", { valueCount: values.length, callerId, });
89
+ console.info("Received PathValues via forwardWrites", { valueCount: values.length, callerId, });
90
90
  for (let value of values) {
91
- logDisk("log", "Received PathValue for path", { path: value.path, time: value.time.time, callerId });
91
+ console.info("Received PathValue for path", { path: value.path, time: value.time.time, callerId });
92
92
  }
93
93
 
94
94
  if (isCoreQuiet) {
@@ -309,7 +309,7 @@ class TransactionLocker {
309
309
  }
310
310
  public async createConfirm(key: string) {
311
311
  let path = this.getConfirmKey(key);
312
- logDisk("log", "Creating confirmation for ${key}");
312
+ console.info(`Creating confirmation for ${key}`);
313
313
  await this.storage.setValue(path, Buffer.from(""));
314
314
  return path;
315
315
  }
@@ -362,7 +362,7 @@ class TransactionLocker {
362
362
  delete: ellipsize(deletes.map(a => debugFileInfo(a.key)).join(","), 50),
363
363
  });
364
364
 
365
- logDisk("log", "Writing transaction", {
365
+ console.info("Writing transaction", {
366
366
  name,
367
367
  ops: transaction.ops.length,
368
368
  });
@@ -488,7 +488,7 @@ class TransactionLocker {
488
488
  }
489
489
  }
490
490
 
491
- logDisk("log", "Read archive state", {
491
+ console.info("Read archive state", {
492
492
  rawFilesCount: files.length,
493
493
  confirmedCount: currentDataFiles.size,
494
494
  rawFiles: files.map(a => a.file),
@@ -507,7 +507,7 @@ class TransactionLocker {
507
507
  let result = await tryToRead();
508
508
  if (result) {
509
509
  let timeToRead = Date.now() - startTime;
510
- logDisk("log", `Read data state in ${formatTime(timeToRead)}`);
510
+ console.info(`Read data state in ${formatTime(timeToRead)}`);
511
511
  return result;
512
512
  }
513
513
  }
@@ -546,7 +546,7 @@ class TransactionLocker {
546
546
  let rawLookup = new Set(Array.from(rawDataFiles).map(a => a.file));
547
547
  // If any creates are not confirmed, it must not have been applied
548
548
  if (transaction.ops.some(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))) {
549
- logDisk("log", `Transaction not applied (has pending confirmations of creates)`, {
549
+ console.info(`Transaction not applied (has pending confirmations of creates)`, {
550
550
  keys: transaction.ops
551
551
  .filter(a => a.type === "create" && rawLookup.has(a.key) && !confirmedKeys.has(a.key))
552
552
  .map(a => a.key)
@@ -555,7 +555,7 @@ class TransactionLocker {
555
555
  }
556
556
  // If any deletes still exist, it must not have been applied
557
557
  if (transaction.ops.some(a => a.type === "delete" && confirmedKeys.has(a.key))) {
558
- logDisk("log", `Transaction not applied (has pending deletes)`, {
558
+ console.info(`Transaction not applied (has pending deletes)`, {
559
559
  keys: transaction.ops
560
560
  .filter(a => a.type === "delete" && confirmedKeys.has(a.key))
561
561
  .map(a => a.key)
@@ -568,7 +568,7 @@ class TransactionLocker {
568
568
  let createCount = transaction.ops.filter(a => a.type === "create").length;
569
569
  let deleteCount = transaction.ops.filter(a => a.type === "delete").length;
570
570
  let lockedFiles = transaction.lockedFilesMustEqual?.length;
571
- logDisk("log", `Applying transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
571
+ console.info(`Applying transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
572
572
  transactions: transaction.ops.map(x => JSON.stringify(x)),
573
573
  });
574
574
  logNodeStats(`archives|TΔ Apply`, formatNumber, 1);
@@ -594,7 +594,7 @@ class TransactionLocker {
594
594
  };
595
595
  await Promise.all(list(CONCURRENT_WRITE_COUNT).map(runThread));
596
596
 
597
- logDisk("log", `Applied transaction with ${createCount} file creates and file ${deleteCount} deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
597
+ console.info(`Applied transaction with ${createCount} file creates and ${deleteCount} file deletes. ${lockedFiles !== undefined && `Lock state depends on ${lockedFiles} files` || ""}`, {
598
598
  transactions: transaction.ops.map(x => JSON.stringify(x)),
599
599
  });
600
600
  }
@@ -660,7 +660,7 @@ class TransactionLocker {
660
660
  let threshold = activeT.createTime + this.storage.propagationTime;
661
661
  if (Date.now() < threshold) {
662
662
  let waitTime = threshold - Date.now();
663
- logDisk("log", `Waiting ${formatTime(waitTime)} for transaction ${activeT.seqNum} to settle.`);
663
+ console.info(`Waiting ${formatTime(waitTime)} for transaction ${activeT.seqNum} to settle.`);
664
664
  await new Promise(resolve => setTimeout(resolve, waitTime));
665
665
  return this.getFilesBase();
666
666
  }
@@ -768,7 +768,7 @@ class TransactionLocker {
768
768
  let dels = transaction.ops.filter(a => a.type === "delete").length;
769
769
  let creates = transaction.ops.filter(a => a.type === "create").length;
770
770
  let createBytes = transaction.ops.map(a => a.type === "create" && a.value?.length || 0).reduce((a, b) => a + b, 0);
771
- logDisk("log", `Starting transaction with ${creates} file creates and ${dels} file deletes, ${formatNumber(createBytes)}B`, {
771
+ console.info(`Starting transaction with ${creates} file creates and ${dels} file deletes, ${formatNumber(createBytes)}B`, {
772
772
  createFilesNames: transaction.ops.filter(a => a.type === "create").map(a => a.key),
773
773
  deleteFilesNames: transaction.ops.filter(a => a.type === "delete").map(a => a.key),
774
774
  });
@@ -797,7 +797,7 @@ class TransactionLocker {
797
797
  let beforeData = await this.getFilesBase();
798
798
  if (!this.isTransactionValid(transaction, beforeData.dataFiles, beforeData.rawDataFiles)) {
799
799
  logNodeStats(`archives|TΔ Rejected`, formatNumber, 1);
800
- logDisk("log", `Finished transaction with rejection, ${transaction.ops.length} ops`);
800
+ console.info(`Finished transaction with rejection, ${transaction.ops.length} ops`);
801
801
  return "rejected";
802
802
  }
803
803
 
@@ -806,7 +806,7 @@ class TransactionLocker {
806
806
  let afterData = await this.getFilesBase();
807
807
  if (this.wasTransactionApplied(transaction, afterData.dataFiles, afterData.rawDataFiles)) {
808
808
  logNodeStats(`archives|TΔ Accepted`, formatNumber, 1);
809
- logDisk("log", `Finished transaction with ${transaction.ops.length} ops`);
809
+ console.info(`Finished transaction with ${transaction.ops.length} ops`);
810
810
  return "accepted";
811
811
  }
812
812
  }
@@ -58,7 +58,7 @@ function debugLogBase(type: string, values: { [key: string]: unknown }) {
58
58
  return;
59
59
  }
60
60
  if (isDiskAudit()) {
61
- logDisk("log", type, values, { "diskAudit": true });
61
+ console.info(type, values, { "diskAudit": true });
62
62
  }
63
63
  let newEntry: DebugLog = { type, time: Date.now(), values };
64
64
  logHistory.push(newEntry);
@@ -1261,7 +1261,7 @@ class PathWatcher {
1261
1261
  auditLog("new non-local WATCH PARENT", { path, watcher: config.callback });
1262
1262
  }
1263
1263
  }
1264
- logDisk("log", `New PathValue watches`, {
1264
+ console.info(`New PathValue watches`, {
1265
1265
  newPathsWatched: newPathsWatched.size,
1266
1266
  newParentsWatched: newParentsWatched.size,
1267
1267
  });
@@ -1394,7 +1394,7 @@ class PathWatcher {
1394
1394
  }
1395
1395
 
1396
1396
  if (fullyUnwatched.paths.length > 0 || fullyUnwatched.parentPaths.length > 0) {
1397
- logDisk("log", `Unwatched PathValue watches`, {
1397
+ console.info(`Unwatched PathValue watches`, {
1398
1398
  unwatchedPaths: fullyUnwatched.paths.length,
1399
1399
  unwatchedParents: fullyUnwatched.parentPaths.length,
1400
1400
  });
@@ -689,7 +689,7 @@ export class PathFunctionRunner {
689
689
  let syncTime = wallTime - evalTime;
690
690
 
691
691
 
692
- logDisk("log", "Finished FunctionRunner function", {
692
+ console.info("Finished FunctionRunner function", {
693
693
  ...callPath, argsEncoded: "", functionSpec,
694
694
  wallTime, syncTime, evalTime,
695
695
  loops: runCount,
@@ -41,7 +41,7 @@ async function main() {
41
41
  await Querysub.hostService("PathFunctionRunnerMain");
42
42
 
43
43
  if (!isPublic()) {
44
- IndexedLogs.runLogMoveLoop();
44
+ void IndexedLogs.runLogMoveLoop();
45
45
  }
46
46
 
47
47
  // Use a fairly high stick time (the default is 10s), because having wait to sync data is very slow,
@@ -2119,7 +2119,7 @@ function updateDOMNodeFields(domNode: DOMNode, vNode: VirtualDOM, prevVNode: Vir
2119
2119
  if (name === "blur") {
2120
2120
  let target = args[0].currentTarget as HTMLElement;
2121
2121
  if (!target.getAttribute("data-blur-on-unmount") && !target.isConnected) {
2122
- logDisk("log", "Ignoring blur for disconnected element. You can use data-blur-on-unmount to re-enable blurs on this element.", target);
2122
+ console.info("Ignoring blur for disconnected element. You can use data-blur-on-unmount to re-enable blurs on this element.", target);
2123
2123
  return;
2124
2124
  }
2125
2125
  }
@@ -2428,7 +2428,7 @@ function blurFixOnMouseDownHack(event: MouseEvent) {
2428
2428
 
2429
2429
  // Looks like we are going to blur, so blur now
2430
2430
  if (selected instanceof HTMLElement && !selected.hasAttribute("data-no-early-blur")) {
2431
- logDisk("log", `Simulating early blur to prevent blur from firing after mousedown. This solves a problem where mousedown changes the UI, and then the blur fires on the wrong element. You can use data-no-early-blur to opt-out of this feature`, selected);
2431
+ console.info(`Simulating early blur to prevent blur from firing after mousedown. This solves a problem where mousedown changes the UI, and then the blur fires on the wrong element. You can use data-no-early-blur to opt-out of this feature`, selected);
2432
2432
  selected.blur();
2433
2433
  }
2434
2434
  }
@@ -480,7 +480,7 @@ export class QuerysubControllerBase {
480
480
  // make the UI look cleaner (instead of showing stale values, it shows nothing)?
481
481
  let undefinedValues: PathValue[] = newPathsNotAllowed.map(path => ({ path, value: undefined, canGCValue: true, isTransparent: true, time: epochTime, locks: [], lockCount: 0, valid: true, event: false }));
482
482
 
483
- logDisk("log", "Disallowing PathValue watches due to disallowed permissions", { count: newPathsNotAllowed.length, callerId });
483
+ console.info("Disallowing PathValue watches due to disallowed permissions", { count: newPathsNotAllowed.length, callerId });
484
484
 
485
485
  ignoreErrors(pathValueSerializer.serialize(undefinedValues, { compress: Querysub.COMPRESS_NETWORK }).then(buffers =>
486
486
  PathValueController.nodes[callerId].forwardWrites(
@@ -188,7 +188,7 @@ async function checkAuthority(authority: AuthorityPath, threshold: number) {
188
188
  console.error(`Value mismatch between disk and memory for ${formatNumber(changedValues.size)} values. Ex: ${firstValue.path}`);
189
189
  }
190
190
  if (removedValues.size > 0) {
191
- logDisk("log", green(`Removing ${formatNumber(removedValues.size)} paths from memory which have been GCed on the disk.`));
191
+ console.info(green(`Removing ${formatNumber(removedValues.size)} paths from memory which have been GCed on the disk.`));
192
192
  }
193
193
  if (changedValues.size > 0 || removedValues.size > 0) {
194
194
  let allValues = new Set([...changedValues, ...removedValues]);
@@ -25,6 +25,15 @@ export type FilePathsByMachine = {
25
25
  errorCount: number;
26
26
  };
27
27
 
28
+ function getSourceColor(sourceName: string | undefined): string | undefined {
29
+ if (!sourceName) return undefined;
30
+ const lastPart = sourceName.split("/").pop()?.toLowerCase();
31
+ if (lastPart === "error") return css.colorhsl(0, 80, 40);
32
+ if (lastPart === "warn") return css.colorhsl(40, 80, 40);
33
+ if (lastPart === "info") return css.colorhsl(280, 80, 40);
34
+ if (lastPart === "log") return css.colorhsl(200, 80, 40);
35
+ return undefined;
36
+ }
28
37
 
29
38
  export class FilePathSelector extends qreact.Component<{
30
39
  paths: TimeFilePathWithSize[];
@@ -377,7 +386,7 @@ export class FilePathSelectorModal extends qreact.Component<{
377
386
  </td>
378
387
  <td className={css.pad2(2)}>{this.props.formatBytes(file.size)}</td>
379
388
  <td className={css.pad2(2)}>{file.logCount !== undefined ? formatNumber(file.logCount) : <span className={css.colorhsl(40, 60, 40)}>pending</span>}</td>
380
- <td className={css.pad2(2)}>{file.sourceName}</td>
389
+ <td className={css.pad2(2) + (getSourceColor(file.sourceName) || "")}>{file.sourceName}</td>
381
390
  <td className={css.pad2(2)}>{file.dedupe}</td>
382
391
  <td className={css.pad2(2)}>{formatDateTime(file.startTime)}</td>
383
392
  <td className={css.pad2(2)}>
@@ -429,7 +438,7 @@ export class FilePathSelectorModal extends qreact.Component<{
429
438
  <div className={css.vbox(5)}>
430
439
  {Array.from(bySource.entries()).map(([sourceName, stats]) => (
431
440
  <div key={sourceName} className={css.hbox(10)}>
432
- <div className={css.minWidth(240)}>{sourceName}:</div>
441
+ <div className={css.minWidth(240) + (getSourceColor(sourceName) || "")}>{sourceName}:</div>
433
442
  <div className={css.minWidth(140)}>Files: {formatNumber(stats.fileCount)} ({formatNumber(stats.pendingCount)} pending)</div>
434
443
  <div className={css.minWidth(120)}>Size: {this.props.formatBytes(stats.size)}</div>
435
444
  <div className={css.minWidth(100)}>Logs: {formatNumber(stats.logCount)}</div>
@@ -25,6 +25,7 @@ import { blue } from "socket-function/src/formatting/logColors";
25
25
  import { LimitGroup } from "../../../functional/limitProcessing";
26
26
  import { getAllNodeIds } from "../../../-f-node-discovery/NodeDiscovery";
27
27
  import { NodeCapabilitiesController } from "../../../-g-core-values/NodeCapabilities";
28
+ import { getLoggers2Async } from "../diskLogger";
28
29
 
29
30
  export type TimeFilePathWithSize = TimeFilePath & {
30
31
  size: number;
@@ -40,7 +41,6 @@ export class IndexedLogs<T> {
40
41
  name: string,
41
42
  maxSingleFileData?: number;
42
43
  maxCountPerFile?: number;
43
- forceUsePublicLogs?: boolean;
44
44
  getTime: (result: T) => number | undefined;
45
45
  }) {
46
46
  loggerByName.set(this.config.name, this as any);
@@ -48,9 +48,10 @@ export class IndexedLogs<T> {
48
48
 
49
49
 
50
50
  private static shouldRunLoop = false;
51
- public static runLogMoveLoop() {
51
+ public static async runLogMoveLoop() {
52
52
  IndexedLogs.shouldRunLoop = true;
53
- for (let indexedLogs of loggerByName.values()) {
53
+ let allLoggers = await getLoggers2Async();
54
+ for (let indexedLogs of Object.values(allLoggers)) {
54
55
  indexedLogs.runLogMoverLoop();
55
56
  }
56
57
  }
@@ -91,7 +92,7 @@ export class IndexedLogs<T> {
91
92
  let basePublic: Archives = getArchivesHome(getDomain());
92
93
  // NOTE: The local disk is so fast that reading in 10 megabytes is nothing, and if we read in too small of a value, the overhead per read ends up making this take forever.
93
94
  let extraReadSize = 1024 * 1024 * 10;
94
- if (this.config.forceUsePublicLogs || isPublic()) {
95
+ if (isPublic()) {
95
96
  basePublic = getArchivesBackblaze(getDomain());
96
97
  // NOTE: While the latency to Backblaze is high, we're now reading in parallel, so it shouldn't be as big of an issue.
97
98
  extraReadSize = 1024 * 1024 * 1;
@@ -274,8 +275,16 @@ export class IndexedLogs<T> {
274
275
  forceReadPublic?: boolean;
275
276
  }): Promise<TimeFilePathWithSize[]> {
276
277
  let finalPaths: TimeFilePathWithSize[] = [];
278
+ if (config.forceReadPublic && !isPublic()) {
279
+ let machineNodes = await this.getMachineNodes();
280
+ if (machineNodes.length === 0) throw new Error(`Cannot find any public nodes to read from`);
281
+ return await IndexedLogShimController.nodes[machineNodes[0]].getPaths({
282
+ ...config,
283
+ indexedLogsName: this.config.name,
284
+ });
285
+ }
277
286
 
278
- if (!config.only && (config.forceReadPublic || isPublic())) {
287
+ if (config.only !== "local" && isPublic()) {
279
288
  let machineNodes = await this.getMachineNodes();
280
289
  await Promise.all(machineNodes.map(async (machineNode) => {
281
290
  try {
@@ -291,11 +300,6 @@ export class IndexedLogs<T> {
291
300
  }));
292
301
  }
293
302
 
294
- // If we're forcefully reading from the public server, but we're not public, the code above will be the only code which adds to results
295
- if (!isPublic() && config.forceReadPublic) {
296
- return finalPaths;
297
- }
298
-
299
303
  let localLogs = this.getLocalLogs();
300
304
  let backblazeLogs = this.getPublicLogs();
301
305
  let paths: TimeFilePath[] = [];
@@ -347,6 +351,16 @@ export class IndexedLogs<T> {
347
351
  onResult: (match: T) => void;
348
352
  onResults?: (results: IndexedLogResults) => Promise<boolean>;
349
353
  }): Promise<IndexedLogResults> {
354
+
355
+ if (config.params.forceReadPublic && !isPublic()) {
356
+ let machineNodes = await this.getMachineNodes();
357
+ if (machineNodes.length === 0) throw new Error(`Cannot find any public nodes to read from`);
358
+ return await this.clientFind({
359
+ ...config,
360
+ nodeId: machineNodes[0],
361
+ });
362
+ }
363
+
350
364
  let startTime = Date.now();
351
365
  let interval: NodeJS.Timeout | undefined;
352
366
 
@@ -360,7 +374,8 @@ export class IndexedLogs<T> {
360
374
  let allResults = new Map<string, IndexedLogResults>();
361
375
  let allDone: Promise<void>[] = [];
362
376
 
363
- if (config.params.only !== "local" && (config.params.forceReadPublic || isPublic())) {
377
+ // Read other nodes if on a public server
378
+ if (config.params.only !== "local" && isPublic()) {
364
379
  let machineNodes = await this.getMachineNodes();
365
380
  allDone.push(...machineNodes.map(async (machineNode) => {
366
381
  try {
@@ -385,12 +400,6 @@ export class IndexedLogs<T> {
385
400
  }));
386
401
  }
387
402
 
388
- // If we're forcefully reading from the public server, but we're not public, the code above will be the only code which adds to results
389
- if (!isPublic() && config.params.forceReadPublic) {
390
- await Promise.all(allDone);
391
- return getFinalResults();
392
- }
393
-
394
403
 
395
404
  let results: IndexedLogResults = createEmptyIndexedLogResults();
396
405
  allResults.set("", results);
@@ -686,6 +686,12 @@ export class LogViewer3 extends qreact.Component {
686
686
  checkbox
687
687
  label="Exclude Pending Results"
688
688
  url={excludePendingResults}
689
+ onChangeValue={(newValue) => {
690
+ if (newValue) {
691
+ savedPathsURL.value = "";
692
+ void this.loadPaths();
693
+ }
694
+ }}
689
695
  />
690
696
  {!isPublic() && <InputLabelURL
691
697
  checkbox
@@ -146,7 +146,7 @@ void Promise.resolve().then(() => {
146
146
 
147
147
 
148
148
  const logDiskDontShim = logDisk;
149
- /** NOTE: Calling this directly means we lose __FILE__ tracking. But... that's probably fine... */
149
+ /** @deprecated Do not call this directly; call console.info instead. Our shim prevents console.info from logging to the console, but it still calls logDisk. */
150
150
  export function logDisk(type: "log" | "warn" | "info" | "error", ...args: unknown[]) {
151
151
  if (!isNode()) return;
152
152
  try {
@@ -55,6 +55,8 @@ export function shimConsoleLogs() {
55
55
  // Some arguments might throw if accessed (as they might be proxies), so
56
56
  // catch and ignore errors
57
57
  }
58
+ // NOTE: console.info otherwise serves no real purpose over console.log, so we give it one: infos are not shown in the console and are only logged to disk. This fixes our shimming issue, where calling logDisk directly loses the source file information.
59
+ if (fncName === "info") return;
58
60
  return originalFnc(...args);
59
61
  };
60
62
  }
@@ -20,8 +20,6 @@ IMPORTANT! Now I am properly calling shutdown, so none of the streamed logs shou
20
20
 
21
21
 
22
22
 
23
- UGH... I'm seeing the remote move logs saying that it moved a lot of files.
24
-
25
23
  3) Ensure our moveloops are working correctly, on both dev and public
26
24
 
27
25
 
@@ -72,7 +72,7 @@ function logProfileMeasuresTimingsNow() {
72
72
  measureObj = startMeasure();
73
73
  function diskLogMeasureObj(table: FormattedMeasureTable | undefined) {
74
74
  if (!table) return;
75
- logDisk("log", table.title, { entries: table.entries });
75
+ console.info(table.title, { entries: table.entries });
76
76
  }
77
77
  diskLogMeasureObj(logMeasureTable(profile, {
78
78
  name: `watchdog at ${new Date().toLocaleString()}`,
@@ -626,7 +626,7 @@ function sendLoginEmail(config: {
626
626
  );
627
627
  if (alreadyAllowed) {
628
628
  data().machineSecure[machineId].userId = userId;
629
- logDisk("log", `User ${userId} already allowed for current ip and machine id, no need to send email.`);
629
+ console.info(`User ${userId} already allowed for current ip and machine id, no need to send email.`);
630
630
  return;
631
631
  }
632
632
  }
@@ -818,14 +818,14 @@ function inviteUser(config: { email: string }) {
818
818
  Querysub.ignorePermissionsChecks(() => {
819
819
  let curUserObj = getUserObjAssert();
820
820
  if (config.email in curUserObj.invitedUsers2) {
821
- logDisk("log", `User ${config.email} already invited`);
821
+ console.info(`User ${config.email} already invited`);
822
822
  return;
823
823
  }
824
824
  // If the user already exists, don't invite
825
825
  const { email } = config;
826
826
  let userId = atomic(data().secure.emailToUserId[email]) || createNewUserId();
827
827
  if (userId in data().users) {
828
- logDisk("log", `User ${userId} already exists, no need to invite`);
828
+ console.info(`User ${userId} already exists, no need to invite`);
829
829
  return;
830
830
  }
831
831