querysub 0.325.0 → 0.326.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "querysub",
-  "version": "0.325.0",
+  "version": "0.326.0",
   "main": "index.js",
   "license": "MIT",
   "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -12,10 +12,11 @@ import { Args } from "socket-function/src/types";
 import { getArchivesBackblaze } from "./archivesBackBlaze";
 import { formatNumber } from "socket-function/src/formatting/format";
 import { SizeLimiter } from "../diagnostics/SizeLimiter";
+import { isPublic } from "../config";
 
 const SIZE_LIMIT = new SizeLimiter({
     diskRoot: getStorageDir(),
-    maxBytes: 1024 * 1024 * 1024 * 50,
+    maxBytes: isPublic() ? 1024 * 1024 * 1024 * 250 : 1024 * 1024 * 1024 * 50,
     // Anything less than this and we can't even load enough weights models for a single task
     minBytes: 1024 * 1024 * 1024 * 8,
     maxDiskFraction: 0.3,
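
For reference, the new `maxBytes` works out to 250 GiB on public nodes versus 50 GiB otherwise. A minimal sketch of the same arithmetic; the `GiB` constant and the `isPublic` stub are illustrative stand-ins, not part of the package:

```ts
// Illustrative sketch only: named constants for the byte math used above.
const GiB = 1024 * 1024 * 1024;

// Stand-in for the package's isPublic() import from "../config" (assumed to return a boolean).
const isPublic = () => process.env.PUBLIC === "1";

const maxBytes = isPublic() ? 250 * GiB : 50 * GiB; // 268,435,456,000 or 53,687,091,200 bytes
const minBytes = 8 * GiB; // below this, not even a single task's weights models fit
```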
@@ -191,6 +191,7 @@ const changeIdentityOnce = cacheWeak(async function changeIdentityOnce(connectio
     };
     let signature = sign(threadKeyCert, payload);
     await timeoutToError(
+        // NOTE: This timeout has to be small: if we try to connect to a node to send it something time sensitive, such as a PathValue, and it takes too long, we can end up holding a PathValue which is expired. The threshold is around 60 seconds, so the timeout needs to leave room to call a few different nodes before the PathValue expires.
         10 * 1000,
         IdentityController.nodes[nodeId].changeIdentity(signature, payload),
         () => new Error(`Timeout calling changeIdentity for ${nodeId}`)
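
The NOTE above motivates the 10 second value: with PathValues expiring around the 60 second mark, one slow node must not eat the whole budget, so roughly five sequential attempts still fit inside the expiry window. The shape of `timeoutToError` below is inferred from this call site (milliseconds, promise, error factory); the real implementation lives in socket-function and may differ:

```ts
// Plausible sketch of timeoutToError, inferred from the call site above.
// Rejects with makeError() if the wrapped promise takes longer than ms.
async function timeoutToError<T>(
    ms: number,
    promise: Promise<T>,
    makeError: () => Error,
): Promise<T> {
    let timer: ReturnType<typeof setTimeout> | undefined;
    const timeout = new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(makeError()), ms);
    });
    try {
        return await Promise.race([promise, timeout]);
    } finally {
        if (timer !== undefined) clearTimeout(timer);
    }
}
```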
@@ -1,7 +1,7 @@
 import { measureWrap } from "socket-function/src/profiling/measure";
 import { getIdentityCA, getMachineId, getOwnMachineId } from "../-a-auth/certs";
 import { getArchives } from "../-a-archives/archives";
-import { isNode, throttleFunction, timeInSecond } from "socket-function/src/misc";
+import { isNode, throttleFunction, timeInHour, timeInSecond } from "socket-function/src/misc";
 import { SocketFunctionHook } from "socket-function/SocketFunctionTypes";
 import { SocketFunction } from "socket-function/SocketFunction";
 import { IdentityController_getMachineId } from "../-c-identity/IdentityController";
@@ -19,6 +19,7 @@ import { magenta } from "socket-function/src/formatting/logColors";
 // Cache the untrust list, to prevent bugs from causing too many backend reads (while also allowing
 // bad servers which make requests before their trust is verified from staying broken).
 const UNTRUST_CACHE_TIME = 30 * timeInSecond;
+const TRUSTED_CACHE_RESET_INTERVAL = timeInHour;
 
 const archives = lazy(() => getArchives("trust2/"));
 
@@ -57,6 +58,8 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
     // See the comment in requiresNetworkTrustHook for why clients have to trust all callers.
     if (isClient()) return true;
 
+    await populateTrustedCache();
+
     if (trustedCache.has(machineId)) {
         return true;
     }
@@ -70,30 +73,11 @@ export const isTrusted = measureWrap(async function isTrusted(machineId: string)
         return false;
     }
 
-    return await isTrustedBase(machineId);
-});
-let trustedCachePopulated = false;
-const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machineId: string) {
-    if (!trustedCachePopulated) {
-        trustedCachePopulated = true;
-        let trustedMachineIds = await archives().find("");
-        lastArchivesTrusted = trustedMachineIds.slice();
-        for (let trustedMachineId of trustedMachineIds) {
-            trustedCache.add(trustedMachineId);
-            // NOTE: We don't load trust certs here, as we need to load them on demand in case the trust changes after our initial startup.
-        }
-    } else {
-        // Checking a single entry is a lot faster (as find is slow)
-        let trusted = await archives().get(machineId);
-        if (trusted) {
-            trustedCache.add(machineId);
-        }
+    // Checking a single entry is fast, and if we don't trust them they'll be added to the untrusted cache, so it shouldn't slow things down too much.
+    let trusted = await archives().get(machineId);
+    if (trusted) {
+        trustedCache.add(machineId);
     }
-    // Always trust ourself
-    trustedCache.add(getOwnMachineId());
-
-    // NOTE: This only happens to servers that we connect to. Also we only allow the machine ID to be this special ID in the case it's on our domain. And because we use HTTPS when connecting to domains, it means that it must be implicitly trusted if it has a certificate for our domain.
-    trustedCache.add("127-0-0-1");
 
     if (!trustedCache.has(machineId)) {
         untrustedCache.set(machineId, Date.now() + UNTRUST_CACHE_TIME);
@@ -101,7 +85,24 @@ const isTrustedBase = runInSerial(measureWrap(async function isTrustedBase(machi
     } else {
         return true;
     }
-}));
+});
+let populateTrustedCache = lazy(async () => {
+    let trustedMachineIds = await archives().find("");
+    lastArchivesTrusted = trustedMachineIds.slice();
+    for (let trustedMachineId of trustedMachineIds) {
+        trustedCache.add(trustedMachineId);
+    }
+    // Always trust ourself
+    trustedCache.add(getOwnMachineId());
+
+    // NOTE: This only happens to servers that we connect to. Also we only allow the machine ID to be this special ID in the case it's on our domain. And because we use HTTPS when connecting to domains, it means that it must be implicitly trusted if it has a certificate for our domain.
+    trustedCache.add("127-0-0-1");
+
+    setTimeout(() => {
+        trustedCache.clear();
+        populateTrustedCache.reset();
+    }, TRUSTED_CACHE_RESET_INTERVAL);
+});
 
 export async function isNodeTrusted(nodeId: string) {
     let domainName = getNodeIdDomainMaybeUndefined(nodeId);
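
The new `populateTrustedCache` combines three ideas: a one-time bulk load via `lazy`, a per-machine fallback lookup in `isTrusted`, and a periodic full reset so revoked trust eventually drops out of the cache. A minimal self-contained sketch of that pattern, assuming a `lazy` helper with a `.reset()` method as the diff's usage implies (the real helper and the `archives()` API are the package's own):

```ts
const trustedCache = new Set<string>();

// Memoizes an async populate function and exposes reset(), mirroring the
// lazy(...).reset() usage in the diff; this helper itself is a sketch.
function lazy<T>(fn: () => Promise<T>): (() => Promise<T>) & { reset(): void } {
    let cached: Promise<T> | undefined;
    const wrapper = (() => (cached ??= fn())) as (() => Promise<T>) & { reset(): void };
    wrapper.reset = () => { cached = undefined; };
    return wrapper;
}

// Hypothetical stand-in for archives().find("") returning all trusted machine IDs.
async function fetchTrustedMachineIds(): Promise<string[]> { return []; }

const TRUSTED_CACHE_RESET_INTERVAL = 60 * 60 * 1000; // timeInHour in the diff, assumed to be 1h in ms

const populateTrustedCache = lazy(async () => {
    for (const id of await fetchTrustedMachineIds()) {
        trustedCache.add(id);
    }
    // Clearing and un-memoizing on a timer bounds how long revoked trust can linger.
    setTimeout(() => {
        trustedCache.clear();
        populateTrustedCache.reset();
    }, TRUSTED_CACHE_RESET_INTERVAL);
});
```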
@@ -571,9 +571,6 @@ export class FastArchiveViewer<T> extends qreact.Component<{
     flavor="large"
     fillWidth
     onKeyUp={this.handleDownload}
-    ref2={() => {
-        void this.handleDownload();
-    }}
     noEnterKeyBlur
     placeholder="Filter terms, ex x | y & z"
 />
@@ -5,22 +5,37 @@ import { timeInSecond } from "socket-function/src/misc";
 import { formatDateTime } from "socket-function/src/formatting/format";
 
 
-//todonext
 
+// 3) Dismissing of certain errors is not working. They keep coming back.
+// - I think our expiry date comparison code might be wrong. It seems like once they leave the maybe-expired range they come back immediately. If we can reproduce this locally, it'll be trivial to debug, because the suppression stuff is global, so we can just see if there are any errors, and if there are, break in on them.
+// UGH... To debug this, we need to ignore the changes, work on the other stuff, and then come back later and see if those changes have shown up again. We basically need to debug it when it happens. We can't debug it now; it's too late, the errors should be showing up now because they are expired.
 
-// 5) Add a channel to watch suppression entries, as once we suppress it, we don't want notifications anymore anywhere, And we don't want to have to wait
-// - test by logging when we get errors and then having our script which automatically keeps adding errors to keep running so we can verify that we keep logging new errors and then ignore the errors and this suppression should propagate and cause us to no longer omit the errors.
-// - This will also cause the errors to be suppressed across different HTTP servers.
 
-// 5) Get IMs actually sending.
+// The constant error notifications might be fixed now. We'll see tomorrow after all the rolling updates finish.
+// 4) Fix whatever's causing constant error notifications. Something is broadcasting on the Recent Errors Change channel constantly.
+// - I guess usually there's no server that's going to be listening on it. So it's... not that big of a deal, but it's still annoying.
+
+
+// 4.1) Update channel watching so you can specify that you want to watch only on a specific node ID, and then update our code so we only watch it on the controller node ID that we're interfacing with.
+
+
+// 5) Verify our suppression updates broadcast across the channel correctly, causing us to be able to suppress a notification and our watching script to then stop seeing the new updates. Realistically, it's the calling script that stops setting them, but same thing.
+
+
+
+// 5) Set up the Instant Messaging Sending API.
+// - Discord. With beeper it won't really matter what we're messaging. We could also do WhatsApp. It's really all the same.
 
 // 6) Set up all the code to properly rate limit IMs, batch them, link back to the log page, etc.
 // - Just link to the error page for the last week. We don't need to link to anything specific.
 // - properly getting the node ID that we're going to be watching, and if it goes down, getting a new one, and ignoring messages from the old node.
 // - And if no node exists, we need to warn and then wait.
 
+
 // 7) Write the digest script, which is very different, but will run in the same entry.
-// - Separate warnings and errors and also bucket by time bucket
+// - Separate warnings and errors and also bucket by time bucket
+// - suppressed errors by time bucket (but no type, as we definitely don't want to parse all suppressed errors...)
+// - Time the entire thing, and put that, and the profile, in the digest too! That will give us a good gauge on whether the errors/suppressions are getting slow (due to a lot of errors, or a lot of suppression checks!)
 // 8) Write a page that shows the results of the digest in tabs, writing the digest probably just to backblaze
 // - For now, just have two tabs, one for errors and one for warnings.
 // - If we're going to do a full scan, we might as well show time series data as well. It's trivial.
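
The time bucketing described in item 7 above is straightforward to express; a hedged sketch of bucketing entries by hour, where the `LogEntry` shape and severity field are assumptions, not the package's actual types:

```ts
type Severity = "warning" | "error";
interface LogEntry { time: number; severity: Severity; }

// Group entries into hour-wide buckets, keyed separately per severity,
// mirroring item 7's "separate warnings and errors, bucketed by time".
function bucketByHour(entries: LogEntry[]): Map<string, number> {
    const HOUR = 60 * 60 * 1000;
    const counts = new Map<string, number>();
    for (const e of entries) {
        const bucketStart = Math.floor(e.time / HOUR) * HOUR;
        const key = `${e.severity}:${new Date(bucketStart).toISOString()}`;
        counts.set(key, (counts.get(key) ?? 0) + 1);
    }
    return counts;
}
```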
@@ -20,16 +20,6 @@ Very small amount of data
 - For now this will just be for:
     - non-suppressed errors
     - suppressed errors
-    - Eventually the goal of this is to add our tracking charts to this. There are some really useful metrics we can track.
-        - unique visit IPs.
-        - Percent bounces.
-        - average visit length and also median visit length top 95" bottom 5"
-        - average stories read average story percentages read
-        - percent of first page story views by subscribers percent of fifth page story view by subscribers
-        - Number of users visiting the fifth page.
-        - New subscriptions
-        - Subscription cancellations
-        - Total subscriptions
 
 
 
@@ -55,51 +45,9 @@ Very small amount of data
 2.1) Collections of life cycles so we can further reduce the complexity.
     - Uses FastArchiveViewer, but instead of showing a table, shows lifecycles (a derived concept)
     - We save them in backblaze, with a bit of cache for loading them
-    - List of life cycles
-    - Life cycle
-    - Title
-    - Operation list (each supports | / &, but having multiple is even better)
-    - Match filter
-    - Group key extractions (optional, if not set it becomes a singleton)
-    - Just a field name
-    - CAN have multiple, which adds us as multiple life cycles
-    - With each one being namespaced using the key, so we can tell them apart
-    - Global value setting (optional, if not set it has no global state impact)
-    - A list of set values
-    - Each one is an expression which can use fields in the object, ex:
-    - `alivePathValueServers.$threadId = true`
-    - Show AND SHOULDN'T include match filters!
-    - So when we should the count matched, we can show stats for these, which will often be should have "finished", and shouldn't have "error", so we can see completed, and errors
-    - Similar to error notifications, but... it's nice to also have this here, as we could miss the notification, or suppress it, but when we are looking at a life cycle it's relevant skyrockets.
-    - ALSO for start, so we can see cutoff starts!
-    OH! How do we handle cut off starts?
-    - Maybe... we have a "read preload" duration, and... we read that, BUT, only include life cycles which are also in our main selected time. So we don't cut anything off in our main time, but don't add new values which also get cut off!
-    - Same time/machine/thread selector as log viewer
-    - Allow filtering to specific life cycles
-    - After download, shows matches per life cycle
-    - Button to reset to all
-    - Download logs, and scan for selected life cycles
-    - Immediately on load, showing progress and throttling, so it's not too bad
-    - Result
-    - List of life cycles, with count of each
-    - Table of individual life cycles?
-    - Can then filter within these life cycles by searching
-    - BUT, importantly, if any log is matched in a life cycle, the entire life cycle is matched
-    - AND, global settings for ALL life cycles are applied, not just filtered ones!
-    - Table of result life cycles
-    - Preview shows first matched line
-    - ALSO, shows duration of life cycle!
-    - And start date
-    - Expand to see pin that specific life cycle above
-    - Show list of logs in it, in another table, with searching on each of them
-    - Start date AND duration of each line!
-    - Can pin multiple life cycles (I guess might as well)
-    - Show list of global value expressions as well (limited, but with filtering to search them easily)
-    - Can expand a global value to see object (but again... limited?)
-    - Can select a specific global value path, to have it injected into
-    - a column for before and after each life cycle
-    - A column for after each line within a life cycle
-    - Can also select parent values, to show all paths under that (use ShowMore, to make them manageable...)
+    - Show the life cycles and allow viewing just the specific life cycles, then drilling into those life cycles. Also allow viewing multiple at once, so we can view a stream that has many life cycles (as in life cycle collections, which we can also save).
+    - Show overlap when we're showing the list of life cycles: sort by start time, then show some kind of indicator for how many values after a value it overlaps with (see the sketch after this hunk). If work is serial there'll be no overlap. If everything is queued up and then run in serial, we'll see the overlap go 1, 2, 3, 4, 5, 6, 7, 8, etc. If it's just in parallel, it'll count up to a number and then move up and down a little as values are added and removed. If everything's added at once and run in parallel, the values will go up, then very quickly go down.
+    - Maybe we should have some indication for how much parallel overlap there is? Or how much gap there is to the start time of the next thing, and to the end time of the next thing, something like that.
 
 6) Add life cycles for
     - Node discovery life cycle (new node, check for alive, check for paths, etc)
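
As referenced in the overlap bullet above, the indicator can be computed by sorting life cycles by start time and, for each entry, counting how many later entries begin before it ends. A sketch; the `LifeCycle` shape is an assumption, not the package's actual type:

```ts
interface LifeCycle { start: number; end: number; }

// For each life cycle (sorted by start), count how many of the following
// entries start before it ends. Serial work yields all zeros; a queue drained
// serially ramps 1, 2, 3, ...; parallel bursts spike and then fall off.
function overlapCounts(cycles: LifeCycle[]): number[] {
    const sorted = [...cycles].sort((a, b) => a.start - b.start);
    return sorted.map((cycle, i) => {
        let count = 0;
        // sorted by start, so we can stop at the first entry starting after cycle.end
        for (let j = i + 1; j < sorted.length && sorted[j].start < cycle.end; j++) {
            count++;
        }
        return count;
    });
}
```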
@@ -112,6 +60,7 @@ Very small amount of data
     - Use different keys for threadId, and, triggeredNodeId, so we can track how a node discovers other nodes, vs how a node is discovered
     - Mark as dead, dead count increases, remove node as dead
     - MAYBE there should be 2 lifecycles, one for all, and one for just creation type stuff (no discovery)
+    - There's a life cycle for something dying, which starts when we first see it's dead and ends when the count reaches the maximum. We want to use the capability to change the ending state, so we can mark it as partially dead (but not fully dead) or fully dead, letting us see if the dead life cycles keep starting but not finishing, which would be bad.
 - Trusted machine lifecycle
     - Check if we need to add trust
     - Add trust to archives
@@ -138,6 +87,10 @@ Very small amount of data
 - Re-enable all of our services as well.
 - Just logs MIGHT be able to do it, but... life cycles should make it a lot easier to correlate logs, which is something we need to do anyways to solve it...
 
+Make sure we check our life cycles for nodes being added and removed, verifying that life cycle path is pretty empty and there aren't nodes constantly being lost and re-added.
+
+Check the startup lifecycle to make sure we can detect the nodes pretty fast and in parallel, instead of serially.
+
 10) Verify old user/fast-log-cache machine folders are deleted
 
 
@@ -309,11 +309,19 @@ export function getSyncedController<T extends SocketRegistered>(
         obj.promise = undefined;
         obj.invalidated = true;
         call(...args);
+        // Assign to itself, to preset the type assumptions typescript makes (otherwise we get an error below)
+        obj = obj as any;
         let promise = atomic(obj.promise);
         if (!promise) {
             debugger;
             throw new Error(`Impossible, called function, but promise is not found for ${fncName}`);
         }
+        // Don't cache promise calls
+        void promise.finally(() => {
+            if (obj.promise === promise) {
+                obj.promise = undefined;
+            }
+        });
         return promise;
     });
 };
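
The `promise.finally` block added above is a small but easy-to-miss pattern: the cached promise is cleared once it settles, but only if it is still the current one, so a newer in-flight call is never clobbered. In isolation the pattern looks like this (the `Entry` shape is illustrative):

```ts
type Entry = { promise?: Promise<unknown> };

// Clear the cached promise once it settles; the identity check guards
// against a newer call having already replaced obj.promise in the meantime.
function dontCachePromise(obj: Entry, promise: Promise<unknown>) {
    void promise.finally(() => {
        if (obj.promise === promise) {
            obj.promise = undefined;
        }
    });
}
```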
@@ -9,6 +9,9 @@ import { renderToString } from "./renderToString";
 import { debugTime } from "../../src/0-path-value-core/pathValueCore";
 import { measureBlock } from "socket-function/src/profiling/measure";
 
+// TODO: Create more icons with: https://www.recraft.ai/project/e2a2200f-bed2-4426-b8f1-e5120b9dc990
+// - This should also make creating animated SVGs a lot easier. If the base is nice, and we add comments, we can probably get the AI to help animate it
+
 // Most icons are from https://www.figma.com/file/eVpKKmt8uOKmSYKW4LyusF/Free-Icon-Pack-1600%2B-icons-(Community)?node-id=1654-9894&t=0bDbK0bA9KGpswRE-0
 // TODO: Add a build step that does this (storing the .svgs in a file), so we don't
 
package/src/server.ts CHANGED
@@ -33,7 +33,8 @@ async function main() {
 
     Error.stackTraceLimit = 20;
 
-    //SocketFunction.logMessages = true;
+    // SocketFunction.logMessages = true;
+    // SocketFunction.silent = false;
 
     // ClientWatcher.DEBUG_READS = true;
     // ClientWatcher.DEBUG_WRITES = true;