querysub 0.406.0 → 0.407.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
1
+ #!/usr/bin/env node
2
+
3
+ // Always local, as we want to always use the local code? Might not be needed anymore?
4
+ process.argv.push("--local");
5
+
6
+ require("typenode");
7
+ require("../src/4-deploy/deployPrefixes");
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "querysub",
3
- "version": "0.406.0",
3
+ "version": "0.407.0",
4
4
  "main": "index.js",
5
5
  "license": "MIT",
6
6
  "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -23,6 +23,7 @@
23
23
  },
24
24
  "bin": {
25
25
  "deploy": "./bin/deploy.js",
26
+ "deploy-prefixes": "./bin/deploy-prefixes.js",
26
27
  "server": "./bin/server.js",
27
28
  "server-public": "./bin/server-public.js",
28
29
  "function": "./bin/function.js",
@@ -46,8 +46,6 @@ let DISK_AUDIT_RATE = timeInMinute * 15;
46
46
  // probably is less than that). Which is around 2.5 cents on digital ocean IF we go over
47
47
  // our 1TB/month allowance.
48
48
  let API_AUDIT_RATE = timeInSecond * 30;
49
- // BUT, for now, poll less often... because I think it is lagging our 2 core potato digital ocean server.
50
- API_AUDIT_RATE = timeInMinute * 5;
51
49
  let API_AUDIT_COUNT = 12;
52
50
 
53
51
 
@@ -5,7 +5,7 @@ import { archiveJSONT } from "../-a-archives/archivesJSONT";
5
5
  import { getDomain, isPublic } from "../config";
6
6
  import { cache, lazy } from "socket-function/src/caching";
7
7
  import { SocketFunction } from "socket-function/SocketFunction";
8
- import { runInSerial, runInfinitePollCallAtStart } from "socket-function/src/batching";
8
+ import { delay, runInSerial, runInfinitePollCallAtStart } from "socket-function/src/batching";
9
9
  import { getAllNodeIds, getOwnNodeId, isOwnNodeId, onNodeBroadcasted, syncNodesNow, watchDeltaNodeIds, watchNodeIds } from "../-f-node-discovery/NodeDiscovery";
10
10
  import { IdentityController_getCurrentReconnectNodeIdAssert } from "../-c-identity/IdentityController";
11
11
  import { requiresNetworkTrustHook } from "../-d-trust/NetworkTrust2";
@@ -15,6 +15,7 @@ import { timeoutToError } from "../errors";
15
15
  import { AuthoritySpec } from "./PathRouter";
16
16
  import { formatTime } from "socket-function/src/formatting/format";
17
17
  import { getAllAuthoritySpec, getEmptyAuthoritySpec } from "./PathRouterServerAuthoritySpec";
18
+ import { getPrefixesForDeploy } from "../3-path-functions/syncSchema";
18
19
 
19
20
 
20
21
  let NETWORK_POLL_INTERVAL = timeInMinute * 5;
@@ -201,9 +202,13 @@ class AuthorityLookup {
201
202
  if (isClient()) {
202
203
  // Doesn't matter what the node ID is, we should really only be connecting to the browser node ID, Which will have all the data for the current domain.
203
204
  // - Get all node IDs should restrict our nodes to just the browser node ID. If we ever change this, then either it's redundant nodes and they all have all the same data, or we need to figure out what data they have, And as their proxies, it probably won't be their actual authority data. So that will require new API functions, etc.
205
+ await new Promise(r => setImmediate(r));
206
+ await delay(1);
204
207
  this.updatePaths(nodeId, {
205
- ...getAllAuthoritySpec(),
206
208
  nodeId: nodeId,
209
+ prefixes: await getPrefixesForDeploy(),
210
+ routeStart: 0,
211
+ routeEnd: 1,
207
212
  }, true);
208
213
  return;
209
214
  }
@@ -9,6 +9,7 @@ import { unique } from "../misc";
9
9
  import { measureFnc } from "socket-function/src/profiling/measure";
10
10
  import { getRoutingOverride, hasPrefixHash } from "./PathRouterRouteOverride";
11
11
  import { sha256 } from "js-sha256";
12
+ import { removeRange } from "../rangeMath";
12
13
 
13
14
 
14
15
  // Cases
@@ -36,6 +37,25 @@ export type AuthoritySpec = {
36
37
  excludeDefault?: boolean;
37
38
  };
38
39
 
40
+ export function debugSpec(spec: AuthoritySpec) {
41
+ return {
42
+ info: `${spec.routeStart}-${spec.routeEnd} (${spec.prefixes.length} prefixes${spec.excludeDefault ? " excluding default" : ""})`,
43
+ spec: { do: { not: { expand: { spec } } } },
44
+ };
45
+ }
46
+
47
+ function getMatchingPrefix(spec: AuthoritySpec, path: string): string | undefined {
48
+ let longestPrefix: string | undefined;
49
+ for (let prefix of spec.prefixes) {
50
+ if (path.startsWith(prefix) && prefix !== path) {
51
+ if (!longestPrefix || longestPrefix.length < prefix.length) {
52
+ longestPrefix = prefix;
53
+ }
54
+ }
55
+ }
56
+ return longestPrefix;
57
+ }
58
+
39
59
 
40
60
  export class PathRouter {
41
61
 
@@ -47,6 +67,10 @@ export class PathRouter {
47
67
  */
48
68
  @measureFnc
49
69
  public static getRouteChildKey(path: string): number {
70
+ let override = getRoutingOverride(path);
71
+ if (override) {
72
+ return override.route;
73
+ }
50
74
  let key = getLastPathPart(path);
51
75
  return this.getSingleKeyRoute(key);
52
76
  }
@@ -61,11 +85,12 @@ export class PathRouter {
61
85
  path = hack_stripPackedPath(path);
62
86
  let override = getRoutingOverride(path);
63
87
  if (override) {
64
- if (!hasPrefixHash({ spec, prefixHash: override.prefixHash })) return -1;
88
+ if (spec.excludeDefault && !hasPrefixHash({ spec, prefixHash: override.prefixHash })) return -1;
89
+ if (override.route < spec.routeStart || override.route >= spec.routeEnd) return -1;
65
90
  return override.route;
66
91
  }
67
92
 
68
- let prefix = spec.prefixes.find(x => path.startsWith(x) && x !== path);
93
+ let prefix = getMatchingPrefix(spec, path);
69
94
  if (prefix) {
70
95
  let key = getPathIndex(path, getPathDepth(prefix));
71
96
  if (key === undefined) {
@@ -79,6 +104,17 @@ export class PathRouter {
79
104
  return hash;
80
105
  }
81
106
 
107
+ // Mostly for debugging
108
+ @measureFnc
109
+ public static getAllRoutes(path: string): number[] {
110
+ let routes: number[] = [];
111
+ for (let authority of authorityLookup.getTopologySync()) {
112
+ let route = this.getRouteFull({ path, spec: authority.authoritySpec });
113
+ routes.push(route);
114
+ }
115
+ return routes;
116
+ }
117
+
82
118
  private static lastKeyRoute = {
83
119
  key: "",
84
120
  route: -1,
@@ -86,6 +122,12 @@ export class PathRouter {
86
122
  // Takes a key which is a part of a path. Mostly used PathRouterRouteOverride, or other PathRouter helpers.
87
123
  public static getSingleKeyRoute(key: string): number {
88
124
  if (key && this.lastKeyRoute.key === key) return this.lastKeyRoute.route;
125
+ let override = getRoutingOverride(key);
126
+ if (override) {
127
+ this.lastKeyRoute.key = key;
128
+ this.lastKeyRoute.route = override.route;
129
+ return override.route;
130
+ }
89
131
  let hash = fastHash(key);
90
132
  let route = hash % (1000 * 1000 * 1000) / (1000 * 1000 * 1000);
91
133
  this.lastKeyRoute.key = key;
@@ -145,18 +187,11 @@ export class PathRouter {
145
187
 
146
188
  let prefixes = ourSpec.prefixes.slice();
147
189
  sort(prefixes, x => x.length);
148
- function getPrefix(path: string): string | undefined {
149
- for (let prefix of prefixes) {
150
- if (path.startsWith(prefix) && prefix !== path) return prefix;
151
- }
152
- return undefined;
153
- }
154
-
155
190
 
156
191
  // NOTE: If there are few enough path values for a prefix, we don't even need to calculate the routing hash.
157
192
  let byPrefix = new Map<string | undefined, PathValue[]>();
158
193
  for (let value of values) {
159
- let prefix = getPrefix(value.path);
194
+ let prefix = getMatchingPrefix(ourSpec, value.path);
160
195
  let values = byPrefix.get(prefix);
161
196
  if (!values) {
162
197
  values = [];
@@ -283,11 +318,12 @@ export class PathRouter {
283
318
 
284
319
 
285
320
  // NOTE: This might return overlapping specs. Presently, this is only used when we're loading our initial data, so it's fine. However, if we use this for another purpose in the future, it might cause problems. So we might need to implement a different function for that theoretical future purpose.
321
+ // NOTE: Only takes remote nodes as presently this is just used during startup.
286
322
  @measureFnc
287
323
  public static getAuthoritySources(config: {
288
324
  target: AuthoritySpec;
289
325
  preferredNodeIds?: string[];
290
- }): (AuthoritySpec & { useFullPathHash?: boolean })[] {
326
+ }): AuthoritySpec[] {
291
327
  let { target } = config;
292
328
  let allSources = authorityLookup.getTopologySync();
293
329
  allSources = allSources.filter(x => !isOwnNodeId(x.nodeId));
@@ -368,41 +404,21 @@ export class PathRouter {
368
404
  for (let source of group) {
369
405
  let s = source.routeStart;
370
406
  let e = source.routeEnd;
371
- // Find if it overlaps any missing range, and if so, cut out that part of the missing change, and add the nodeId to usedNodeIds
372
- for (let i = missingRanges.length - 1; i >= 0; i--) {
373
- let missingRange = missingRanges[i];
374
- if (s >= missingRange.end || e <= missingRange.start) continue;
375
- let startTaken = Math.max(missingRange.start, s);
376
- let endTaken = Math.min(missingRange.end, e);
377
- // NOTE: It's not ideal that we might have to fragment one node ID between multiple requests. However, in practice, there shouldn't be much fragmentation here. The ranges that our nodes are breaking down by should be consistent, so there's actually no overlap or subsets.
407
+ let { removedRanges } = removeRange(missingRanges, { start: s, end: e });
408
+ for (let removedRange of removedRanges) {
378
409
  usedParts.push({
379
410
  nodeId: source.nodeId,
380
- routeStart: startTaken,
381
- routeEnd: endTaken,
411
+ routeStart: removedRange.start,
412
+ routeEnd: removedRange.end,
382
413
  prefixes: target.prefixes,
383
414
  excludeDefault: target.excludeDefault,
384
- useFullPathHash,
385
415
  });
386
- missingRanges.splice(i, 1);
387
- // Add back the parts we didn't overlap
388
- if (missingRange.start < s) {
389
- missingRanges.push({
390
- start: missingRange.start,
391
- end: s,
392
- });
393
- }
394
- if (missingRange.end > e) {
395
- missingRanges.push({
396
- start: e,
397
- end: missingRange.end,
398
- });
399
- }
400
416
  }
401
417
  if (missingRanges.length === 0) break;
402
418
  }
403
419
  if (missingRanges.length === 0) {
404
420
  if (!hashesSameAsTarget(group[0])) {
405
- console.warn(`Found a cohesive group that doesn't hash the same way we do. Expanding our range to be the full range of the group. This will be slower than it needs to.`);
421
+ console.warn(`Could only match a cohesive group that doesn't hash the same way we do. Expanding our range to be the full range of the group. This will be slower than it needs to, and might cause issues that result in only partial synchronization. This should be fixable with a deploy.`, config.target);
406
422
  }
407
423
  return usedParts;
408
424
  }
@@ -418,9 +434,6 @@ export class PathRouter {
418
434
  public static getChildReadNodes(path: string, config?: {
419
435
  preferredNodeIds?: string[];
420
436
  }): {
421
- // By default we hash the key directly under the path. However, If that is not how our nodes were sharded, as in they didn't use it as a prefix, then they will be sharded by hashing the full path, and so when we receive the data, we need to keep this in mind and filter the data based on the full path hash.
422
- useFullPathHash?: boolean;
423
-
424
437
  // NOTE: If at all possible, we will cover all ranges. None of the returned nodes will be redundant.
425
438
  // - Sorted by range.start
426
439
  nodes: {
@@ -433,20 +446,66 @@ export class PathRouter {
433
446
  if (this.isSelfAuthority(path)) {
434
447
  return { nodes: [{ nodeId: getOwnNodeId(), range: { start: 0, end: 1 } }] };
435
448
  }
449
+ let preferredNodeIds = new Set(config?.preferredNodeIds ?? []);
436
450
 
437
451
  // If a prefix is a parent of path, then it is the same as matching just the path directly
438
452
  // (If our prefix directly equals one of the other matches, then it's more complicated, As then, the child keys of path are what is hashed, and so all the children will have different routes, so we might match multiple nodes. The same thing if we're matching the remaining case, in which case it's a full path hash, so the child key matters, and again, different routes).
439
453
  // - The different route case is how the FuntionRunner works, and without it large databases couldn't run functions. However, most applications won't directly use it.
440
454
  let allSources = authorityLookup.getTopologySync();
441
- allSources = allSources.filter(x => !isOwnNodeId(x.nodeId));
442
- let nestedMatches = allSources.filter(x =>
443
- x.authoritySpec.prefixes.some(y => path.startsWith(y) && y !== path)
444
- && this.matchesAuthoritySpec(x.authoritySpec, path)
445
- );
455
+ // Prefer our own node
456
+ sort(allSources, x => isOwnNodeId(x.nodeId) ? -1 : 1);
457
+
458
+
459
+ // Direct prefixes always take priority, as almost everything is under a prefix anyways...
460
+
461
+
462
+ // Direct prefix. This happens for things like calls and functions, it requires more advanced routing as it means we're going to route between multiple servers, but... it is important
463
+ let hasPrefix = allSources.filter(x => x.authoritySpec.prefixes.some(y => y === path)).map(x => x.authoritySpec);
464
+ if (hasPrefix.length > 0) {
465
+ shuffle(hasPrefix, Math.random());
466
+ sort(hasPrefix, x => preferredNodeIds.has(x.nodeId) ? -1 : 1);
467
+
468
+ let missingRanges: { start: number; end: number }[] = [{
469
+ start: 0,
470
+ end: 1,
471
+ }];
472
+ let usedParts: {
473
+ nodeId: string;
474
+ range: { start: number; end: number };
475
+ }[] = [];
476
+ for (let source of hasPrefix) {
477
+ let s = source.routeStart;
478
+ let e = source.routeEnd;
479
+ let { removedRanges } = removeRange(missingRanges, { start: s, end: e });
480
+ // NOTE: It's not ideal that we might have to fragment one node ID between multiple requests. However, in practice, there shouldn't be much fragmentation here. The ranges that our nodes are breaking down by should be consistent, so there's actually no overlap or subsets.
481
+ for (let removedRange of removedRanges) {
482
+ usedParts.push({
483
+ nodeId: source.nodeId,
484
+ range: removedRange,
485
+ });
486
+ }
487
+ if (missingRanges.length === 0) break;
488
+ }
489
+ if (missingRanges.length === 0) {
490
+ return { nodes: usedParts };
491
+ }
492
+ }
493
+
494
+ let nestedMatches = allSources.filter(x => {
495
+ // There's nested prefixes, so if we match any prefix explicitly, we can't just take one of the previous prefixes because that isn't how the hashing will work.
496
+ // - This happens if it's a direct match, but one of the shards is down, in which case we can't get a full match.
497
+ if (x.authoritySpec.prefixes.some(y => y === path)) return false;
498
+
499
+ // If our path, which we're going to read the children of, is the child of another path, then it means in that other path, the child key will be a known constant to us, and so we're going to match exactly one authority.
500
+ return (
501
+ x.authoritySpec.prefixes.some(y => path.startsWith(y) && y !== path)
502
+ && this.matchesAuthoritySpec(x.authoritySpec, path)
503
+ );
504
+ });
446
505
  if (nestedMatches.length > 0) {
447
506
  shuffle(nestedMatches, Math.random());
448
- let preferredNodeIds = new Set(config?.preferredNodeIds ?? []);
449
507
  sort(nestedMatches, x => preferredNodeIds.has(x.nodeId) ? -1 : 1);
508
+ sort(allSources, x => isOwnNodeId(x.nodeId) ? -1 : 1);
450
509
  return {
451
510
  nodes: nestedMatches.map(x => ({
452
511
  nodeId: x.nodeId,
@@ -456,25 +515,53 @@ export class PathRouter {
456
515
  };
457
516
  }
458
517
 
459
- let fullSources = this.getAuthoritySources({
460
- target: {
461
- nodeId: "",
462
- prefixes: [path],
463
- routeStart: 0,
464
- routeEnd: 1,
465
- excludeDefault: true,
466
- },
467
- preferredNodeIds: config?.preferredNodeIds,
518
+ // If we are not under any prefixes of it, then it will be a full path hash
519
+ let fullPathMatches = allSources.filter(x => {
520
+ return !x.authoritySpec.prefixes.some(y => path.startsWith(y) && y !== path);
468
521
  });
469
- if (fullSources.length === 0) return { nodes: [] };
522
+ // Same as prefix matches. Not preferred, and not preferred over being under a prefix, but required for some root data, or data with no prefixes.
523
+ if (fullPathMatches.length > 0) {
524
+ shuffle(fullPathMatches, Math.random());
525
+ sort(fullPathMatches, x => preferredNodeIds.has(x.nodeId) ? -1 : 1);
526
+ sort(allSources, x => isOwnNodeId(x.nodeId) ? -1 : 1);
527
+ let missingRanges: { start: number; end: number }[] = [{
528
+ start: 0,
529
+ end: 1,
530
+ }];
531
+ let usedParts: {
532
+ nodeId: string;
533
+ range: { start: number; end: number };
534
+ }[] = [];
535
+ for (let source of fullPathMatches) {
536
+ let s = source.authoritySpec.routeStart;
537
+ let e = source.authoritySpec.routeEnd;
538
+ let { removedRanges } = removeRange(missingRanges, { start: s, end: e });
539
+ // NOTE: It's not ideal that we might have to fragment one node ID between multiple requests. However, in practice, there shouldn't be much fragmentation here. The ranges that our nodes are breaking down by should be consistent, so there's actually no overlap or subsets.
540
+ for (let removedRange of removedRanges) {
541
+ usedParts.push({
542
+ nodeId: source.nodeId,
543
+ range: removedRange,
544
+ });
545
+ }
546
+ if (missingRanges.length === 0) break;
547
+ }
548
+ if (missingRanges.length === 0) {
549
+ return { nodes: usedParts };
550
+ }
551
+ }
470
552
 
471
- return {
472
- useFullPathHash: fullSources.some(x => x.useFullPathHash),
473
- nodes: fullSources.map(x => ({
474
- nodeId: x.nodeId,
475
- range: { start: x.routeStart, end: x.routeEnd },
476
- })),
477
- };
553
+
554
+
555
+ // TODO: We could maybe match a partial match. However, even that is suspect. The site being partially broken is almost worse than it being completely broken. We should just get ALL the shards running again...
556
+
557
+
558
+ require("debugbreak")(2);
559
+ debugger;
560
+
561
+
562
+ // NOTE: We *could* actually synchronize it even if it doesn't have a prefix shard as we can fall back to just the full path sharding. However, it becomes very complicated if we want a specific range, and then it becomes complicated if it then switches to prefix hashing (With the nodes that were using the full path hashing slowly going away). AND... key synchronization IS slow, so it's good to discourage it in general.
563
+ console.error(`Want to sync a prefix which is not under an existing prefix, nor equal to a prefix. 1) The servers are down. 2) Don't access the .keys() 3) call addRoutingPrefixForDeploy to add a route/parent route explicitly (as is done in PathFunctionRunner.ts). Path: ${JSON.stringify(path)}`, { path, allSources });
564
+ return { nodes: [] };
478
565
  }
479
566
 
480
567
 
@@ -538,3 +625,5 @@ export class PathRouter {
538
625
 
539
626
  }
540
627
 
628
+
629
+ (globalThis as any).PathRouter = PathRouter;
@@ -22,7 +22,7 @@ let keySpecialIdentifier = (
22
22
  export function createRoutingOverrideKey(config: {
23
23
  originalKey: string;
24
24
  routeKey: string;
25
- // This is not the prefix that it has to match. This is the prefix we remap it to. And so anything which matches this remap prefix will match this routing key.
25
+ // This is the prefix it has the equivalent of. We need this, so if something excludes default, it doesn't automatically get every routing overridden value.
26
26
  remappedPrefix: string;
27
27
  }) {
28
28
  let { originalKey, routeKey, remappedPrefix } = config;
@@ -1,12 +1,12 @@
1
1
  import { SocketFunction } from "socket-function/SocketFunction";
2
2
  import { delay, batchFunction } from "socket-function/src/batching";
3
- import { isNode, timeInSecond } from "socket-function/src/misc";
3
+ import { deepCloneJSON, isNode, timeInSecond } from "socket-function/src/misc";
4
4
  import { measureBlock, measureFnc } from "socket-function/src/profiling/measure";
5
5
  import { isTrustedByNode } from "../-d-trust/NetworkTrust2";
6
6
  import { areNodeIdsEqual, isOwnNodeId } from "../-f-node-discovery/NodeDiscovery";
7
7
  import { ActionsHistory } from "../diagnostics/ActionsHistory";
8
8
  import { errorToUndefined, logErrors, timeoutToUndefined } from "../errors";
9
- import { getPathFromStr } from "../path";
9
+ import { getPathFromStr, hack_stripPackedPath } from "../path";
10
10
  import { PathValueControllerBase } from "./PathValueController";
11
11
  import { PathRouter } from "./PathRouter";
12
12
  import { PathValue, MAX_ACCEPTED_CHANGE_AGE, WriteState, debugPathValuePath, compareTime, epochTime } from "./pathValueCore";
@@ -16,6 +16,11 @@ import { red } from "socket-function/src/formatting/logColors";
16
16
  import { isClient } from "../config2";
17
17
  import { auditLog, isDebugLogEnabled } from "./auditLogs";
18
18
  import { authorityLookup } from "./AuthorityLookup";
19
+ import { debugNodeId } from "../-c-identity/IdentityController";
20
+ import { decodeNodeId } from "../-a-auth/certs";
21
+ import { decodeParentFilter, encodeParentFilter } from "./hackedPackedPathParentFiltering";
22
+ import { deepCloneCborx } from "../misc/cloneHelpers";
23
+ import { removeRange } from "../rangeMath";
19
24
  setImmediate(() => import("../1-path-client/RemoteWatcher"));
20
25
  setImmediate(() => import("../4-querysub/Querysub"));
21
26
 
@@ -32,10 +37,7 @@ export type BatchValues = {
32
37
  export type RemoteValueAndValidState = {
33
38
  sourceNodeId: string;
34
39
  pathValues: PathValue[];
35
- validStates: WriteState[];
36
40
  initialTriggers: { values: Set<string>; parentPaths: Set<string> };
37
- // Means it's from an authority path sync
38
- authoritySyncPaths?: Set<string>;
39
41
  };
40
42
 
41
43
  class PathValueCommitter {
@@ -194,7 +196,6 @@ class PathValueCommitter {
194
196
  parentSyncs: [],
195
197
  initialTriggers: { values: new Set(), parentPaths: new Set() },
196
198
  });
197
- PathRouter.getAllAuthorities(pathValue.path);
198
199
  console.error(`There are no authorities for path ${pathValue.path}. The write will be lost.`, {
199
200
  path: pathValue.path,
200
201
  timeId: pathValue.time.time,
@@ -215,6 +216,7 @@ class PathValueCommitter {
215
216
  // Don't send to bad nodes for 60 seconds
216
217
  const nodeIgnoreTime = Date.now() - 1000 * 60;
217
218
  let promises = Array.from(valuesPerOtherAuthority.entries()).map(async ([otherAuthority, values]) => {
219
+
218
220
  let disconnected = SocketFunction.getLastDisconnectTime(otherAuthority);
219
221
  if (disconnected && disconnected > nodeIgnoreTime) {
220
222
  // If it disconnected recently... don't send to it for a little bit, so we don't spend
@@ -299,16 +301,34 @@ class PathValueCommitter {
299
301
  if (!isClient()) {
300
302
  measureBlock(function ignoreUnrequestedValues() {
301
303
  for (let batch of batched) {
302
- function isWrongAuthority(path: string) {
304
+ function isWrongAuthority(path: string, value?: PathValue, type?: string) {
303
305
  let watchingAuthorityId = remoteWatcher.getExistingWatchRemoteNodeId(path);
304
306
  // If we AREN'T watching it... it's actually fine, we can receive any values.
305
307
  // When we start watching, those values will get clobbered.
306
308
  if (watchingAuthorityId === undefined) return false;
307
- return !areNodeIdsEqual(watchingAuthorityId, batch.sourceNodeId);
309
+ if (!areNodeIdsEqual(watchingAuthorityId, batch.sourceNodeId)) {
310
+ let valueWatchNode = remoteWatcher.getValueWatchRemoteNodeId(path);
311
+ if (!valueWatchNode || !areNodeIdsEqual(valueWatchNode, batch.sourceNodeId)) {
312
+ let candidates = PathRouter.getAllAuthorities(path);
313
+ require("debugbreak")(2);
314
+ debugger;
315
+ console.warn(`Ignoring value from wrong authority. Should have been ${debugNodeId(watchingAuthorityId)}, but was received from ${debugNodeId(batch.sourceNodeId)}.`, {
316
+ path,
317
+ type,
318
+ timeId: value?.time.time,
319
+ source: value?.source,
320
+ sourceNodeId: debugNodeId(batch.sourceNodeId),
321
+ sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
322
+ watchingAuthorityId: debugNodeId(watchingAuthorityId),
323
+ watchingAuthorityNodeThreadId: decodeNodeId(watchingAuthorityId)?.threadId,
324
+ isTransparent: value?.isTransparent,
325
+ });
326
+ }
327
+ return true;
328
+ }
329
+ return false;
308
330
  }
309
331
  batch.pathValues = batch.pathValues.filter(value => {
310
- // Authorities watch other authorities using path watches, And so one path can come from many authorities, so we just have to accept it no matter what, if it's from that type of source.
311
- if (batch.authoritySyncPaths?.has(value.path)) return true;
312
332
  // NOTE: See the definition for lock count for why this check isn't checking all the possible cases. Essentially, locks is often empty, and that's intentional. However, the reverse should never be true, locks should never have values when lockCount is 0.
313
333
  if (value.lockCount === 0 && value.locks.length > 0) {
314
334
  console.error(red(`Ignoring value with invalid lockCount. Was ${value.lockCount}, but we have ${value.locks.length} locks. locks are optional, but lockCount isn't. We should never have locks without having lockCount set. ${debugPathValuePath(value)}`));
@@ -316,30 +336,33 @@ class PathValueCommitter {
316
336
  }
317
337
 
318
338
  if (PathRouter.isSelfAuthority(value.path)) return true;
319
- if (isWrongAuthority(value.path)) return false;
320
- // epochTimes are just indicators that the value has no value, and so are safe to sync
321
- // from any source. They won't cause future conflicts, because any other value overrides them.
322
- // - This might not be required?
323
- if (compareTime(value.time, epochTime) === 0) {
324
- return true;
339
+ if (isWrongAuthority(value.path, value, "value")) {
340
+ return false;
325
341
  }
326
-
327
- // Also warn, because... if we get a lot of these, there might be a bug.
328
- // A few when we change watches is possible, but it should be rare.
329
- let watchingAuthorityId = remoteWatcher.getExistingWatchRemoteNodeId(value.path);
330
- auditLog("IGNORING VALUE FROM DIFFERENT AUTHORITY", { path: value.path, watchingAuthorityId, receivedFromAuthority: batch.sourceNodeId });
331
- return false;
342
+ return true;
332
343
  });
333
344
 
334
- for (let value of batch.initialTriggers.values) {
335
- if (isWrongAuthority(value)) {
345
+ for (let value of Array.from(batch.initialTriggers.values)) {
346
+ if (isWrongAuthority(value, undefined, "initialTrigger")) {
336
347
  batch.initialTriggers.values.delete(value);
337
348
  }
338
349
  }
339
- for (let parentPath of batch.initialTriggers.parentPaths) {
340
- if (isWrongAuthority(parentPath)) {
341
- batch.initialTriggers.parentPaths.delete(parentPath);
350
+ for (let parentPath of Array.from(batch.initialTriggers.parentPaths)) {
351
+ if (remoteWatcher.isFinalRemoteWatchPath({ parentPath, nodeId: batch.sourceNodeId })) {
352
+ continue;
342
353
  }
354
+
355
+ // TODO: Remove this breakpoint eventually. This can happen naturally when servers go down?
356
+ require("debugbreak")(2);
357
+ debugger;
358
+ remoteWatcher.isFinalRemoteWatchPath({ parentPath, nodeId: batch.sourceNodeId });
359
+
360
+ console.warn(`Ignoring parent path which we aren't watching. From ${debugNodeId(batch.sourceNodeId)}.`, {
361
+ parentPath,
362
+ sourceNodeId: debugNodeId(batch.sourceNodeId),
363
+ sourceNodeThreadId: decodeNodeId(batch.sourceNodeId)?.threadId,
364
+ });
365
+ batch.initialTriggers.parentPaths.delete(parentPath);
343
366
  }
344
367
  }
345
368
  });
@@ -347,7 +370,7 @@ class PathValueCommitter {
347
370
 
348
371
 
349
372
  // path => sourceNodeId
350
- let parentSyncs = new Map<string, string>();
373
+ let parentSyncs = new Map<string, Set<string>>();
351
374
 
352
375
  // We need to do a bit of work to properly clear path values that are from old initial triggers. As if we receive two initial triggers, they need to clobber each other. And if we collapse it, we lose that information. So we have to do that here.
353
376
  let finalResults = new Map<string, {
@@ -380,16 +403,28 @@ class PathValueCommitter {
380
403
  results.pathValues.push(pathValue);
381
404
  }
382
405
  for (let parentPath of batch.initialTriggers.parentPaths) {
383
- parentSyncs.set(parentPath, batch.sourceNodeId);
406
+ let sourceNodeIds = parentSyncs.get(parentPath);
407
+ if (!sourceNodeIds) {
408
+ sourceNodeIds = new Set();
409
+ parentSyncs.set(parentPath, sourceNodeIds);
410
+ }
411
+ sourceNodeIds.add(batch.sourceNodeId);
384
412
  }
385
413
  }
386
414
 
387
415
  let parentPaths = new Set(parentSyncs.keys());
388
416
  let initialValues = new Set(Array.from(finalResults.values()).filter(x => x.initialTrigger).map(x => x.path));
389
417
 
418
+ let parentSyncsList: { parentPath: string; sourceNodeId: string }[] = [];
419
+ for (let [parentPath, sourceNodeIds] of parentSyncs.entries()) {
420
+ for (let sourceNodeId of sourceNodeIds) {
421
+ parentSyncsList.push({ parentPath, sourceNodeId });
422
+ }
423
+ }
424
+
390
425
  validStateComputer.ingestValuesAndValidStates({
391
426
  pathValues: Array.from(finalResults.values()).map(x => x.pathValues).flat(),
392
- parentSyncs: Array.from(parentSyncs.entries()).map(([parentPath, sourceNodeId]) => ({ parentPath, sourceNodeId })),
427
+ parentSyncs: parentSyncsList,
393
428
  initialTriggers: { values: initialValues, parentPaths: parentPaths },
394
429
  });
395
430
  },
@@ -159,13 +159,11 @@ export class PathValueControllerBase {
159
159
  }
160
160
 
161
161
  try {
162
- let parentSyncs = Array.from(config.initialTriggers?.parentPaths || [])
163
- .map(x => ({ parentPath: x, sourceNodeId: callerId }));
164
162
  let initialTriggers = config.initialTriggers || { values: new Set(), parentPaths: new Set() };
165
- validStateComputer.ingestValuesAndValidStates({
163
+ await pathValueCommitter.ingestRemoteValuesAndValidStates({
166
164
  pathValues: values,
167
- parentSyncs,
168
165
  initialTriggers,
166
+ sourceNodeId: callerId,
169
167
  });
170
168
  } catch (error) {
171
169
  console.error("Error ingesting values and valid states", error);