querysub 0.340.0 → 0.342.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +3 -3
- package/src/-a-archives/archivesBackBlaze.ts +29 -11
- package/src/2-proxy/PathValueProxyWatcher.ts +219 -53
- package/src/2-proxy/garbageCollection.ts +2 -1
- package/src/3-path-functions/syncSchema.ts +8 -7
- package/src/4-dom/qreact.tsx +12 -1
- package/src/4-querysub/Querysub.ts +30 -29
- package/src/4-querysub/QuerysubController.ts +71 -43
- package/src/4-querysub/predictionQueue.tsx +184 -0
- package/src/4-querysub/querysubPrediction.ts +148 -274
- package/src/4-querysub/schemaHelpers.ts +36 -0
- package/src/diagnostics/logs/errorNotifications/errorDigestEmail.tsx +1 -1
- package/src/diagnostics/logs/errorNotifications/errorWatchEntry.tsx +1 -0
- package/src/diagnostics/logs/lifeCycleAnalysis/spec.md +5 -0
- package/src/functional/promiseCache.ts +10 -1
- package/src/functional/runCommand.ts +2 -63
- package/src/library-components/SyncedController.ts +120 -68
- package/src/library-components/SyncedControllerLoadingIndicator.tsx +1 -1
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "querysub",
-  "version": "0.340.0",
+  "version": "0.342.0",
   "main": "index.js",
   "license": "MIT",
   "note1": "note on node-forge fork, see https://github.com/digitalbazaar/forge/issues/744 for details",
@@ -50,9 +50,9 @@
   "js-sha512": "^0.9.0",
   "node-forge": "https://github.com/sliftist/forge#e618181b469b07bdc70b968b0391beb8ef5fecd6",
   "pako": "^2.1.0",
-  "socket-function": "^0.
+  "socket-function": "^0.152.0",
   "terser": "^5.31.0",
-  "typesafecss": "^0.
+  "typesafecss": "^0.23.0",
   "yaml": "^2.5.0",
   "yargs": "^15.3.1"
 },
package/src/-a-archives/archivesBackBlaze.ts CHANGED

@@ -501,6 +501,8 @@ export class ArchivesBackblaze {
         return api;
     });

+    // Keep track of when we last reset because of a 503
+    private last503Reset = 0;
     // IMPORTANT! We must always CATCH AROUND the apiRetryLogic, NEVER inside of fnc. Otherwise we won't
     //  be able to recreate the auth token.
     private async apiRetryLogic<T>(
@@ -513,16 +515,40 @@ export class ArchivesBackblaze {
         } catch (err: any) {
             if (retries <= 0) throw err;

+            // If it's a 503 and it's been a minute since we last reset, then Wait and reset.
+            if (
+                (err.stack.includes(`503`)
+                    || err.stack.includes(`"service_unavailable"`)
+                    || err.stack.includes(`"internal_error"`)
+                ) && Date.now() - this.last503Reset > 60 * 1000) {
+                this.log("503 error, waiting a minute and resetting: " + err.message);
+                await delay(10 * 1000);
+                // We check again in case, and in the very likely case that this is being run in parallel, we only want to reset once.
+                if (Date.now() - this.last503Reset > 60 * 1000) {
+                    this.log("Resetting getAPI and getBucketAPI: " + err.message);
+                    this.last503Reset = Date.now();
+                    getAPI.reset();
+                    this.getBucketAPI.reset();
+                }
+                return this.apiRetryLogic(fnc, retries - 1);
+            }
+
+            // If the error is that the authorization token is invalid, reset getBucketAPI and getAPI
+            // If the error is that the bucket isn't found, reset getBucketAPI
+            if (err.stack.includes(`"expired_auth_token"`)) {
+                this.log("Authorization token expired");
+                getAPI.reset();
+                this.getBucketAPI.reset();
+                return this.apiRetryLogic(fnc, retries - 1);
+            }
+
             if (
                 err.stack.includes(`no tomes available`)
-                || err.stack.includes(`"service_unavailable"`)
                 || err.stack.includes(`ETIMEDOUT`)
-                || err.stack.includes(`"internal_error"`)
                 || err.stack.includes(`socket hang up`)
                 // Eh... this might be bad, but... I think we just get random 400 errors. If this spams errors,
                 //  we can remove this line.
                 || err.stack.includes(`400 Bad Request`)
-                || err.stack.includes(`"no tomes available"`)
                 || err.stack.includes(`getaddrinfo ENOTFOUND`)
             ) {
                 this.log(err.message + " retrying in 5s");
@@ -546,14 +572,6 @@ export class ArchivesBackblaze {
             console.error(`getaddrinfo ENOTFOUND ${hostname}`, { lookupAddresses, resolveAddresses, apiUrl: api.apiUrl, fullError: err.stack });
         }

-        // If the error is that the authorization token is invalid, reset getBucketAPI and getAPI
-        // If the error is that the bucket isn't found, reset getBucketAPI
-        if (err.stack.includes(`"expired_auth_token"`)) {
-            this.log("Authorization token expired");
-            getAPI.reset();
-            this.getBucketAPI.reset();
-            return this.apiRetryLogic(fnc, retries - 1);
-        }
         // TODO: Handle if the bucket is deleted?
         throw err;
     }
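The retry logic above now handles 503-style failures first: it waits, then resets the cached Backblaze API handles at most once per minute, so parallel calls that fail together do not all trigger a reset. A minimal standalone sketch of that pattern (callWithRetry, resetClient, and the cooldown constant are illustrative names, not part of the package):

    // Transient failures trigger at most one shared client reset per cooldown window.
    const RESET_COOLDOWN_MS = 60 * 1000;
    let lastReset = 0;

    const wait = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

    async function callWithRetry<T>(
        fnc: () => Promise<T>,
        resetClient: () => void, // hypothetical: recreates auth tokens / cached API handles
        retries = 3,
    ): Promise<T> {
        try {
            return await fnc();
        } catch (err: any) {
            if (retries <= 0) throw err;
            const text = String(err?.stack ?? err);
            if (text.includes("503") && Date.now() - lastReset > RESET_COOLDOWN_MS) {
                await wait(10 * 1000);
                // Re-check after the delay: when many calls fail in parallel, only the first one resets.
                if (Date.now() - lastReset > RESET_COOLDOWN_MS) {
                    lastReset = Date.now();
                    resetClient();
                }
                return callWithRetry(fnc, resetClient, retries - 1);
            }
            throw err;
        }
    }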
package/src/2-proxy/PathValueProxyWatcher.ts CHANGED

@@ -1,10 +1,10 @@
 import { measureCode, measureWrap, registerMeasureInfo } from "socket-function/src/profiling/measure";
 import { SocketFunction } from "socket-function/SocketFunction";
-import { deepCloneJSON, getKeys, isNode, recursiveFreeze, timeInMinute } from "socket-function/src/misc";
+import { binarySearchBasic, binarySearchBasic2, binarySearchIndex, deepCloneJSON, getKeys, insertIntoSortedList, isNode, recursiveFreeze, timeInMinute, timeInSecond } from "socket-function/src/misc";
 import { canHaveChildren, MaybePromise } from "socket-function/src/types";
 import { blue, green, magenta, red, yellow } from "socket-function/src/formatting/logColors";
 import { cache, cacheLimited, lazy } from "socket-function/src/caching";
-import { delay, runInfinitePoll } from "socket-function/src/batching";
+import { delay, runInSerial, runInfinitePoll } from "socket-function/src/batching";
 import { errorify, logErrors } from "../errors";
 import { appendToPathStr, getLastPathPart, getParentPathStr, getPathDepth, getPathFromStr, getPathIndex, getPathIndexAssert, getPathPrefix, getPathStr1, getPathStr2, getPathSuffix, slicePathStrToDepth } from "../path";
 import { addEpsilons } from "../bits";
@@ -36,6 +36,7 @@ import { formatPercent, formatTime } from "socket-function/src/formatting/format
 import { addStatPeriodic, interceptCalls, onAllPredictionsFinished, onTimeProfile } from "../-0-hooks/hooks";
 import { onNextPaint } from "../functional/onNextPaint";
 import { isAsyncFunction } from "../misc";
+import { isClient } from "../config2";

 // TODO: Break this into two parts:
 //  1) Run and get accesses
@@ -45,6 +46,10 @@ import { isAsyncFunction } from "../misc";

 const DEFAULT_MAX_LOCKS = 1000;

+// After this time we allow proxies to be reordered, even if there's flags that tell them not to be.
+const MAX_PROXY_REORDER_BLOCK_TIME = timeInSecond * 10;
+
+let nextSeqNum = 1;
 let nextOrderSeqNum = 1;

 export interface WatcherOptions<Result> {
@@ -115,6 +120,7 @@ export interface WatcherOptions<Result> {
     debugName?: string;

     watchFunction: () => Result;
+    baseFunction?: Function;

     // Only called after all the reads are synced
     //  - getTriggeredWatcher will function correctly in this callback
@@ -214,10 +220,27 @@ export interface WatcherOptions<Result> {

     maxLocksOverride?: number;

+    /** Finish in the order we start in. This is required to prevent races.
+     *  - Defaults to true client side if we're using Querysub.commitAsync
+     *  - If a number overrides the order, this is used when we're making predictions, so they trigger based on the order of their parent, instead of the order of when they started.
+     *  - Only applies the order based on other functions that have this flag set.
+     */
+    finishInStartOrder?: number | boolean;
+
+    /** Only to be used for logging. Is VERY useful in certain circumstances */
+    predictMetadata?: FunctionMetadata;
+
     // NOTE: The reason there isn't throttle support here is very frequently when you want to throttle one component rendering, it's because you have many components. So you actually want to throttle many components and have them throttle in conjunction with each other, which results in the logic becoming complicated.
     //  - But maybe we should support the single throttle case anyways?
 }

+export type DryRunResult = {
+    writes: PathValue[];
+    readPaths: Set<string>
+    readParentPaths: Set<string>;
+    result: unknown;
+};
+
 let harvestableReadyLoopCount = 0;
 let harvestableWaitingLoopCount = 0;
 let lastZombieCount = 0;
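finishInStartOrder above accepts either true (use the watcher's own start time) or a number (for example, a prediction can pass its parent's start time so it sorts alongside the parent). One way to picture the resulting ordering is a lexicographically sortable key built from the start value plus a tiebreaking sequence number; the sketch below is illustrative only, the package builds its key with its own path helpers:

    // Zero-padding makes string comparison agree with numeric comparison.
    let nextSeq = 1;

    function makeOrderKey(startOrder: number | true): string {
        const time = startOrder === true ? Date.now() : startOrder;
        const seq = nextSeq++;
        return time.toFixed(10).padStart(30, "0") + "/" + String(seq).padStart(10, "0");
    }

    const first = makeOrderKey(true);
    const second = makeOrderKey(true);
    console.log(first < second); // true: keys compare in the order the operations started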
@@ -257,6 +280,7 @@ const getAllowedLockDomainsPrefixes = function getTrustedDomains(): string[] {
 };

 export type SyncWatcher = {
+    seqNum: number;
     options: WatcherOptions<any>;

     dispose: () => void;
@@ -306,7 +330,10 @@ export type SyncWatcher = {
     /** The key is the time we read at. This is required to create the lock.
      *  - Only utilized if we commit the write
      */
-    pendingAccesses: Map<Time | undefined, Map<string,
+    pendingAccesses: Map<Time | undefined, Map<string, {
+        pathValue: PathValue;
+        noLocks: boolean;
+    }>>;
     pendingEpochAccesses: Map<Time | undefined, Set<string>>;

     // A necessity for react rendering, as rendering unsynced data almost always looks bad,
@@ -584,14 +611,22 @@ export class PathValueProxyWatcher {
                 readValues = new Map();
                 watcher.pendingAccesses.set(currentReadTime, readValues);
             }
-            readValues.
+            let prevObj = readValues.get(pathValue.path);
+            let noLocks = watcher.options.noLocks || watcher.options.unsafeNoLocks || false;
+            if (!prevObj) {
+                readValues.set(pathValue.path, { pathValue, noLocks });
+            } else {
+                prevObj.pathValue = pathValue;
+                // It can only be no locks if it's always no locks (prev && current)
+                prevObj.noLocks = prevObj.noLocks && noLocks;
+            }
         }
     }

     return pathValue;
 };
 public getCallback = (pathStr: string, syncParentKeys?: "parentKeys", readTransparent?: "readTransparent"): { value: unknown } | undefined => {
-    if (PathValueProxyWatcher.BREAK_ON_READS.size > 0 && proxyWatcher.isAllSynced()) {
+    if (PathValueProxyWatcher.BREAK_ON_READS.size > 0 && (proxyWatcher.isAllSynced() || this)) {
         // NOTE: We can't do a recursive match, as the parent paths include the
         //  root, which is constantly read, but not relevant.
         if (PathValueProxyWatcher.BREAK_ON_READS.has(pathStr)) {
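The pendingAccesses change above keeps a per-path noLocks flag next to the read value, and when the same path is read more than once the flag is AND-ed together: a path stays lock-free only if every read of it was lock-free. A small standalone sketch of that merge rule (the Access type here is illustrative, not the package's):

    type Access = { value: unknown; noLocks: boolean };

    function recordAccess(accesses: Map<string, Access>, path: string, value: unknown, noLocks: boolean): void {
        const prev = accesses.get(path);
        if (!prev) {
            accesses.set(path, { value, noLocks });
            return;
        }
        prev.value = value;
        // One locked read is enough to require a lock for the whole path.
        prev.noLocks = prev.noLocks && noLocks;
    }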
@@ -1045,8 +1080,17 @@ export class PathValueProxyWatcher {
         }

         let now = Date.now();
+        let debugName = (
+            options.debugName
+            || options.watchFunction.name
+            || options.watchFunction.toString().replaceAll("\n", "\\n").replaceAll(/ +/g, " ").slice(0, 50)
+        );
+        if (debugName === "watchFunction" && options.baseFunction) {
+            debugName = options.baseFunction.name || options.baseFunction.toString().replaceAll("\n", "\\n").replaceAll(/ +/g, " ").slice(0, 50);
+        }
         let watcher: SyncWatcher = {
-
+            seqNum: nextSeqNum++,
+            debugName,
             dispose: () => { },
             disposed: false,
             onInnerDisposed: [],
@@ -1091,6 +1135,7 @@ export class PathValueProxyWatcher {
             hackHistory: [],
             createTime: getTimeUnique(),
         };
+        addToProxyOrder(watcher);
         const SHOULD_TRACE = this.SHOULD_TRACE(watcher);
         const proxy = this.proxy;

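createWatcher now derives a readable debug name when none is supplied: an explicit debugName wins, then the watch function's own name, then a flattened slice of its source, with baseFunction consulted when the wrapper is just called "watchFunction". A minimal standalone version of that fallback chain:

    // Prefer an explicit name, then the function's name, then a truncated single-line source slice.
    function deriveDebugName(fnc: Function, explicit?: string): string {
        return (
            explicit
            || fnc.name
            || fnc.toString().replace(/\n/g, "\\n").replace(/ +/g, " ").slice(0, 50)
        );
    }

    deriveDebugName(() => 1 + 1); // "() => 1 + 1" (anonymous arrows fall back to their source text)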
@@ -1131,6 +1176,19 @@ export class PathValueProxyWatcher {
             watcher.pendingUnsyncedAccesses.size > 0
             || watcher.pendingUnsyncedParentAccesses.size > 0
             || watcher.specialPromiseUnsynced
+            || isProxyBlockedByOrder(watcher)
+        );
+    };
+    const getReadyToCommit = () => {
+        let blocked = isProxyBlockedByOrder(watcher);
+        if (blocked) {
+            console.info(`Proxy ${watcher.debugName} is blocked by order`);
+        }
+        return (
+            watcher.lastUnsyncedAccesses.size === 0
+            && watcher.lastUnsyncedParentAccesses.size === 0
+            && !watcher.lastSpecialPromiseUnsynced
+            && !isProxyBlockedByOrder(watcher)
         );
     };
     function afterTrigger() {
@@ -1142,6 +1200,7 @@
     }

     function dispose() {
+        if (watcher.disposed) return;
         watcher.disposed = true;
         clientWatcher.unwatch(trigger);
         self.allWatchers.delete(watcher);
@@ -1152,6 +1211,9 @@
         }

         self.allWatchersLookup.delete(trigger);
+
+        // TODO: Is this guaranteed to be called all the time? What about nested proxy watchers?
+        void finishProxyAndTriggerNext(watcher);
     }
     function runWatcher() {
         watcher.pendingWrites.clear();
@@ -1169,6 +1231,7 @@
         // IMPORTANT! Reset onInnerDisposed, so onCommitFinished doesn't result in a callback
         //  per time we wan the watcher!
         watcher.onInnerDisposed = [];
+        watcher.specialPromiseUnsynced = false;

         // NOTE: If runAtTime is undefined, the writeTime will be undefined, causing us to read the latest data.
         //  When we finish running we will determine a lock time > any received times.
@@ -1219,18 +1282,21 @@
         // to do the permissions checks if they want them
         throw new Error(`Nested synced function calls are not allowed. Call the function directly, or use Querysub.onCommitFinished to wait for the function to finish.`);
     } else if (handling === "after" || handling === undefined) {
-        //
-        //
-        //
-        //
-
-
-
-
-
-
-
-
+        // UPDATE: This isn't feasible. For one, it has n squared complexity. Also, I don't think it would work in the case if you make two function calls within one function? So it seems like it would only ever work if the function calls were in different functions?
+        // UPDATE: Use `if (Querysub.syncAnyPredictionsPending()) return;` at the start of a function if you want to emulate this behavior
+        //  - I think the solution for this is that the second function itself needs to wait for all predictions to finish. It's annoying, but I think it's required...
+        // // We need to wait for predictions to finish, otherwise we run into situations
+        // //   where we call a function which should change a parameter we want to pass
+        // //   to another function, but because the first call didn't predict, the second
+        // //   call gets a different values, causing all kinds of issues.
+        // if (watcher.pendingCalls.length === 0) {
+        //     let waitPromise = onAllPredictionsFinished();
+        //     if (waitPromise) {
+        //         proxyWatcher.triggerOnPromiseFinish(waitPromise, {
+        //             waitReason: "Waiting for predictions to finish",
+        //         });
+        //     }
+        // }

         watcher.pendingCalls.push({ call, metadata });
     } else if (handling === "ignore") {
@@ -1280,13 +1346,14 @@
         watcher.permissionsChecker = undefined;
     }

+
+    let anyUnsynced = watcher.hasAnyUnsyncedAccesses();
+
     watcher.lastUnsyncedAccesses = watcher.pendingUnsyncedAccesses;
     watcher.lastUnsyncedParentAccesses = watcher.pendingUnsyncedParentAccesses;
     watcher.lastSpecialPromiseUnsynced = watcher.specialPromiseUnsynced;
-    const specialPromiseUnsynced = watcher.specialPromiseUnsynced;
-    watcher.specialPromiseUnsynced = false;

-
+    // TODO: Add promise delays as well as the finish order reordering delays to the sync timings.
     if (watcher.options.logSyncTimings) {
         if (anyUnsynced) {
             watcher.logSyncTimings = watcher.logSyncTimings || {
@@ -1387,12 +1454,15 @@
         }, 60000);
     }
     watcher.countSinceLastFullSync++;
+    if (watcher.countSinceLastFullSync > 10) {
+        console.warn(`Watcher ${watcher.debugName} has been unsynced for ${watcher.countSinceLastFullSync} times. This is fine, but maybe optimize it. Why is it cascading?`, watcher.lastUnsyncedAccesses, watcher.lastUnsyncedParentAccesses, watcher.options.watchFunction);
+    }
     if (watcher.countSinceLastFullSync > 500) {
         debugger;
         // NOTE: Using forceEqualWrites will also fix this a lot of the time, such as when
         //  a write contains random numbers or dates.
         let errorMessage = `Too many attempts (${watcher.countSinceLastFullSync}) to sync with different values. If you are reading in a loop, make sure to read all the values, instead of aborting the loop if a value is not synced. ALSO, make sure you don't access paths with Math.random() or Date.now(). This will prevent the sync loop from ever stabilizing.`;
-        if (
+        if (watcher.lastSpecialPromiseUnsynced) {
             errorMessage += ` A promise is being accessed, so it is possible triggerOnPromiseFinish is being used on a new promise every loop, which cannot work (you MUST cache MaybePromise and replace the value with a non-promise, otherwise it will never be available synchronously!)`;
         }
         errorMessage += ` (${watcher.debugName}})`;
@@ -1468,7 +1538,9 @@
     if (!readTime) {
         readTime = writeTime;
     }
-    for (let
+    for (let valueObj of values.values()) {
+        if (valueObj.noLocks) continue;
+        let value = valueObj.pathValue;
         // If any value has no locks, AND is local, it won't be rejected, so we don't need to lock it
         if (value.lockCount === 0 && value.path.startsWith(LOCAL_DOMAIN_PATH)) {
             continue;
@@ -1544,6 +1616,11 @@
         throw new Error(`Too many locks for ${watcher.debugName} (${locks.length} > ${maxLocks}). Use Querysub.noLocks(() => ...) around code that is accessing too many values, assuming you don't want to lock them. You can override max locks with maxLocksOverride (in options / functionMetadata).`);
     }

+
+    // if (debugName.includes("setBookNodes")) {
+    //     debugger;
+    // }
+
     setValues = clientWatcher.setValues({
         values: watcher.pendingWrites,
         eventPaths: watcher.pendingEventWrites,
@@ -1556,37 +1633,37 @@
         });
     }

-
-
-
-
-
-
-
-
+    doCallCreation(watcher, () => {
+        // TODO: Maybe return this in some way, so dryRun can know what calls we want to call?
+        for (let { call, metadata } of watcher.pendingCalls) {
+            // The calls have to happen after our local writes. This is because they are likely to
+            //  influence the local writes, and we don't want our local writes to be always invalidated
+            call.runAtTime = getNextTime();
+            logErrors(runCall(call, metadata));
+            watcher.options.onCallCommit?.(call, metadata);
+        }
+    });

     return setValues;

 }
 const hasUnsyncedBefore = measureWrap(function proxyWatchHasUnsyncedBefore() {
-    // NOTE: We COULD remove any synced values, however... we will generally sync all values at once,
-    //  so we don't really need to optimize the cascading case here. Also... deleting values
-    //  requires cloning while we iterate, as well as mutating the set, which probably makes the
-    //  non-cascading case slower.
-    let anyUnsynced = false;
+    // NOTE: We COULD remove any synced values from lastUnsyncedAccesses, however... we will generally sync all values at once, so we don't really need to optimize the cascading case here. Also... deleting values requires cloning while we iterate, as well as mutating the set, which probably makes the non-cascading case slower.
     for (let path of watcher.lastUnsyncedAccesses) {
         if (!authorityStorage.isSynced(path)) {
-
+            return true;
         }
     }
     for (let path of watcher.lastUnsyncedParentAccesses) {
         if (!authorityStorage.isParentSynced(path)) {
-
+            return true;
         }
     }
+    // Actually, a lot of calls are going to be blocked by the proxy order, and we want to run them in parallel. So we have all the values synced, so we're ready to finish them when the next one finally finishes.
+    //if (isProxyBlockedByOrder(watcher)) return true;
     // NOTE: We don't check promises as they often access non-synced code, and so we might retrigger
     //  and not use the same promise, so it might be wrong to check them.
-    return
+    return false;
 });
 function logUnsynced() {
     let anyLogged = false;
@@ -1718,11 +1795,7 @@
 const nonLooping = options.runImmediately || options.allowUnsyncedReads;

 const readyToCommit = (
-    nonLooping || (
-        watcher.lastUnsyncedAccesses.size === 0
-        && watcher.lastUnsyncedParentAccesses.size === 0
-        && !watcher.lastSpecialPromiseUnsynced
-    )
+    nonLooping || getReadyToCommit()
 );

 if (!nonLooping) {
@@ -1919,12 +1992,7 @@
 /** Run the same as usual, but instead of committing writes, returns them. */
 public async dryRunFull(
     options: Omit<WatcherOptions<any>, "onResultUpdated" | "onWriteCommitted">
-): Promise<{
-    writes: PathValue[];
-    readPaths: Set<string>
-    readParentPaths: Set<string>;
-    result: unknown;
-}> {
+): Promise<DryRunResult> {
     type Result = {
         writes: PathValue[];
         readPaths: Set<string>
@@ -1934,7 +2002,7 @@
     let onResult!: (result: Result) => void;
     let onError!: (error: Error) => void;
     let resultPromise = new Promise<Result>((resolve, reject) => { onResult = resolve; onError = reject; });
-    this.createWatcher({
+    let watcher = this.createWatcher({
         ...options,
         canWrite: true,
         dryRun: true,
@@ -1953,7 +2021,8 @@
             }
         }
     });
-
+    let result = await resultPromise;
+    return result;
 }


@@ -1981,7 +2050,7 @@
 }

 public isAllSynced() {
-    return !this.
+    return !this.getTriggeredWatcherMaybeUndefined()?.hasAnyUnsyncedAccesses();
 }
 /** @deprecated try not to call getTriggeredWatcherMaybeUndefined, and instead try to call Querysub helper
  *  functions. getTriggeredWatcherMaybeUndefined exposes too much of our interface, which we need
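dryRunFull above captures its result by exposing a promise's resolve and reject callbacks, then awaiting that promise before returning the shared DryRunResult type. The capture-then-await pattern on its own, as a small self-contained sketch:

    // Expose resolve/reject so a later callback can complete the promise, then await it.
    async function runOnce<T>(start: (onResult: (value: T) => void, onError: (err: Error) => void) => void): Promise<T> {
        let onResult!: (value: T) => void;
        let onError!: (err: Error) => void;
        const resultPromise = new Promise<T>((resolve, reject) => { onResult = resolve; onError = reject; });
        start(onResult, onError);
        return await resultPromise;
    }

    // Example: the callback fires later (next tick here); the caller simply awaits.
    runOnce<number>(resolve => { setTimeout(() => resolve(42), 0); }).then(v => console.log(v)); // 42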
@@ -2289,3 +2358,100 @@ registerMeasureInfo(() => {
     triggerCount = 0;
     return `${formatPercent(current)} paint dropped triggers`;
 });
+
+
+
+
+// #region Proxy Ordering
+let proxiesOrdered: SyncWatcher[] = [];
+function getProxyOrder(proxy: SyncWatcher): string | undefined {
+    // We can't wait if we're running immediately.
+    if (proxy.options.runImmediately) return undefined;
+
+    let value = proxy.options.finishInStartOrder;
+
+    if (value === false) return undefined;
+    if (value === true) {
+        value = proxy.createTime;
+    }
+    if (value === undefined) return undefined;
+    return getPathStr2(value.toFixed(10).padStart(30, "0") + "", proxy.seqNum + "");
+}
+function addToProxyOrder(proxy: SyncWatcher) {
+    let order = getProxyOrder(proxy);
+    if (order === undefined) return;
+    let index = binarySearchBasic(proxiesOrdered, x => getProxyOrder(x) || 0, order);
+    if (index >= 0) {
+        throw new Error(`Proxy ${proxy.debugName} is already in the proxy order at index ${index}? This shouldn't be possible, as the sequence number should make it unique. `);
+    }
+    insertIntoSortedList(proxiesOrdered, x => getProxyOrder(x) || 0, proxy);
+    setTimeout(() => {
+        let index = binarySearchBasic(proxiesOrdered, x => getProxyOrder(x) || 0, order);
+        if (index >= 0) {
+            if (proxiesOrdered.length > (index + 1)) {
+                // Only warn if there's actually proxies after us.
+                console.warn(`Allowing out-of-order proxy finishing due to long-running proxy not resolving. Not resolving: ${proxy.debugName} timed out after ${formatTime(MAX_PROXY_REORDER_BLOCK_TIME)}`);
+            }
+            void finishProxyAndTriggerNext(proxy, true);
+        }
+    }, MAX_PROXY_REORDER_BLOCK_TIME);
+}
+
+// - This should be fine though, because if we're doing this for an async call, it means they're already expecting the result back within a promise. So waiting another promise shouldn't mess up the ordering.
+const finishProxyAndTriggerNext = runInSerial(
+    async function finishProxyAndTriggerNext(proxy: SyncWatcher, fromTimeout: boolean = false) {
+        let order = getProxyOrder(proxy);
+        if (order === undefined) return;
+        let index = binarySearchBasic(proxiesOrdered, x => getProxyOrder(x) || 0, order);
+        if (index < 0) {
+            console.warn(`Proxy ${proxy.debugName} is not in the proxy order at index ${index}? This shouldn't be possible, don't we only dispose it once? `);
+            return;
+        }
+
+        // NOTE: We have to wait to the next tick, as finish order code is most often on for async functions, in which case, even though we will have called onResultUpdated, the caller He's waiting on a promise, so we need to give it a tick to actually receive the value.
+        //  - This shouldn't cause any actual slowdown because people are calling the functions and usually awaiting them, so they won't notice a difference.
+        // HACK: We wait 10 times because I guess there's a lot of change promises. I found five works, but adding more is fine. This is pretty bad, but... I don't think there's anything that can possibly be expecting an ordered function to finish synchronously. Anything that is waiting on the ordered function is going to actually be waiting with an await, so we could iterate 100 times here and it wouldn't break anything.
+        for (let i = 0; i < 10; i++) {
+            await Promise.resolve();
+        }
+
+        proxiesOrdered.splice(index, 1);
+        let next = proxiesOrdered[index];
+        // Only trigger if it's the first entry and therefore it won't be blocked by the previous proxy, or, of course, if it's from a timeout, In which case, it won't be blocked anyway.
+        if (next && (index === 0 || fromTimeout)) {
+            console.info(`Triggering next proxy in order: ${next.debugName}`, next.options.baseFunction || next.options.watchFunction);
+            next.explicitlyTrigger({
+                newParentsSynced: new Set(),
+                pathSources: new Set(),
+                paths: new Set(),
+                extraReasons: [`Delayed due to proxy order enforcement. Previous proxy finished, so we can now finish. Previous was ${proxy.debugName}`],
+            });
+        }
+    }
+);
+function isProxyBlockedByOrder(proxy: SyncWatcher): boolean {
+    let order = getProxyOrder(proxy);
+    if (order === undefined) return false;
+    let index = binarySearchBasic(proxiesOrdered, x => getProxyOrder(x) || 0, order);
+    if (index <= 0) return false;
+    return true;
+}
+
+let currentCallCreationProxy: SyncWatcher | undefined = undefined;
+function doCallCreation(proxy: SyncWatcher, code: () => void) {
+    let prev = currentCallCreationProxy;
+    currentCallCreationProxy = proxy;
+    try {
+        return code();
+    } finally {
+        currentCallCreationProxy = prev;
+    }
+}
+export function debug_getQueueOrder() {
+    return proxiesOrdered;
+}
+export function getCurrentCallCreationProxy() {
+    return currentCallCreationProxy;
+}
+
+// #endregion Proxy Ordering
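The Proxy Ordering region above keeps a list sorted by start order, only lets the entry at the front finish, and uses a timeout as an escape hatch so one stuck entry cannot block everything behind it forever. A compact standalone sketch of that queue discipline (simplified: plain array search and insertion order instead of the package's sorted-key and binary-search helpers):

    type Pending = { name: string; finish: () => void };

    const MAX_BLOCK_TIME_MS = 10 * 1000;
    const queue: Pending[] = [];

    // Entries may only finish once they reach the front of the queue (finish order == start order).
    function enqueue(entry: Pending): void {
        queue.push(entry);
        // Escape hatch: if the entry is still queued after the timeout, let later entries proceed.
        setTimeout(() => {
            if (queue.includes(entry)) {
                console.warn(`Allowing out-of-order finish, ${entry.name} did not resolve in time`);
                finishAndTriggerNext(entry, true);
            }
        }, MAX_BLOCK_TIME_MS);
    }

    function isBlocked(entry: Pending): boolean {
        return queue.indexOf(entry) > 0; // anything behind the head has to wait
    }

    function finishAndTriggerNext(entry: Pending, fromTimeout = false): void {
        const index = queue.indexOf(entry);
        if (index < 0) return;
        queue.splice(index, 1);
        const next = queue[index];
        // Only wake the next entry if it is now at the front (or a timeout already unblocked it).
        if (next && (index === 0 || fromTimeout)) {
            next.finish();
        }
    }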
@@ -119,7 +119,8 @@ let getWatchEvaluator = lazy((): {
     if (nextGetReadLocks) {
         lastResult.readLocks = [];
         for (let value of watcher.pendingAccesses) {
-            for (let
+            for (let vObj of value[1].values()) {
+                let v = vObj.pathValue;
                 lastResult.readLocks.push({
                     path: v.path,
                     startTime: v.time,
@@ -37,13 +37,14 @@ export function getExportPath(functionId: string): string {

 export type FunctionMetadata<F = unknown> = {
     nopredict?: boolean;
-    /**
-     * -
-     *
-     * -
-     *
-     *
-     *
+    /** Removes unnecessary function calls by first delaying by Querysub.DELAY_COMMIT_DELAY, Then looking at their predictions and removing ones where their predictions completely clobber the other ones.
+     *  - Determines unnecessary calls via our function prediction. If our function is prediction is wrong, we might remove calls that shouldn't be removed.
+     *  - This is quite dangerous, but extremely useful for things such as editors when you want to type and have things update quickly, but you don't want to spam the server with calls.
+     *  - Only applied for clients.
+     *  - We will always preserve the order of function calls in the same schema, so if you make another function call that doesn't have delay commit in the same schema, then we will forcefully commit all the delayed commit functions first.
+     *  - This shouldn't cause an excessive amount of function commits. The delayed commit time is very small, maybe around five seconds. And if you are regularly having commits that are faster than that, that aren't delayed commit, then you're already having a lot of commits. And we will still remove unnecessary calls, even if we have to commit them earlier than the delay.
+     *  - IMPORTANT! Our check for if values should be removed isn't perfect. If you write to a path that another value reads from, and then it writes that value to a path that we then read from... This would mean our value can't be clobbered as it has side effects which would affect values that could regularly clobber us. However, we don't detect this case, and we will just allow it to be clobbered.
+     *  - However, when we clobber values, we will cancel them client-side fairly quickly, so it should be apparent if this bug happens, in which case the writes you're doing are too complicated and you shouldn't be using delay commit. Delay commit is only really intended for simple cases, such as the user presses a key and then you set some text equal to a result based on that key (It doesn't work with deltas, You need to be forcefully setting the value).
      */
     delayCommit?: boolean;
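The delayCommit documentation above describes dropping earlier delayed calls whose predicted writes are completely overwritten ("clobbered") by later calls, which is how repeated editor keystrokes collapse into a single commit. A simplified standalone sketch of that clobber check (the commit delay, per-schema ordering, and prediction accuracy described above are not modelled here):

    type PendingCall = { id: string; predictedWritePaths: Set<string> };

    // Keep a call only if at least one of its predicted writes is NOT overwritten by a later call.
    function removeClobberedCalls(calls: PendingCall[]): PendingCall[] {
        const kept: PendingCall[] = [];
        for (let i = 0; i < calls.length; i++) {
            const laterWrites = new Set<string>();
            for (let j = i + 1; j < calls.length; j++) {
                for (const path of calls[j].predictedWritePaths) laterWrites.add(path);
            }
            const fullyClobbered = calls[i].predictedWritePaths.size > 0
                && [...calls[i].predictedWritePaths].every(path => laterWrites.has(path));
            if (!fullyClobbered) kept.push(calls[i]);
        }
        return kept;
    }

    // Example: three keystrokes that each rewrite the same text path collapse to the last call.
    const collapsed = removeClobberedCalls([
        { id: "setText(1)", predictedWritePaths: new Set(["doc/text"]) },
        { id: "setText(12)", predictedWritePaths: new Set(["doc/text"]) },
        { id: "setText(123)", predictedWritePaths: new Set(["doc/text"]) },
    ]);
    console.log(collapsed.map(c => c.id)); // ["setText(123)"]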