querysub 0.340.0 → 0.341.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,13 +2,13 @@ import { cacheJSONArgsEqual, cacheWeak, lazy } from "socket-function/src/caching
2
2
  import { pathValueCommitter } from "../0-path-value-core/PathValueController";
3
3
  import { CLIENTSIDE_PREDICT_LEEWAY, MAX_ACCEPTED_CHANGE_AGE, MAX_CHANGE_AGE, PathValue, ReadLock, Time, authorityStorage, compareTime, debugTime, getCreatorId, getNextTime, lockToCallback, predictionLockVersion, timeMinusEpsilon } from "../0-path-value-core/pathValueCore";
4
4
  import { remoteWatcher } from "../1-path-client/RemoteWatcher";
5
- import { proxyWatcher, atomicObjectRead } from "../2-proxy/PathValueProxyWatcher";
5
+ import { proxyWatcher, atomicObjectRead, DryRunResult, getCurrentCallCreationProxy, debug_getQueueOrder } from "../2-proxy/PathValueProxyWatcher";
6
6
  import { getPathFromProxy, getProxyPath } from "../2-proxy/pathValueProxy";
7
7
  import { CallSpec, FunctionResult, FunctionSpec, debugCallSpec, functionSchema, overrideCurrentCall } from "../3-path-functions/PathFunctionRunner";
8
8
  import { getModuleFromConfig, setGitURLMapping } from "../3-path-functions/pathFunctionLoader";
9
9
  import { logErrors } from "../errors";
10
- import { getParentPathStr, getPathFromStr, getPathStr1, getPathStr2 } from "../path";
11
- import { Querysub, QuerysubController, QuerysubControllerBase, baseAddCall, callWaitOn, querysubNodeId } from "./QuerysubController";
10
+ import { getParentPathStr, getPathFromStr, getPathStr1, getPathStr2, getPathStr3 } from "../path";
11
+ import { Querysub, QuerysubController, QuerysubControllerBase, baseAddCall, registerPredictionBlocker, querysubNodeId } from "./QuerysubController";
12
12
  import { Benchmark } from "../diagnostics/benchmark";
13
13
  import { parseArgs } from "../3-path-functions/PathFunctionHelpers";
14
14
  import { runInSerial } from "socket-function/src/batching";
@@ -18,11 +18,14 @@ import { pathValueSerializer } from "../-h-path-value-serialize/PathValueSeriali
18
18
  import { setFlag } from "socket-function/require/compileFlags";
19
19
  import cbor from "cbor-x";
20
20
  import { FunctionMetadata } from "../3-path-functions/syncSchema";
21
- import { isNode, nextId, sort } from "socket-function/src/misc";
21
+ import { PromiseObj, isNode, nextId, sort } from "socket-function/src/misc";
22
22
  import { getBrowserUrlNode } from "../-f-node-discovery/NodeDiscovery";
23
23
  import { isLocal } from "../config";
24
24
  import { onAllPredictionsFinished } from "../-0-hooks/hooks";
25
25
  import { t } from "../2-proxy/schema2";
26
+ import { clientWatcher } from "../1-path-client/pathValueClientWatcher";
27
+ import { cacheAsyncLimitedJSON } from "../functional/promiseCache";
28
+ import { addEpsilons } from "../bits";
26
29
  setFlag(require, "cbor-x", "allowclient", true);
27
30
  const cborEncoder = lazy(() => new cbor.Encoder({ structuredClone: true }));
28
31
 
@@ -46,11 +49,12 @@ async function getPredictController() {
46
49
  // but we really should just give it the moduleId, or... even just avoid calling it altogether, as it doesn't
47
50
  // do too much for us if we already have the fully resolved path...
48
51
  // - Although using it DOES allow permissions checks to work nicely, so, eh... maybe it is fine to use pathFunctionLoader?
49
- const addModuleToLoader = cacheJSONArgsEqual(async (spec: FunctionSpec): Promise<void> => {
52
+ const addModuleToLoader = cacheJSONArgsEqual(async (spec: FunctionSpec): Promise<true> => {
50
53
  let controller = await getPredictController();
51
54
 
52
55
  let path = await controller.getModulePath({ functionSpec: spec });
53
56
  setGitURLMapping({ spec, resolvedPath: path });
57
+ return true;
54
58
  });
55
59
 
56
60
  const getDevFunctionSpecFromCall = cacheJSONArgsEqual(async (call: {
@@ -73,239 +77,42 @@ export function getCallResult(call: CallSpec) {
73
77
  /** Force predictions to run in the trigger order, so they can resolve and be added before
74
78
  * the next predictions. Also, to preserve call order.
75
79
  */
76
- const predictRunCommitLoop = runInSerial((run: () => Promise<PredictResult>) => run());
80
+ //const predictRunCommitLoop = runInSerial((run: () => Promise<PredictResult | undefined>) => run());
81
+ // UPDATE: We no longer run them in serial, as this caused a lot of lag if we ran a lot of functions which access unique values they needed to sync. Generally speaking, most predictions should run in serial anyway, as most functions should be accessing already synchronized values.
82
+ const predictRunCommitLoop = (run: () => Promise<PredictResult | undefined>) => run();
77
83
 
78
84
 
79
- interface DelayedCall {
80
- call: CallSpec;
81
- time: number;
82
- commit: () => void;
83
- cancel: () => void;
84
-
85
- result?: PredictResult;
86
- }
87
-
88
- let callPending: DelayedCall[] = [];
89
- function flushUpToIncluding(time: number) {
90
- while (callPending.length > 0) {
91
- if (callPending[0].time > time) break;
92
- let call = callPending.shift()!;
93
- call.commit();
94
- }
95
- updateSyncedCount();
96
- }
97
85
 
98
- export function flushDelayedFunctions() {
99
- while (callPending.length > 0) {
100
- flushUpToIncluding(callPending[callPending.length - 1].time);
101
- }
102
- }
103
86
 
104
- const pendingState = lazy(() => Querysub.createLocalSchema("querysubPrediction", {
105
- count: t.number
106
- })());
107
- export function getPendingCountSynced() {
108
- return pendingState().count;
109
- }
110
- function updateSyncedCount() {
111
- Querysub.commitLocal(() => {
112
- pendingState().count = callPending.length;
113
- });
87
+ // IMPORTANT! This has to be synchronous, and we need to synchronously get into the proxy watcher call. That way other calls after this know to wait for our proxy watcher to finish. Otherwise, it's very, very, very easy to write code, where you call a function, you expect it to write to a value, and then you run some other code, maybe which just uses a commit async, and tries to read from that value. But the value won't exist, because we won't even have started the prediction yet, and the proxy watcher can't possibly know to wait, because we haven't started the prediction. So... this has to be synchronous!
88
+ export function predictCall(call: CallSpec, metadata: FunctionMetadata): {
89
+ cancel: () => void;
90
+ predictPromise: Promise<PredictResult | undefined>;
91
+ } {
92
+ let predictObj = predictCallBase({ call, metadata });
93
+ let cancel = predictObj.cancel;
94
+ registerPredictionBlocker(call.CallId, predictObj.predictPromise);
95
+ return { cancel, predictPromise: predictObj.predictPromise };
114
96
  }
115
97
 
116
-
117
-
118
- export const addCall = runInSerial(async function addCall(call: CallSpec, metadata: FunctionMetadata) {
119
- const nodeId = await querysubNodeId();
120
- if (!nodeId) throw new Error("No querysub node found");
121
-
122
- // TODO: Use some heuristics to determine if we should predict the call or not (ex, based on predicted time
123
- // to run, current load, latency to server, time for server to run it, time to sync the necessary data, etc)
124
- // TODO: Allow some flags to tell us NOT to predict a call
125
- let predict = Querysub.PREDICT_CALLS;
126
- if (metadata.nopredict) {
127
- predict = false;
128
- }
129
-
130
- // NOTE: We predict when call.filterable, as filterable usually just means we are testing
131
- // new functions in development (instead of targeting specific hardware). If we find this
132
- // is annoying, we could add a flag in filterable that explicitly screens out prediction?
133
-
134
- let cancel = () => { };
135
-
136
- if (predict) {
137
- let delayed = metadata.delayCommit && !isNode();
138
- let predictObj = predictCall({ call, localOnly: delayed });
139
- cancel = predictObj.cancel;
140
- callWaitOn(call.CallId, predictObj.predictPromise);
141
- if (metadata.delayCommit && !isNode()) {
142
- let hasRun = false;
143
- let delayedCall: DelayedCall = {
144
- call,
145
- time: Date.now(),
146
- commit() {
147
- if (hasRun) return;
148
- hasRun = true;
149
- logErrors(baseAddCall(call, nodeId, cancel, `call (delayed from ${debugTime(call.runAtTime)})`));
150
- },
151
- cancel() { predictObj.cancel(); },
152
- };
153
- callPending.push(delayedCall);
154
- updateSyncedCount();
155
- setTimeout(() => flushUpToIncluding(delayedCall.time), Querysub.DELAY_COMMIT_DELAY);
156
- logErrors(predictObj.predictPromise.then(async (prediction): Promise<void> => {
157
- prediction = { ...prediction };
158
- const resultPrefix = getProxyPath(() => functionSchema()[call.DomainName].PathFunctionRunner[call.ModuleId].Results);
159
- prediction.writes = prediction.writes.filter(write => !write.path.startsWith(resultPrefix));
160
- delayedCall.result = prediction;
161
-
162
- let index = callPending.indexOf(delayedCall);
163
- if (index === -1) return;
164
- let ourWritePaths = new Set(prediction.writes.map(x => x.path));
165
- function candidateCouldBeChangedByWritesSimilarToOurs(candidate: PredictResult) {
166
- return (
167
- candidate.writes.some(write => ourWritePaths.has(write.path))
168
- || candidate.writes.some(write => candidate.readPaths.has(write.path))
169
- || candidate.writes.some(write =>
170
- candidate.readParentPaths.has(getParentPathStr(write.path))
171
- || candidate.readParentPaths.has(write.path)
172
- )
173
- );
174
- }
175
-
176
- // NOTE: Technically... we can remove a write if:
177
- // - latest.writes.paths is a superset of candidate.writes.paths
178
- // - (candidate <= intermediate < latest).all(x =>
179
- // test replace writes of x with previous values of x, as time of x
180
- // rerun writes up to and including latest
181
- // verify final resolved writes are equivalent as the final writes as if we had the candidate write
182
- // )
183
- // HOWEVER, the last condition is overly arduous to implement, wildly inefficient to calculate, so
184
- // we can simplify it to a more conservative (removes less candidates, but still is never wrong):
185
- // - (candidate < intermediate < latest).all(x => none of x.write.paths are in latest.writes.paths and none of x.read.paths are in latest.writes.paths)
186
- // - candidate.writes.paths is a subset of latest.writes.paths
187
- // - test replace writes of x with previous values of x, as time of x
188
- // rerun latest
189
- // verify writes2.writes === writes.writes (paths and values)
190
- // NOTE: See "delayCommit" in syncSchema for the risks of delaying commits.
191
-
192
- // Find the first write that is a candidate (due to our simplified condition this will only be 1).
193
- let curIndex = index - 1;
194
- while (curIndex >= 0) {
195
- let candidatePred = callPending[curIndex].result;
196
- if (!candidatePred) {
197
- // NOTE: This shouldn't happen, but if it does... we could just stop searching here, and not
198
- // remove any predictions.
199
- throw new Error("Predictions finished out of order.");
200
- }
201
- if (candidateCouldBeChangedByWritesSimilarToOurs(candidatePred)) {
202
- break;
203
- }
204
- curIndex--;
205
- }
206
- // Nothing to remove, we don't overlap any other writes
207
- if (curIndex < 0) {
208
- return;
209
- }
210
-
211
- let candidate = callPending[curIndex];
212
- let candidatePred = candidate.result;
213
- if (!candidatePred) {
214
- throw new Error("Predictions finished out of order.");
215
- }
216
- // candidate writes have to be a strict subset of our writes, otherwise there is no way we will clobber the candidate.
217
- if (candidatePred.writes.some(write => !ourWritePaths.has(write.path))) return;
218
-
219
-
220
- let debugName = `[redundant check]|${call.DomainName}.${call.FunctionId}`;
221
- // NOTE: Run right before the call, so we skip it's writes, but use everything else. We could also use
222
- // candidate.time + epsilon, because none of the values between call and candidate matter (they don't write
223
- // to any paths either read from).
224
- let beforeCall = { ...call };
225
- beforeCall.runAtTime = timeMinusEpsilon(getPredictTime(call.runAtTime));
226
- let withoutCandidateResult = await getCallWrites({
227
- call: beforeCall,
228
- debugName,
229
- overrides: candidatePred.replacedWriteValues
230
- });
231
-
232
- function areWritesEqual(writes: PathValue[], writes2: PathValue[]) {
233
- if (writes.length !== writes2.length) return false;
234
- // Write order should probably be the same anyways, so... it is fair to compare in order
235
- for (let i = 0; i < writes.length; i++) {
236
- let write = writes[i];
237
- let write2 = writes2[i];
238
- if (write.path !== write2.path) return false;
239
- if (pathValueSerializer.compareValuePaths(write, write2) !== 0) {
240
- return false;
241
- }
242
- }
243
- return true;
244
- }
245
-
246
- if (!areWritesEqual(withoutCandidateResult.writes, prediction.writes)) {
247
- if (Querysub.DEBUG_CALLS) {
248
- // Failed to collapse delayed functions, because one of the previous predictions was used in the new predicted function. This is valid, but slower than necessary. If pure atomic behavior is not required (which it presumably isn't required, as delayCommit is already being used), try to rewrite the function to be in the form of a set, instead of an add. For example, "function add(count) { x += count; }" => "function set(value) { x = value; }" (calling with set(x + count)).
249
- console.log(`[Querysub] ${yellow("CANNOT DELAY COLLAPSE")} ${debugCallSpec(candidate.call)} `);
250
- }
251
- return;
252
- }
253
-
254
- let index2 = callPending.indexOf(candidate);
255
- if (index2 < 0) return;
256
- // At this point... we KNOW:
257
- // a) There are no writes between us and `candidate` (writes are never inserted in the middle, only removed,
258
- // so this condition will hold despite our delay)
259
- // b) `candidate` is not required for `call` to run
260
- // c) `call` replaces all values inside of `candidate`
261
- // Which means, `candidate` is entirely redundant, and can be removed!
262
-
263
- if (Querysub.DEBUG_CALLS) {
264
- console.log(`[Querysub] ${magenta("removed redundant call")} ${green(debugCallSpec(candidate.call))} @${debugTime(candidate.call.runAtTime)}`);
265
- }
266
- callPending.splice(index2, 1);
267
- updateSyncedCount();
268
- // Cancel as well, in case it isn't redundant, so we can see the issue immediately.
269
- candidate.cancel();
270
-
271
- // Move our time back, otherwise repeated delays can result in not committing writes for
272
- // a long time (indefinitely really), which can result in a lot of lost work, and
273
- // has little speed benefit (comitting a write every 5 seconds is fine).
274
- delayedCall.time = candidate.time;
275
- }));
276
-
277
- if (Querysub.DEBUG_CALLS) {
278
- console.log(`[Querysub] ${magenta("delaying")} ${green(debugCallSpec(call))} @${debugTime(call.runAtTime)}`);
279
- }
280
- return;
281
- }
282
- }
283
-
284
- await flushDelayedFunctions();
285
- // NOTE: NO MORE calls can be queued here, because addCall is run in serial, so... we don't need
286
- // to worry about new calls with times > call.runAtTime (and if it happens we'll get a fairly
287
- // clear error clientside, which will show up in a notification, so it is a very safe assumption).
288
- await baseAddCall(call, nodeId, cancel, "call");
289
- });
290
-
291
98
  function getPredictTime(time: Time) {
292
99
  return { time: time.time, version: predictionLockVersion, creatorId: time.creatorId };
293
100
  }
294
101
 
295
- interface PredictResult {
102
+ export interface PredictResult {
296
103
  readPaths: Set<string>;
297
104
  readParentPaths: Set<string>;
298
105
  writes: PathValue[];
299
106
  /** Writes that have been replaced AT the write time of the prediction */
300
107
  replacedWriteValues: PathValue[];
301
108
  }
302
- function predictCall(config: {
109
+ function predictCallBase(config: {
303
110
  call: CallSpec;
304
- localOnly?: boolean;
111
+ metadata: FunctionMetadata;
305
112
  overrides?: PathValue[];
306
113
  }): {
307
114
  cancel: () => void;
308
- predictPromise: Promise<PredictResult>;
115
+ predictPromise: Promise<PredictResult | undefined>;
309
116
  } {
310
117
  let call = config.call;
311
118
  let pathResultWrite = getCallResultPath(call);
@@ -357,15 +164,14 @@ function predictCall(config: {
357
164
  ];
358
165
  let predictions: PredictResult | undefined;
359
166
  let predictPromise = predictRunCommitLoop(async () => {
167
+ // IMPORTANT! See the addCall note for why we have to NOT have any waiting before getCallWrites.
360
168
  if (Querysub.DEBUG_PREDICTIONS) {
361
169
  console.log(magenta(`Start predict call`), `${call.DomainName}.${call.FunctionId}`);
362
170
  }
363
171
 
364
- if (!config.localOnly) {
365
- // Watch the result, so we know when our prediction is rejected (which will be as soon as the result
366
- // has a real value).
367
- remoteWatcher.watchLatest({ paths: [pathResultWrite], parentPaths: [] });
368
- }
172
+ // Watch the result, so we know when our prediction is rejected (which will be as soon as the result
173
+ // has a real value).
174
+ remoteWatcher.watchLatest({ paths: [pathResultWrite], parentPaths: [] });
369
175
  let debugName = `[predict]|${call.DomainName}.${call.FunctionId}`;
370
176
 
371
177
  let dryRunResult: {
@@ -373,25 +179,52 @@ function predictCall(config: {
373
179
  readPaths: Set<string>;
374
180
  readParentPaths: Set<string>;
375
181
  };
182
+
183
+ let actualValueFinished = new PromiseObj();
184
+ function onActualFinished() {
185
+ if (authorityStorage.getValueAtTime(pathResultWrite, undefined)?.value) {
186
+ actualValueFinished.resolve();
187
+ }
188
+ }
189
+ clientWatcher.setWatches({
190
+ callback: onActualFinished,
191
+ paths: new Set([pathResultWrite]),
192
+ parentPaths: new Set(),
193
+ });
376
194
  try {
377
- dryRunResult = await getCallWrites({ call, debugName, overrides: config.overrides });
378
- } catch (e: any) {
379
-
380
- if (!pathValueSerializer.getPathValue(authorityStorage.getValueAtTime(pathResultWrite, undefined))) {
381
- console.log(`Skipping prediction for ${debugName} due to error running predictive call. Likely just an out of order error.`, e.stack);
382
- } else {
383
- // NOTE: This case happens a lot, because of how we handle locks. We don't receive locked values, and so
384
- // we assume all values have no locks, and only keep the latest. This is usually fine, but... if we lose
385
- // the race to predict the function against the server updating it, it is likely our prediction will now
386
- // be running before the latest write. In which case (as we don't really store write history), we will read undefined.
387
- // This isn't accurate, but... our write WILL almost certainly be wrong (as the value changed), so we don't log here.
195
+
196
+ try {
197
+ let tempDryRunResult = await Promise.race([
198
+ await getCallWrites({ call, debugName, overrides: config.overrides, useFinishReordering: true, metadata: config.metadata }),
199
+ actualValueFinished.promise,
200
+ ]);
201
+ if (!tempDryRunResult) {
202
+ if (Querysub.DEBUG_PREDICTIONS || Querysub.DEBUG_CALLS) {
203
+ console.log(magenta(`Abort predict call before prediction finished, already received call result`), `${call.DomainName}.${call.FunctionId}`);
204
+ }
205
+ return undefined;
206
+ }
207
+ dryRunResult = tempDryRunResult;
208
+ } catch (e: any) {
209
+
210
+ if (!pathValueSerializer.getPathValue(authorityStorage.getValueAtTime(pathResultWrite, undefined))) {
211
+ console.log(`Skipping prediction for ${debugName} due to error running predictive call. Likely just an out of order error.`, e.stack);
212
+ } else {
213
+ // NOTE: This case happens a lot, because of how we handle locks. We don't receive locked values, and so
214
+ // we assume all values have no locks, and only keep the latest. This is usually fine, but... if we lose
215
+ // the race to predict the function against the server updating it, it is likely our prediction will now
216
+ // be running before the latest write. In which case (as we don't really store write history), we will read undefined.
217
+ // This isn't accurate, but... our write WILL almost certainly be wrong (as the value changed), so we don't log here.
218
+ }
219
+ return {
220
+ writes: [],
221
+ readPaths: new Set(),
222
+ readParentPaths: new Set(),
223
+ replacedWriteValues: [],
224
+ };
388
225
  }
389
- return {
390
- writes: [],
391
- readPaths: new Set(),
392
- readParentPaths: new Set(),
393
- replacedWriteValues: [],
394
- };
226
+ } finally {
227
+ clientWatcher.unwatch(onActualFinished);
395
228
  }
396
229
  predictions = {
397
230
  writes: dryRunResult.writes,
@@ -422,7 +255,7 @@ function predictCall(config: {
422
255
  // code won't properly immediately reject our prediction, as we are not the authority on the
423
256
  // path, so it treats it as source of truth.
424
257
  if (authorityStorage.getValueAtTime(pathResultWrite, undefined)?.value) {
425
- if (Querysub.DEBUG_PREDICTIONS) {
258
+ if (Querysub.DEBUG_PREDICTIONS || Querysub.DEBUG_CALLS) {
426
259
  console.log(magenta(`Abort predict call, already received call result`), `${call.DomainName}.${call.FunctionId}`);
427
260
  }
428
261
  return predictions;
@@ -451,7 +284,7 @@ function predictCall(config: {
451
284
  lockToCallback.watchLock(predictLocks[0], predictions.writes);
452
285
 
453
286
  if (Querysub.DEBUG_PREDICTIONS) {
454
- console.log(magenta(`Finished predict call`), `${call.DomainName}.${call.FunctionId}`);
287
+ console.log(magenta(`Finished and applied predict call`), `${call.DomainName}.${call.FunctionId}`);
455
288
  }
456
289
  return predictions;
457
290
  });
@@ -508,7 +341,7 @@ function predictCall(config: {
508
341
  // clobbering our prediction).
509
342
  // - AND, we can't commit the pending calls until we sort out the order, so this necessarily requires
510
343
  // slowing down commits if we are delaying other calls.
511
- console.warn(`${red("Prediction was wrong")}: ${getPathFromStr(predict.path).join(".")} predict != finalValue`, predictValue, finalValue);
344
+ console.warn(`${red("Prediction was wrong")}: for ${call.DomainName}.${call.FunctionId} value path ${getPathFromStr(predict.path).join(".")} predict != finalValue. ${config.metadata.delayCommit && "This function is using delay commit. It's likely that you are using the time from Querysub.now() (or accessing the function call time in some other way). This doesn't work with delay commit because the server will rewrite the commit time, instead you should pass the timestamp as a parameter (And then make sure it's not some kind of privilege value that the user could cheat)" || ""} It might be the case that you shouldn't even predict the call client side (you can use the functionMetadata to set it as nopredict). It might also be the case that you do want to predict it, but you should batch the calls, so you only make one call, instead of many calls at once.`, predictValue, finalValue);
512
345
  }
513
346
  }
514
347
  }
@@ -520,43 +353,46 @@ function predictCall(config: {
520
353
  };
521
354
  }
522
355
 
523
- export async function getCallWrites(config: {
524
- debugName: string;
525
- call: CallSpec;
526
- overrides?: PathValue[];
527
- }) {
528
- let { call, debugName } = config;
356
+ const getDevFunctionCache = cacheAsyncLimitedJSON(100_000, getDevFunctionSpecFromCall);
357
+ const addToModuleLoaderCache = cacheAsyncLimitedJSON(100_000, addModuleToLoader);
529
358
 
530
- let functionSpec: FunctionSpec | undefined;
359
+ function getFunctionSpec(call: CallSpec): FunctionSpec | undefined {
531
360
  if (isLocal()) {
532
- let obj = await getDevFunctionSpecFromCall({
361
+ let obj = getDevFunctionCache({
533
362
  DomainName: call.DomainName,
534
363
  ModuleId: call.ModuleId,
535
364
  FunctionId: call.FunctionId,
536
365
  });
537
366
  if (!obj) throw new Error(`Function not referenced in deploy.ts ${call.DomainName}.${call.ModuleId}.${call.FunctionId}`);
538
- functionSpec = obj.functionSpec;
539
- setGitURLMapping({ spec: functionSpec, resolvedPath: obj.modulePath });
540
- } else {
541
- const obj = await proxyWatcher.commitFunction({
542
- watchFunction: function getModuleConfig() {
543
- let domainObject = functionSchema()[call.DomainName];
544
- let moduleObject = domainObject.PathFunctionRunner[call.ModuleId];
545
- let functionSpec = atomicObjectRead(moduleObject.Sources[call.FunctionId]);
546
- return { functionSpec };
547
- }
548
- });
549
- functionSpec = obj.functionSpec;
367
+ setGitURLMapping({ spec: obj.functionSpec, resolvedPath: obj.modulePath });
368
+ return obj.functionSpec;
369
+ }
550
370
 
551
- if (!functionSpec) {
552
- throw new Error(`Function not found in database ${call.DomainName}.${call.ModuleId}.${call.FunctionId}`);
553
- }
554
- // Add the module to the loader via asking the server the exact url for this call. The loader will
555
- // then load the code from that url when it ends up running it.
556
- await addModuleToLoader(functionSpec);
371
+ let domainObject = functionSchema()[call.DomainName];
372
+ let moduleObject = domainObject.PathFunctionRunner[call.ModuleId];
373
+ let functionSpec = atomicObjectRead(moduleObject.Sources[call.FunctionId]);
374
+ if (!functionSpec) {
375
+ if (!Querysub.isAllSynced()) return undefined;
376
+ throw new Error(`Function not found in database ${call.DomainName}.${call.ModuleId}.${call.FunctionId}`);
557
377
  }
378
+ // Add the module to the loader via asking the server the exact url for this call. The loader will
379
+ // then load the code from that url when it ends up running it.
380
+ if (!addToModuleLoaderCache(functionSpec)) return undefined;
381
+ return functionSpec;
382
+ }
558
383
 
559
- let module = await getModuleFromConfig(functionSpec);
384
+ function getFunctionInfoBase(call: CallSpec): {
385
+ functionSpec: FunctionSpec;
386
+ baseFunction: Function;
387
+ } | undefined {
388
+ let functionSpec = getFunctionSpec(call);
389
+ if (!functionSpec) return undefined;
390
+
391
+ let module = getModuleFromConfig(functionSpec);
392
+ if (module instanceof Promise) {
393
+ proxyWatcher.triggerOnPromiseFinish(module, { waitReason: `Loading function ${call.DomainName}.${call.ModuleId}.${call.FunctionId}` });
394
+ return undefined;
395
+ }
560
396
  let exportPath = getPathFromStr(functionSpec.exportPathStr);
561
397
  let exportObj = module.exports;
562
398
  for (let path of exportPath) {
@@ -567,8 +403,32 @@ export async function getCallWrites(config: {
567
403
  }
568
404
  let baseFunction = exportObj as Function;
569
405
 
570
- let specTyped = functionSpec;
571
- return await proxyWatcher.dryRunFull({
406
+ return { functionSpec, baseFunction };
407
+ }
408
+
409
+ // IMPORTANT! See the addCall note for why we have to NOT have any waiting before getCallWrites.
410
+ export function getCallWrites(config: {
411
+ debugName: string;
412
+ call: CallSpec;
413
+ overrides?: PathValue[];
414
+ useFinishReordering?: boolean;
415
+ metadata?: FunctionMetadata;
416
+ }): Promise<DryRunResult> {
417
+ let { call, debugName } = config;
418
+
419
+ let finishInStartOrder: number | boolean | undefined;
420
+
421
+ if (config.useFinishReordering) {
422
+ let triggerCaller = getCurrentCallCreationProxy();
423
+ if (!triggerCaller) {
424
+ require("debugbreak")(2);
425
+ debugger;
426
+ throw new Error(`getCallWrites did not happen synchronously when triggering a call. It's very important that we synchronously start the proxy watcher, so we can make sure that predictions happen in a consistent order. FIX THIS! Find the async weighting that was added and move it inside the watch function using cacheAsync to make it watched. For: ${debugName}`);
427
+ }
428
+ finishInStartOrder = addEpsilons(triggerCaller.createTime, 1);
429
+ }
430
+ let first = true;
431
+ return proxyWatcher.dryRunFull({
572
432
  debugName,
573
433
  runAtTime: call.runAtTime,
574
434
  overrideAllowLockDomainsPrefixes: [getPathStr1(call.DomainName)],
@@ -576,10 +436,24 @@ export async function getCallWrites(config: {
576
436
  unsafeNoLocks: true,
577
437
  overrides: config.overrides,
578
438
  nestedCalls: "inline",
439
+ // Run after our trigger function. This will insert us before functions which may have been run before us, but after our trigger function.
440
+ finishInStartOrder,
441
+ predictMetadata: config.metadata,
579
442
  watchFunction() {
580
- return overrideCurrentCall({ spec: call, fnc: specTyped }, () => {
443
+ let fncObj = getFunctionInfoBase(call);
444
+ if (!fncObj) return undefined;
445
+ let { functionSpec, baseFunction } = fncObj;
446
+ return overrideCurrentCall({ spec: call, fnc: functionSpec }, () => {
447
+ if (first) {
448
+ if (Querysub.DEBUG_PREDICTIONS) {
449
+ console.log(magenta(`Loaded predict function and first run`), `${call.DomainName}.${call.FunctionId}`);
450
+ }
451
+ }
452
+ first = false;
453
+
581
454
  let args = parseArgs(call);
582
- return baseFunction(...args);
455
+ let result = baseFunction(...args);
456
+ return result;
583
457
  });
584
458
  },
585
459
  });
@@ -81,7 +81,7 @@ export async function sendErrorDigestEmail(digestInfo: ErrorDigestInfo) {
81
81
  await sendEmail({
82
82
  to: notifyEmails,
83
83
  fromPrefix: "error-digest",
84
- subject: `${errorCount} errors | ${formatNumber(failingFiles)} failing files | ${warningCount} warnings${corruptErrors + corruptWarnings > 0 ? ` | ${corruptErrors + corruptWarnings} corrupt` : ""} | ${formatTime(digestInfo.scanDuration)} | ${formatNumber(digestInfo.totalCompressedBytes)} / ${formatNumber(digestInfo.totalUncompressedBytes)} | ${formatNumber(digestInfo.totalFiles)} files`,
84
+ subject: `${errorCount} err | >= ${formatNumber(failingFiles)} lines | ${warningCount} warn${corruptErrors + corruptWarnings > 0 ? ` | ${corruptErrors + corruptWarnings} corrupt` : ""} | ${suppressedErrors + suppressedWarnings} suppressed | ${formatTime(digestInfo.scanDuration)} | ${formatNumber(digestInfo.totalCompressedBytes)} / ${formatNumber(digestInfo.totalUncompressedBytes)} | ${formatNumber(digestInfo.totalFiles)} log files`,
85
85
  contents: <div>
86
86
  <h2>Error Summary</h2>
87
87
  <ul style="list-style-type: none; padding-left: 0;">
@@ -128,6 +128,7 @@ const sendIMs = batchFunction(({ delay: BATCH_TIME }), async (logsAll: LogDatum[
128
128
  if (countFiltered > 0) {
129
129
  message += `\n+${countFiltered} more errors`;
130
130
  }
131
+ console.log(`Discord message: ${message}`);
131
132
  void sendDiscordMessage({
132
133
  webhookURL,
133
134
  message,
@@ -110,6 +110,11 @@ DEBUG: Why we would get into an infinite identify loop:
110
110
  IMPORTANT! The infinite identify loop causes servers to never finish identification, and so new PathValueServers couldn't start. SO THIS IS A REALLY BIG ISSUE!
111
111
  - Restarting the server fixed it. I'm not sure if it was one service, or all of them?
112
112
 
113
+ DEBUG: Backblaze errors
114
+ - Add a life cycle which handles the fact that one request might result in errors and retries. It also might result in an initial connection being done. It might be waiting on the initial connection, or it might be the one that triggers the initial connection.
115
+ - Being able to split up the different life cycles by various values, such as if they had to connect, if they had to retry, etc., would be useful as well. And then also splitting up the timing. And graphing various timing such as how long the request actually takes (minus the retries, initial connection, throttling, etc), how long the overall call takes, etc.
116
+ - And being able to plot by time would be useful as well, so we can see if the errors are time-based.
117
+
113
118
  SPECIAL UI links for certain errors in log view
114
119
  - Probably dynamically created, based on contents of log
115
120
  - LINKS to filters for all these special errors on a special page
@@ -32,13 +32,22 @@ export function cacheAsyncLimited<Arg, Return>(limit: number, fnc: (arg: Arg) =>
32
32
  }
33
33
 
34
34
  export const cacheAsyncSynced = cacheAsyncLimitedJSON;
35
- export function cacheAsyncLimitedJSON<Arg, Return>(limit: number, fnc: (arg: Arg) => Promise<Return>) {
35
+ export function cacheAsyncLimitedJSON<Arg, Return>(
36
+ limit: number,
37
+ fnc: (arg: Arg) => Promise<Return>
38
+ ): ((arg: Arg) => Return | undefined) & {
39
+ clear(): void;
40
+ promise: (arg: Arg) => Promise<Return>;
41
+ } {
36
42
  let results = new Map<string, { type: "result"; value: Return } | { type: "error"; error: Error; }>();
37
43
  let promiseValues = cacheLimited(limit, (json: string) => fnc(JSON.parse(json)));
38
44
  get["clear"] = () => {
39
45
  results.clear();
40
46
  promiseValues.clear();
41
47
  };
48
+ get.promise = (arg: Arg) => {
49
+ return promiseValues(JSON.stringify(arg));
50
+ };
42
51
  return get;
43
52
  function get(arg: Arg) {
44
53
  let json = JSON.stringify(arg);