@valentinkolb/sync 2.0.4 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +158 -4
- package/index.d.ts +8 -0
- package/index.js +2034 -167
- package/package.json +1 -1
- package/src/ephemeral.d.ts +101 -0
- package/src/internal/cron.d.ts +3 -0
- package/src/internal/job-utils.d.ts +11 -0
- package/src/internal/topic-utils.d.ts +7 -0
- package/src/job.d.ts +107 -0
- package/src/mutex.d.ts +26 -0
- package/src/queue.d.ts +67 -0
- package/src/ratelimit.d.ts +22 -0
- package/src/retry.d.ts +19 -0
- package/src/scheduler.d.ts +141 -0
- package/src/topic.d.ts +63 -0
package/index.js
CHANGED
|
@@ -1,12 +1,16 @@
|
|
|
1
1
|
// @bun
// Bundler runtime helpers: expose live ESM-style export bindings on a target
// object. Each export becomes an accessor backed by the `all` map of getter
// functions; assigning to an export rebinds the function stored in `all`.
var __defProp = Object.defineProperty;
var __returnValue = (v) => v;
function __exportSetter(name, newValue) {
  // `this` is the backing getter map (bound via __exportSetter.bind(all, name)).
  this[name] = __returnValue.bind(null, newValue);
}
var __export = (target, all) => {
  for (var name in all) {
    __defProp(target, name, {
      configurable: true,
      enumerable: true,
      get: all[name],
      set: __exportSetter.bind(all, name)
    });
  }
};
|
|
12
16
|
|
|
@@ -189,6 +193,91 @@ var mutex = (config) => {
|
|
|
189
193
|
// src/queue.ts
|
|
190
194
|
var {redis: redis3, RedisClient } = globalThis.Bun;
|
|
191
195
|
import { randomUUID } from "crypto";
|
|
196
|
+
|
|
197
|
+
// src/retry.ts
|
|
198
|
+
var {sleep: sleep2 } = globalThis.Bun;
|
|
199
|
+
// Normalize any thrown value into an Error instance.
var asError = (error) => error instanceof Error ? error : new Error(String(error));
// Build the Error thrown when a retry loop is cancelled via an AbortSignal.
var createAbortError = () => {
  const error = new Error("retry aborted");
  error.name = "AbortError";
  return error;
};
// Extract an upper-cased `code` property (e.g. "ECONNRESET") from an
// error-like value; returns "" when absent or not a string.
var parseCode = (error) => {
  if (!error || typeof error !== "object")
    return "";
  const code = error.code;
  return typeof code === "string" ? code.toUpperCase() : "";
};
// Heuristic: does this error look like a transient transport/network failure
// worth retrying? Well-known errno codes are checked first, then message
// substrings (also covers Redis LOADING / TRYAGAIN / CLUSTERDOWN replies).
var isRetryableTransportError = (error) => {
  const retryableCodes = [
    "ECONNRESET",
    "ETIMEDOUT",
    "ECONNREFUSED",
    "ENOTFOUND",
    "EPIPE",
    "EHOSTUNREACH",
    "ECONNABORTED"
  ];
  if (retryableCodes.includes(parseCode(error))) {
    return true;
  }
  const message = asError(error).message.toLowerCase();
  const retryableFragments = [
    "econnreset",
    "etimedout",
    "connection",
    "socket",
    "broken pipe",
    "network",
    "loading",
    "tryagain",
    "clusterdown"
  ];
  return retryableFragments.some((fragment) => message.includes(fragment));
};
// Defaults for retry(): up to 8 attempts, exponential backoff 100ms..2s with
// +/-20% jitter, retrying only transient transport errors.
var DEFAULT_RETRY_OPTIONS = {
  attempts: 8,
  minDelayMs: 100,
  maxDelayMs: 2000,
  factor: 2,
  jitter: 0.2,
  retryIf: isRetryableTransportError
};
|
|
227
|
+
// Exponential backoff with symmetric jitter: delay grows by `factor` per
// attempt, is capped at maxDelayMs, then randomised within +/- jitter.
var computeDelayMs = (attempt, opts) => {
  const exponent = Math.max(0, attempt - 1);
  const uncapped = opts.minDelayMs * opts.factor ** exponent;
  const capped = Math.min(opts.maxDelayMs, uncapped);
  const spread = capped * opts.jitter;
  const offset = (Math.random() * 2 - 1) * spread;
  return Math.max(0, Math.floor(capped + offset));
};
// Sleep for delayMs; with a signal the sleep is abortable and rejects with
// an AbortError as soon as the signal fires. Without a signal it delegates
// to Bun's sleep.
var sleepWithSignal = async (delayMs, signal) => {
  if (delayMs <= 0)
    return;
  if (!signal) {
    await sleep2(delayMs);
    return;
  }
  if (signal.aborted)
    throw createAbortError();
  await new Promise((resolve, reject) => {
    const onAbort = () => {
      clearTimeout(timer);
      signal.removeEventListener("abort", onAbort);
      reject(createAbortError());
    };
    const timer = setTimeout(() => {
      signal.removeEventListener("abort", onAbort);
      resolve();
    }, delayMs);
    signal.addEventListener("abort", onAbort, { once: true });
  });
};
// Run `fn` until it succeeds, retrying transient failures with backoff.
// Options fall back to DEFAULT_RETRY_OPTIONS; `opts.signal` aborts both the
// wait between attempts and the loop itself (throwing an AbortError).
// `attempts` may be Infinity for endless retrying.
var retry = async (fn, opts = {}) => {
  const attempts = Math.max(1, opts.attempts ?? DEFAULT_RETRY_OPTIONS.attempts);
  const minDelayMs = Math.max(0, opts.minDelayMs ?? DEFAULT_RETRY_OPTIONS.minDelayMs);
  const maxDelayMs = Math.max(minDelayMs, opts.maxDelayMs ?? DEFAULT_RETRY_OPTIONS.maxDelayMs);
  const factor = Math.max(1, opts.factor ?? DEFAULT_RETRY_OPTIONS.factor);
  const jitter = Math.min(1, Math.max(0, opts.jitter ?? DEFAULT_RETRY_OPTIONS.jitter));
  const retryIf = opts.retryIf ?? DEFAULT_RETRY_OPTIONS.retryIf;
  let attempt = 0;
  while (attempt < attempts) {
    attempt += 1;
    if (opts.signal?.aborted)
      throw createAbortError();
    try {
      return await fn(attempt);
    } catch (error) {
      // Give up on the final attempt or when the error is not retryable.
      const isLastAttempt = attempt >= attempts;
      if (isLastAttempt || !retryIf(error))
        throw error;
      const delayMs = computeDelayMs(attempt, { minDelayMs, maxDelayMs, factor, jitter });
      await sleepWithSignal(delayMs, opts.signal);
    }
  }
  throw new Error("unreachable retry state");
};
|
|
279
|
+
|
|
280
|
+
// src/queue.ts
|
|
192
281
|
var DAY_MS = 24 * 60 * 60 * 1000;
|
|
193
282
|
var DEFAULT_PREFIX3 = "sync:queue";
|
|
194
283
|
var DEFAULT_TENANT = "default";
|
|
@@ -485,7 +574,7 @@ var TOUCH_SCRIPT = `
|
|
|
485
574
|
return 1
|
|
486
575
|
`;
|
|
487
576
|
var textEncoder = new TextEncoder;
|
|
488
|
-
var
|
|
577
|
+
var asError2 = (error) => error instanceof Error ? error : new Error(String(error));
|
|
489
578
|
var safeClose = (client) => {
|
|
490
579
|
if (!client.connected)
|
|
491
580
|
return;
|
|
@@ -576,7 +665,7 @@ var queue = (config) => {
|
|
|
576
665
|
} catch (error) {
|
|
577
666
|
if (cfg.signal.aborted)
|
|
578
667
|
return null;
|
|
579
|
-
throw
|
|
668
|
+
throw asError2(error);
|
|
580
669
|
} finally {
|
|
581
670
|
cfg.signal.removeEventListener("abort", onAbort);
|
|
582
671
|
safeClose(client);
|
|
@@ -588,7 +677,7 @@ var queue = (config) => {
|
|
|
588
677
|
return typeof popped === "string" ? popped : null;
|
|
589
678
|
} catch (error) {
|
|
590
679
|
resetBlockingClient();
|
|
591
|
-
throw
|
|
680
|
+
throw asError2(error);
|
|
592
681
|
}
|
|
593
682
|
};
|
|
594
683
|
const recv = async (recvCfg = {}) => {
|
|
@@ -607,7 +696,7 @@ var queue = (config) => {
|
|
|
607
696
|
messageId = await popMessageId(keys, { wait, timeoutMs, signal: recvCfg.signal });
|
|
608
697
|
} catch (error) {
|
|
609
698
|
if (!wait)
|
|
610
|
-
throw
|
|
699
|
+
throw asError2(error);
|
|
611
700
|
continue;
|
|
612
701
|
}
|
|
613
702
|
if (!messageId)
|
|
@@ -683,7 +772,11 @@ var queue = (config) => {
|
|
|
683
772
|
const wait = streamCfg.wait ?? true;
|
|
684
773
|
try {
|
|
685
774
|
while (!streamCfg.signal?.aborted) {
|
|
686
|
-
const message = await recv(streamCfg)
|
|
775
|
+
const message = wait ? await retry(async () => await recv(streamCfg), {
|
|
776
|
+
attempts: Number.POSITIVE_INFINITY,
|
|
777
|
+
signal: streamCfg.signal,
|
|
778
|
+
retryIf: isRetryableTransportError
|
|
779
|
+
}) : await recv(streamCfg);
|
|
687
780
|
if (message) {
|
|
688
781
|
yield message;
|
|
689
782
|
continue;
|
|
@@ -827,7 +920,7 @@ var PUB_SCRIPT = `
|
|
|
827
920
|
`;
|
|
828
921
|
var ENSURED_GROUPS_MAX = 1e4;
|
|
829
922
|
var textEncoder2 = new TextEncoder;
|
|
830
|
-
var
|
|
923
|
+
var asError3 = (error) => error instanceof Error ? error : new Error(String(error));
|
|
831
924
|
var safeClose2 = (client) => {
|
|
832
925
|
if (!client.connected)
|
|
833
926
|
return;
|
|
@@ -855,7 +948,7 @@ var blockingReadWithTemporaryClient = async (command, args, signal) => {
|
|
|
855
948
|
} catch (error) {
|
|
856
949
|
if (signal?.aborted)
|
|
857
950
|
return null;
|
|
858
|
-
throw
|
|
951
|
+
throw asError3(error);
|
|
859
952
|
} finally {
|
|
860
953
|
if (signal)
|
|
861
954
|
signal.removeEventListener("abort", onAbort);
|
|
@@ -878,7 +971,7 @@ var topic = (config) => {
|
|
|
878
971
|
try {
|
|
879
972
|
await redis4.send("XGROUP", ["CREATE", key, group, "0", "MKSTREAM"]);
|
|
880
973
|
} catch (error) {
|
|
881
|
-
const message =
|
|
974
|
+
const message = asError3(error).message;
|
|
882
975
|
if (!message.includes("BUSYGROUP"))
|
|
883
976
|
throw error;
|
|
884
977
|
}
|
|
@@ -953,7 +1046,7 @@ var topic = (config) => {
|
|
|
953
1046
|
if (signal?.aborted)
|
|
954
1047
|
return null;
|
|
955
1048
|
resetBlockingClient();
|
|
956
|
-
throw
|
|
1049
|
+
throw asError3(error);
|
|
957
1050
|
} finally {
|
|
958
1051
|
if (signal)
|
|
959
1052
|
signal.removeEventListener("abort", onAbort);
|
|
@@ -1009,7 +1102,11 @@ var topic = (config) => {
|
|
|
1009
1102
|
const wait = streamCfg.wait ?? true;
|
|
1010
1103
|
try {
|
|
1011
1104
|
while (!streamCfg.signal?.aborted) {
|
|
1012
|
-
const message = await recv(streamCfg)
|
|
1105
|
+
const message = wait ? await retry(async () => await recv(streamCfg), {
|
|
1106
|
+
attempts: Number.POSITIVE_INFINITY,
|
|
1107
|
+
signal: streamCfg.signal,
|
|
1108
|
+
retryIf: isRetryableTransportError
|
|
1109
|
+
}) : await recv(streamCfg);
|
|
1013
1110
|
if (message) {
|
|
1014
1111
|
yield message;
|
|
1015
1112
|
continue;
|
|
@@ -1049,15 +1146,19 @@ var topic = (config) => {
|
|
|
1049
1146
|
let cursor = liveCfg.after ?? "$";
|
|
1050
1147
|
try {
|
|
1051
1148
|
while (!liveCfg.signal?.aborted) {
|
|
1052
|
-
const result = liveCfg.signal ? await blockingReadWithTemporaryClient("XREAD", ["COUNT", "1", "BLOCK", timeoutMs.toString(), "STREAMS", key, cursor], liveCfg.signal) : await (async () => {
|
|
1149
|
+
const result = await retry(async () => liveCfg.signal ? await blockingReadWithTemporaryClient("XREAD", ["COUNT", "1", "BLOCK", timeoutMs.toString(), "STREAMS", key, cursor], liveCfg.signal) : await (async () => {
|
|
1053
1150
|
try {
|
|
1054
1151
|
const client = await ensureBlockingClient();
|
|
1055
1152
|
return await client.send("XREAD", ["COUNT", "1", "BLOCK", timeoutMs.toString(), "STREAMS", key, cursor]);
|
|
1056
1153
|
} catch (error) {
|
|
1057
1154
|
resetBlockingClient();
|
|
1058
|
-
throw
|
|
1155
|
+
throw asError3(error);
|
|
1059
1156
|
}
|
|
1060
|
-
})()
|
|
1157
|
+
})(), {
|
|
1158
|
+
attempts: Number.POSITIVE_INFINITY,
|
|
1159
|
+
signal: liveCfg.signal,
|
|
1160
|
+
retryIf: isRetryableTransportError
|
|
1161
|
+
});
|
|
1061
1162
|
const entry = parseFirstStreamEntry(result);
|
|
1062
1163
|
if (!entry)
|
|
1063
1164
|
continue;
|
|
@@ -1088,7 +1189,7 @@ var topic = (config) => {
|
|
|
1088
1189
|
};
|
|
1089
1190
|
};
|
|
1090
1191
|
// src/job.ts
|
|
1091
|
-
var {redis: redis5 } = globalThis.Bun;
|
|
1192
|
+
var {redis: redis5, sleep: sleep3 } = globalThis.Bun;
|
|
1092
1193
|
|
|
1093
1194
|
// node_modules/zod/v4/classic/external.js
|
|
1094
1195
|
var exports_external = {};
|
|
@@ -14672,6 +14773,43 @@ var DEFAULT_LEASE_MS2 = 30000;
|
|
|
14672
14773
|
var DEFAULT_WORKER_RECV_TIMEOUT_MS = 1000;
|
|
14673
14774
|
var DEFAULT_MAX_ATTEMPTS = 1;
|
|
14674
14775
|
var DEFAULT_STATE_RETENTION_MS = 7 * DAY_MS3;
|
|
14776
|
+
var FINALIZE_STATE_SCRIPT = `
|
|
14777
|
+
local key = KEYS[1]
|
|
14778
|
+
local payload = ARGV[1]
|
|
14779
|
+
local ttlMs = tonumber(ARGV[2])
|
|
14780
|
+
|
|
14781
|
+
local raw = redis.call("GET", key)
|
|
14782
|
+
if not raw then return 0 end
|
|
14783
|
+
|
|
14784
|
+
local ok, state = pcall(cjson.decode, raw)
|
|
14785
|
+
if not ok then return 0 end
|
|
14786
|
+
|
|
14787
|
+
local status = tostring(state.status or "")
|
|
14788
|
+
if status == "cancelled" then return -1 end
|
|
14789
|
+
if status == "completed" or status == "failed" or status == "timed_out" then return 0 end
|
|
14790
|
+
|
|
14791
|
+
redis.call("SET", key, payload, "PX", tostring(ttlMs))
|
|
14792
|
+
return 1
|
|
14793
|
+
`;
|
|
14794
|
+
var CANCEL_STATE_SCRIPT = `
|
|
14795
|
+
local key = KEYS[1]
|
|
14796
|
+
local payload = ARGV[1]
|
|
14797
|
+
local ttlMs = tonumber(ARGV[2])
|
|
14798
|
+
|
|
14799
|
+
local raw = redis.call("GET", key)
|
|
14800
|
+
if not raw then return 0 end
|
|
14801
|
+
|
|
14802
|
+
local ok, state = pcall(cjson.decode, raw)
|
|
14803
|
+
if not ok then return 0 end
|
|
14804
|
+
|
|
14805
|
+
local status = tostring(state.status or "")
|
|
14806
|
+
if status == "completed" or status == "failed" or status == "timed_out" or status == "cancelled" then
|
|
14807
|
+
return 0
|
|
14808
|
+
end
|
|
14809
|
+
|
|
14810
|
+
redis.call("SET", key, payload, "PX", tostring(ttlMs))
|
|
14811
|
+
return 1
|
|
14812
|
+
`;
|
|
14675
14813
|
var activeWorkers = new Map;
|
|
14676
14814
|
var workPayloadSchema = exports_external.object({
|
|
14677
14815
|
id: exports_external.string(),
|
|
@@ -14731,150 +14869,211 @@ var job = (definition) => {
|
|
|
14731
14869
|
const writeState = async (state, ttlMs = DEFAULT_STATE_RETENTION_MS) => {
|
|
14732
14870
|
await redis5.send("SET", [stateKey(state.id), JSON.stringify(state), "PX", ttlMs.toString()]);
|
|
14733
14871
|
};
|
|
14872
|
+
const writeStateIfAbsent = async (state, ttlMs = DEFAULT_STATE_RETENTION_MS) => {
|
|
14873
|
+
const result = await redis5.send("SET", [stateKey(state.id), JSON.stringify(state), "PX", ttlMs.toString(), "NX"]);
|
|
14874
|
+
return result === "OK";
|
|
14875
|
+
};
|
|
14876
|
+
const writeFinalState = async (state, ttlMs = DEFAULT_STATE_RETENTION_MS) => {
|
|
14877
|
+
const result = Number(await redis5.send("EVAL", [
|
|
14878
|
+
FINALIZE_STATE_SCRIPT,
|
|
14879
|
+
"1",
|
|
14880
|
+
stateKey(state.id),
|
|
14881
|
+
JSON.stringify(state),
|
|
14882
|
+
String(ttlMs)
|
|
14883
|
+
]));
|
|
14884
|
+
if (result === 1)
|
|
14885
|
+
return "ok";
|
|
14886
|
+
if (result === -1)
|
|
14887
|
+
return "cancelled";
|
|
14888
|
+
return "missing";
|
|
14889
|
+
};
|
|
14890
|
+
const writeCancelledState = async (state, ttlMs = DEFAULT_STATE_RETENTION_MS) => {
|
|
14891
|
+
const result = Number(await redis5.send("EVAL", [
|
|
14892
|
+
CANCEL_STATE_SCRIPT,
|
|
14893
|
+
"1",
|
|
14894
|
+
stateKey(state.id),
|
|
14895
|
+
JSON.stringify(state),
|
|
14896
|
+
String(ttlMs)
|
|
14897
|
+
]));
|
|
14898
|
+
return result === 1;
|
|
14899
|
+
};
|
|
14734
14900
|
const startWorker = () => {
|
|
14735
14901
|
if (activeWorkers.has(workerId))
|
|
14736
14902
|
return;
|
|
14737
14903
|
const workerAc = new AbortController;
|
|
14738
14904
|
activeWorkers.set(workerId, workerAc);
|
|
14739
14905
|
(async () => {
|
|
14740
|
-
|
|
14741
|
-
|
|
14742
|
-
|
|
14743
|
-
|
|
14744
|
-
|
|
14745
|
-
|
|
14746
|
-
|
|
14747
|
-
|
|
14748
|
-
|
|
14749
|
-
|
|
14750
|
-
|
|
14751
|
-
await message.ack();
|
|
14752
|
-
continue;
|
|
14753
|
-
}
|
|
14754
|
-
if (state.status === "cancelled") {
|
|
14755
|
-
await message.ack();
|
|
14756
|
-
continue;
|
|
14757
|
-
}
|
|
14758
|
-
if (isTerminalStatus(state.status)) {
|
|
14759
|
-
await message.ack();
|
|
14760
|
-
continue;
|
|
14761
|
-
}
|
|
14762
|
-
const attempt = message.attempt;
|
|
14763
|
-
const runId = message.deliveryId;
|
|
14764
|
-
const startedAt = now();
|
|
14765
|
-
await writeState({
|
|
14766
|
-
...state,
|
|
14767
|
-
status: "running",
|
|
14768
|
-
attempts: attempt,
|
|
14769
|
-
updatedAt: startedAt
|
|
14770
|
-
});
|
|
14771
|
-
await emitEvent(payload.id, {
|
|
14772
|
-
type: "started",
|
|
14773
|
-
id: payload.id,
|
|
14774
|
-
runId,
|
|
14775
|
-
attempt,
|
|
14776
|
-
ts: startedAt
|
|
14777
|
-
});
|
|
14778
|
-
await message.touch({ leaseMs: payload.leaseMs });
|
|
14779
|
-
const jobAc = new AbortController;
|
|
14780
|
-
const ctx = {
|
|
14781
|
-
signal: jobAc.signal,
|
|
14782
|
-
step: async (cfg) => {
|
|
14783
|
-
return await Promise.resolve(cfg.run());
|
|
14784
|
-
},
|
|
14785
|
-
heartbeat: async (cfg) => {
|
|
14786
|
-
await message.touch({ leaseMs: cfg?.leaseMs ?? payload.leaseMs });
|
|
14787
|
-
await emitEvent(payload.id, {
|
|
14788
|
-
type: "heartbeat",
|
|
14789
|
-
id: payload.id,
|
|
14790
|
-
runId,
|
|
14791
|
-
ts: now()
|
|
14792
|
-
});
|
|
14793
|
-
}
|
|
14794
|
-
};
|
|
14795
|
-
try {
|
|
14796
|
-
const inputParsed = definition.schema.safeParse(payload.input);
|
|
14797
|
-
if (!inputParsed.success) {
|
|
14798
|
-
throw inputParsed.error;
|
|
14799
|
-
}
|
|
14800
|
-
const processPromise = Promise.resolve(definition.process({ ctx, input: inputParsed.data }));
|
|
14801
|
-
const result = await withTimeout(processPromise, payload.leaseMs);
|
|
14802
|
-
const latest = await readState(payload.id);
|
|
14803
|
-
if (latest?.status === "cancelled") {
|
|
14804
|
-
jobAc.abort();
|
|
14805
|
-
await message.ack();
|
|
14806
|
-
continue;
|
|
14807
|
-
}
|
|
14808
|
-
const acked = await message.ack();
|
|
14809
|
-
if (!acked)
|
|
14810
|
-
continue;
|
|
14811
|
-
const finishedAt = now();
|
|
14812
|
-
await writeState({
|
|
14813
|
-
id: payload.id,
|
|
14814
|
-
status: "completed",
|
|
14815
|
-
attempts: attempt,
|
|
14816
|
-
updatedAt: finishedAt,
|
|
14817
|
-
finishedAt,
|
|
14818
|
-
result
|
|
14819
|
-
});
|
|
14820
|
-
await emitEvent(payload.id, {
|
|
14821
|
-
type: "completed",
|
|
14822
|
-
id: payload.id,
|
|
14823
|
-
ts: finishedAt
|
|
14824
|
-
});
|
|
14825
|
-
} catch (error48) {
|
|
14826
|
-
jobAc.abort();
|
|
14827
|
-
const err = error48 instanceof Error ? error48 : new Error(String(error48));
|
|
14828
|
-
const timedOut = err.name === "JobTimeoutError";
|
|
14829
|
-
const canRetry = attempt < payload.maxAttempts;
|
|
14830
|
-
if (canRetry) {
|
|
14831
|
-
const delayMs = computeRetryDelay(payload.backoff, attempt);
|
|
14832
|
-
const nextAt = now() + delayMs;
|
|
14833
|
-
const nacked = await message.nack({
|
|
14834
|
-
delayMs,
|
|
14835
|
-
reason: timedOut ? "timed_out" : "error",
|
|
14836
|
-
error: err.message
|
|
14906
|
+
try {
|
|
14907
|
+
while (!workerAc.signal.aborted) {
|
|
14908
|
+
try {
|
|
14909
|
+
const message = await retry(async () => await workQueue.recv({
|
|
14910
|
+
wait: true,
|
|
14911
|
+
timeoutMs: DEFAULT_WORKER_RECV_TIMEOUT_MS,
|
|
14912
|
+
leaseMs: DEFAULT_LEASE_MS2
|
|
14913
|
+
}), {
|
|
14914
|
+
attempts: Number.POSITIVE_INFINITY,
|
|
14915
|
+
signal: workerAc.signal,
|
|
14916
|
+
retryIf: isRetryableTransportError
|
|
14837
14917
|
});
|
|
14838
|
-
if (!
|
|
14918
|
+
if (!message)
|
|
14839
14919
|
continue;
|
|
14920
|
+
const payload = message.data;
|
|
14921
|
+
let state = await readState(payload.id);
|
|
14922
|
+
if (!state) {
|
|
14923
|
+
await writeStateIfAbsent({
|
|
14924
|
+
id: payload.id,
|
|
14925
|
+
status: "submitted",
|
|
14926
|
+
attempts: 0,
|
|
14927
|
+
updatedAt: now()
|
|
14928
|
+
});
|
|
14929
|
+
state = await readState(payload.id);
|
|
14930
|
+
if (!state) {
|
|
14931
|
+
await message.nack({ delayMs: 250, reason: "state_missing_recover_failed" });
|
|
14932
|
+
continue;
|
|
14933
|
+
}
|
|
14934
|
+
}
|
|
14935
|
+
if (state.status === "cancelled") {
|
|
14936
|
+
await message.ack();
|
|
14937
|
+
continue;
|
|
14938
|
+
}
|
|
14939
|
+
if (isTerminalStatus(state.status)) {
|
|
14940
|
+
await message.ack();
|
|
14941
|
+
continue;
|
|
14942
|
+
}
|
|
14943
|
+
const attempt = message.attempt;
|
|
14944
|
+
const runId = message.deliveryId;
|
|
14945
|
+
const startedAt = now();
|
|
14840
14946
|
await writeState({
|
|
14841
|
-
|
|
14842
|
-
status: "
|
|
14947
|
+
...state,
|
|
14948
|
+
status: "running",
|
|
14843
14949
|
attempts: attempt,
|
|
14844
|
-
updatedAt:
|
|
14950
|
+
updatedAt: startedAt
|
|
14845
14951
|
});
|
|
14846
14952
|
await emitEvent(payload.id, {
|
|
14847
|
-
type: "
|
|
14953
|
+
type: "started",
|
|
14848
14954
|
id: payload.id,
|
|
14849
14955
|
runId,
|
|
14850
|
-
|
|
14851
|
-
|
|
14852
|
-
ts: now()
|
|
14956
|
+
attempt,
|
|
14957
|
+
ts: startedAt
|
|
14853
14958
|
});
|
|
14854
|
-
|
|
14855
|
-
|
|
14856
|
-
|
|
14857
|
-
|
|
14858
|
-
|
|
14859
|
-
|
|
14860
|
-
|
|
14861
|
-
|
|
14862
|
-
|
|
14863
|
-
|
|
14864
|
-
|
|
14865
|
-
|
|
14866
|
-
|
|
14867
|
-
|
|
14868
|
-
|
|
14869
|
-
|
|
14959
|
+
await message.touch({ leaseMs: payload.leaseMs });
|
|
14960
|
+
const jobAc = new AbortController;
|
|
14961
|
+
const ctx = {
|
|
14962
|
+
signal: jobAc.signal,
|
|
14963
|
+
step: async (cfg) => {
|
|
14964
|
+
return await Promise.resolve(cfg.run());
|
|
14965
|
+
},
|
|
14966
|
+
heartbeat: async (cfg) => {
|
|
14967
|
+
await message.touch({ leaseMs: cfg?.leaseMs ?? payload.leaseMs });
|
|
14968
|
+
await emitEvent(payload.id, {
|
|
14969
|
+
type: "heartbeat",
|
|
14970
|
+
id: payload.id,
|
|
14971
|
+
runId,
|
|
14972
|
+
ts: now()
|
|
14973
|
+
});
|
|
14974
|
+
}
|
|
14975
|
+
};
|
|
14976
|
+
try {
|
|
14977
|
+
const inputParsed = definition.schema.safeParse(payload.input);
|
|
14978
|
+
if (!inputParsed.success) {
|
|
14979
|
+
throw inputParsed.error;
|
|
14980
|
+
}
|
|
14981
|
+
const processPromise = Promise.resolve(definition.process({ ctx, input: inputParsed.data }));
|
|
14982
|
+
const result = await withTimeout(processPromise, payload.leaseMs);
|
|
14983
|
+
const latest = await readState(payload.id);
|
|
14984
|
+
if (latest?.status === "cancelled") {
|
|
14985
|
+
jobAc.abort();
|
|
14986
|
+
await message.ack();
|
|
14987
|
+
continue;
|
|
14988
|
+
}
|
|
14989
|
+
const acked = await message.ack();
|
|
14990
|
+
if (!acked)
|
|
14991
|
+
continue;
|
|
14992
|
+
const finishedAt = now();
|
|
14993
|
+
const writeResult = await writeFinalState({
|
|
14994
|
+
id: payload.id,
|
|
14995
|
+
status: "completed",
|
|
14996
|
+
attempts: attempt,
|
|
14997
|
+
updatedAt: finishedAt,
|
|
14998
|
+
finishedAt,
|
|
14999
|
+
result
|
|
15000
|
+
});
|
|
15001
|
+
if (writeResult !== "ok")
|
|
15002
|
+
continue;
|
|
15003
|
+
await emitEvent(payload.id, {
|
|
15004
|
+
type: "completed",
|
|
15005
|
+
id: payload.id,
|
|
15006
|
+
ts: finishedAt
|
|
15007
|
+
});
|
|
15008
|
+
} catch (error48) {
|
|
15009
|
+
jobAc.abort();
|
|
15010
|
+
const err = error48 instanceof Error ? error48 : new Error(String(error48));
|
|
15011
|
+
const timedOut = err.name === "JobTimeoutError";
|
|
15012
|
+
const canRetry = attempt < payload.maxAttempts;
|
|
15013
|
+
if (canRetry) {
|
|
15014
|
+
const delayMs = computeRetryDelay(payload.backoff, attempt);
|
|
15015
|
+
const nextAt = now() + delayMs;
|
|
15016
|
+
const nacked = await message.nack({
|
|
15017
|
+
delayMs,
|
|
15018
|
+
reason: timedOut ? "timed_out" : "error",
|
|
15019
|
+
error: err.message
|
|
15020
|
+
});
|
|
15021
|
+
if (!nacked)
|
|
15022
|
+
continue;
|
|
15023
|
+
const latestState = await readState(payload.id);
|
|
15024
|
+
if (latestState?.status === "cancelled")
|
|
15025
|
+
continue;
|
|
15026
|
+
await writeState({
|
|
15027
|
+
id: payload.id,
|
|
15028
|
+
status: "submitted",
|
|
15029
|
+
attempts: attempt,
|
|
15030
|
+
updatedAt: now()
|
|
15031
|
+
});
|
|
15032
|
+
await emitEvent(payload.id, {
|
|
15033
|
+
type: "retry",
|
|
15034
|
+
id: payload.id,
|
|
15035
|
+
runId,
|
|
15036
|
+
nextAt,
|
|
15037
|
+
reason: err.message,
|
|
15038
|
+
ts: now()
|
|
15039
|
+
});
|
|
15040
|
+
continue;
|
|
15041
|
+
}
|
|
15042
|
+
const acked = await message.ack();
|
|
15043
|
+
if (!acked)
|
|
15044
|
+
continue;
|
|
15045
|
+
const finishedAt = now();
|
|
15046
|
+
const status = timedOut ? "timed_out" : "failed";
|
|
15047
|
+
const writeResult = await writeFinalState({
|
|
15048
|
+
id: payload.id,
|
|
15049
|
+
status,
|
|
15050
|
+
attempts: attempt,
|
|
15051
|
+
updatedAt: finishedAt,
|
|
15052
|
+
finishedAt,
|
|
15053
|
+
error: {
|
|
15054
|
+
message: err.message,
|
|
15055
|
+
code: timedOut ? "TIMEOUT" : undefined
|
|
15056
|
+
}
|
|
15057
|
+
});
|
|
15058
|
+
if (writeResult !== "ok")
|
|
15059
|
+
continue;
|
|
15060
|
+
await emitEvent(payload.id, {
|
|
15061
|
+
type: "failed",
|
|
15062
|
+
id: payload.id,
|
|
15063
|
+
reason: err.message,
|
|
15064
|
+
ts: finishedAt
|
|
15065
|
+
});
|
|
14870
15066
|
}
|
|
14871
|
-
}
|
|
14872
|
-
|
|
14873
|
-
|
|
14874
|
-
|
|
14875
|
-
|
|
14876
|
-
|
|
14877
|
-
|
|
15067
|
+
} catch {
|
|
15068
|
+
if (workerAc.signal.aborted)
|
|
15069
|
+
break;
|
|
15070
|
+
await sleep3(25);
|
|
15071
|
+
}
|
|
15072
|
+
}
|
|
15073
|
+
} finally {
|
|
15074
|
+
const current = activeWorkers.get(workerId);
|
|
15075
|
+
if (current === workerAc) {
|
|
15076
|
+
activeWorkers.delete(workerId);
|
|
14878
15077
|
}
|
|
14879
15078
|
}
|
|
14880
15079
|
})();
|
|
@@ -14902,7 +15101,9 @@ var job = (definition) => {
|
|
|
14902
15101
|
const leaseMs = Math.max(1, cfg.leaseMs ?? definition.defaults?.leaseMs ?? DEFAULT_LEASE_MS2);
|
|
14903
15102
|
const backoff = cfg.backoff ?? definition.defaults?.backoff;
|
|
14904
15103
|
const delayMs = cfg.at !== undefined ? Math.max(0, cfg.at - now()) : Math.max(0, cfg.delayMs ?? 0);
|
|
15104
|
+
const keyTtlMs = Math.max(1000, cfg.keyTtlMs ?? DEFAULT_STATE_RETENTION_MS);
|
|
14905
15105
|
let jobId;
|
|
15106
|
+
let isNewSubmission = true;
|
|
14906
15107
|
if (cfg.key) {
|
|
14907
15108
|
const idemKey = `${keys.idempotencyPrefix}:${cfg.key}`;
|
|
14908
15109
|
const result = await redis5.send("EVAL", [
|
|
@@ -14910,16 +15111,21 @@ var job = (definition) => {
|
|
|
14910
15111
|
"2",
|
|
14911
15112
|
idemKey,
|
|
14912
15113
|
keys.seq,
|
|
14913
|
-
String(
|
|
15114
|
+
String(keyTtlMs)
|
|
14914
15115
|
]);
|
|
14915
15116
|
const arr = result;
|
|
14916
15117
|
jobId = String(arr[0]);
|
|
14917
|
-
|
|
14918
|
-
return jobId;
|
|
15118
|
+
isNewSubmission = Number(arr[1]) === 1;
|
|
14919
15119
|
} else {
|
|
14920
15120
|
const nextId = await redis5.incr(keys.seq);
|
|
14921
15121
|
jobId = String(nextId);
|
|
14922
15122
|
}
|
|
15123
|
+
const submittedState = {
|
|
15124
|
+
id: jobId,
|
|
15125
|
+
status: "submitted",
|
|
15126
|
+
attempts: 0,
|
|
15127
|
+
updatedAt: now()
|
|
15128
|
+
};
|
|
14923
15129
|
const payload = {
|
|
14924
15130
|
id: jobId,
|
|
14925
15131
|
input: parsed.data,
|
|
@@ -14928,25 +15134,44 @@ var job = (definition) => {
|
|
|
14928
15134
|
leaseMs,
|
|
14929
15135
|
meta: cfg.meta
|
|
14930
15136
|
};
|
|
14931
|
-
|
|
14932
|
-
|
|
14933
|
-
|
|
14934
|
-
|
|
14935
|
-
|
|
14936
|
-
|
|
14937
|
-
|
|
14938
|
-
|
|
14939
|
-
|
|
14940
|
-
|
|
14941
|
-
|
|
14942
|
-
|
|
14943
|
-
|
|
14944
|
-
|
|
14945
|
-
|
|
14946
|
-
|
|
14947
|
-
|
|
14948
|
-
|
|
14949
|
-
|
|
15137
|
+
if (!isNewSubmission) {
|
|
15138
|
+
const existingState = await readState(jobId);
|
|
15139
|
+
if (existingState)
|
|
15140
|
+
return jobId;
|
|
15141
|
+
await workQueue.send({
|
|
15142
|
+
data: payload,
|
|
15143
|
+
delayMs,
|
|
15144
|
+
idempotencyKey: cfg.key,
|
|
15145
|
+
idempotencyTtlMs: keyTtlMs,
|
|
15146
|
+
meta: cfg.meta
|
|
15147
|
+
});
|
|
15148
|
+
const wrote = await writeStateIfAbsent(submittedState);
|
|
15149
|
+
if (wrote) {
|
|
15150
|
+
await emitEvent(jobId, {
|
|
15151
|
+
type: "submitted",
|
|
15152
|
+
id: jobId,
|
|
15153
|
+
ts: now()
|
|
15154
|
+
});
|
|
15155
|
+
}
|
|
15156
|
+
return jobId;
|
|
15157
|
+
} else {
|
|
15158
|
+
await workQueue.send({
|
|
15159
|
+
data: payload,
|
|
15160
|
+
delayMs,
|
|
15161
|
+
idempotencyKey: cfg.key,
|
|
15162
|
+
idempotencyTtlMs: keyTtlMs,
|
|
15163
|
+
meta: cfg.meta
|
|
15164
|
+
});
|
|
15165
|
+
const wrote = await writeStateIfAbsent(submittedState);
|
|
15166
|
+
if (wrote) {
|
|
15167
|
+
await emitEvent(jobId, {
|
|
15168
|
+
type: "submitted",
|
|
15169
|
+
id: jobId,
|
|
15170
|
+
ts: now()
|
|
15171
|
+
});
|
|
15172
|
+
}
|
|
15173
|
+
return jobId;
|
|
15174
|
+
}
|
|
14950
15175
|
};
|
|
14951
15176
|
const join = async (cfg) => {
|
|
14952
15177
|
const state = await readState(cfg.id);
|
|
@@ -15014,7 +15239,7 @@ var job = (definition) => {
|
|
|
15014
15239
|
if (isTerminalStatus(existing.status))
|
|
15015
15240
|
return;
|
|
15016
15241
|
const finishedAt = now();
|
|
15017
|
-
await
|
|
15242
|
+
const wrote = await writeCancelledState({
|
|
15018
15243
|
id: cfg.id,
|
|
15019
15244
|
status: "cancelled",
|
|
15020
15245
|
attempts: existing.attempts,
|
|
@@ -15025,6 +15250,8 @@ var job = (definition) => {
|
|
|
15025
15250
|
code: "CANCELLED"
|
|
15026
15251
|
} : undefined
|
|
15027
15252
|
});
|
|
15253
|
+
if (!wrote)
|
|
15254
|
+
return;
|
|
15028
15255
|
await emitEvent(cfg.id, {
|
|
15029
15256
|
type: "cancelled",
|
|
15030
15257
|
id: cfg.id,
|
|
@@ -15049,18 +15276,1658 @@ var job = (definition) => {
|
|
|
15049
15276
|
return {
|
|
15050
15277
|
id: definition.id,
|
|
15051
15278
|
submit,
|
|
15279
|
+
validateInput: (input) => {
|
|
15280
|
+
const parsed = definition.schema.safeParse(input);
|
|
15281
|
+
if (!parsed.success)
|
|
15282
|
+
throw parsed.error;
|
|
15283
|
+
},
|
|
15052
15284
|
join,
|
|
15053
15285
|
cancel,
|
|
15054
15286
|
events,
|
|
15055
15287
|
stop
|
|
15056
15288
|
};
|
|
15057
15289
|
};
|
|
15290
|
+
// src/scheduler.ts
|
|
15291
|
+
var {redis: redis6, sleep: sleep4 } = globalThis.Bun;
|
|
15292
|
+
|
|
15293
|
+
// src/internal/cron.ts
// Upper bound on how far ahead the cron matcher scans for a next occurrence
// (~5 years, expressed in minutes).
var MAX_LOOKAHEAD_MINUTES = 5 * 366 * 24 * 60;
// Intl short weekday name -> cron day-of-week number (Sunday = 0).
var WEEKDAY_TO_NUM = {
  Sun: 0,
  Mon: 1,
  Tue: 2,
  Wed: 3,
  Thu: 4,
  Fri: 5,
  Sat: 6
};
// Intl.DateTimeFormat construction is expensive, so formatters (per
// timezone) and parsed cron expressions are memoised for the process.
var zonedFormatterCache = new Map;
var parsedCronCache = new Map;
var getZonedFormatter = (tz) => {
  const existing = zonedFormatterCache.get(tz);
  if (existing)
    return existing;
  // 24h clock with zero-padded fields so parts can be compared numerically.
  const formatter = new Intl.DateTimeFormat("en-US", {
    timeZone: tz,
    year: "numeric",
    month: "2-digit",
    day: "2-digit",
    hour: "2-digit",
    minute: "2-digit",
    weekday: "short",
    hour12: false,
    hourCycle: "h23"
  });
  zonedFormatterCache.set(tz, formatter);
  return formatter;
};
|
|
15324
|
+
var asInt = (value, fieldName) => {
|
|
15325
|
+
const parsed = Number(value);
|
|
15326
|
+
if (!Number.isInteger(parsed)) {
|
|
15327
|
+
throw new Error(`invalid ${fieldName} value: ${value}`);
|
|
15328
|
+
}
|
|
15329
|
+
return parsed;
|
|
15330
|
+
};
|
|
15331
|
+
var normalizeDow = (value) => {
|
|
15332
|
+
if (value === 7)
|
|
15333
|
+
return 0;
|
|
15334
|
+
return value;
|
|
15335
|
+
};
|
|
15336
|
+
// Expand one cron segment ("a", "a-b", "*", each optionally "/step") into the
// list of matching integer values within [min, max].
var parseSegment = (segment, min, max, fieldName) => {
  const [base, stepRaw] = segment.split("/");
  const step = stepRaw === undefined ? 1 : asInt(stepRaw, `${fieldName} step`);
  if (step <= 0)
    throw new Error(`invalid ${fieldName} step: ${segment}`);
  // defensive: String.split always yields a first element, so base is
  // practically never undefined; an empty base falls through to asInt below
  const resolvedBase = base ?? "*";
  let start = min;
  let end = max;
  if (resolvedBase !== "*") {
    if (resolvedBase.includes("-")) {
      const [a, b] = resolvedBase.split("-");
      if (a === undefined || b === undefined)
        throw new Error(`invalid ${fieldName} range: ${segment}`);
      start = asInt(a, fieldName);
      end = asInt(b, fieldName);
    } else {
      // single value: degenerate range start == end
      start = asInt(resolvedBase, fieldName);
      end = start;
    }
  }
  if (fieldName === "day-of-week") {
    // 7 is an alias for Sunday (0); a range such as "5-7" therefore wraps
    // around the end of the week after normalization.
    const rawStart = start;
    const rawEnd = end;
    start = normalizeDow(start);
    end = normalizeDow(end);
    if (rawStart !== rawEnd && start > end) {
      // Wrapping range: emit start..max, then min..end. Emitted values may
      // still include 7; the caller (parseField) normalizes each value again.
      // NOTE(review): this path returns before the out-of-range check below,
      // so wrapped ranges skip endpoint validation — confirm intended.
      const wrappedValues = [];
      for (let v = start;v <= max; v += step)
        wrappedValues.push(v);
      for (let v = min;v <= end; v += step)
        wrappedValues.push(v);
      return wrappedValues;
    }
  }
  if (start < min || start > max || end < min || end > max || start > end) {
    throw new Error(`out-of-range ${fieldName}: ${segment}`);
  }
  const values = [];
  for (let v = start;v <= end; v += step)
    values.push(v);
  return values;
};
|
|
15378
|
+
// Parse a full cron field (comma-separated segments) into either a wildcard
// marker ({ any: true }) or an explicit set of matching values.
// Day-of-week values are normalized so that 7 is stored as Sunday (0).
var parseField = (raw, min, max, fieldName) => {
  if (raw === "*")
    return { any: true, values: new Set };
  const collected = new Set;
  const isDow = fieldName === "day-of-week";
  for (const piece of raw.split(",")) {
    const token = piece.trim();
    if (token.length === 0)
      throw new Error(`invalid ${fieldName} field: "${raw}"`);
    for (const value of parseSegment(token, min, max, fieldName)) {
      collected.add(isDow ? normalizeDow(value) : value);
    }
  }
  if (collected.size === 0)
    throw new Error(`invalid ${fieldName} field: "${raw}"`);
  return { any: false, values: collected };
};
|
|
15394
|
+
// Parse a 5-field cron expression (minute hour day-of-month month day-of-week)
// into a field-spec object, memoized in parsedCronCache with a FIFO cap of 1000.
var parseCron = (raw) => {
  const hit = parsedCronCache.get(raw);
  if (hit)
    return hit;
  const fields = raw.trim().split(/\s+/);
  if (fields.length !== 5) {
    throw new Error(`cron must have 5 fields (minute hour day-of-month month day-of-week), got: "${raw}"`);
  }
  const [minuteRaw, hourRaw, dayOfMonthRaw, monthRaw, dayOfWeekRaw] = fields;
  const spec = {
    minute: parseField(minuteRaw, 0, 59, "minute"),
    hour: parseField(hourRaw, 0, 23, "hour"),
    dayOfMonth: parseField(dayOfMonthRaw, 1, 31, "day-of-month"),
    month: parseField(monthRaw, 1, 12, "month"),
    // day-of-week accepts 0-7 (7 normalized to Sunday inside parseField)
    dayOfWeek: parseField(dayOfWeekRaw, 0, 7, "day-of-week")
  };
  parsedCronCache.set(raw, spec);
  // Evict the oldest cached expression once the cache grows past 1000 entries.
  if (parsedCronCache.size > 1000) {
    const oldest = parsedCronCache.keys().next().value;
    if (oldest)
      parsedCronCache.delete(oldest);
  }
  return spec;
};
|
|
15418
|
+
// Truncate a Unix-millisecond timestamp down to the start of its minute.
var floorToMinute = (timestampMs) => {
  const MINUTE_MS = 60000;
  return Math.floor(timestampMs / MINUTE_MS) * MINUTE_MS;
};
|
|
15421
|
+
// Resolve a timestamp into calendar components (minute, hour, day-of-month,
// month, numeric day-of-week) in the given IANA timezone, via the cached
// Intl.DateTimeFormat from getZonedFormatter.
// Throws when any component cannot be extracted from the formatter output.
var toZonedParts = (timestampMs, tz) => {
  const formatted = getZonedFormatter(tz).formatToParts(new Date(timestampMs));
  let minute = -1;
  let hour = -1;
  let dayOfMonth = -1;
  let month = -1;
  let dayOfWeek = -1;
  for (const { type, value } of formatted) {
    switch (type) {
      case "minute":
        minute = Number(value);
        break;
      case "hour":
        hour = Number(value);
        break;
      case "day":
        dayOfMonth = Number(value);
        break;
      case "month":
        month = Number(value);
        break;
      case "weekday":
        // WEEKDAY_TO_NUM maps short weekday names to cron day numbers
        dayOfWeek = WEEKDAY_TO_NUM[value] ?? -1;
        break;
    }
  }
  if (minute < 0 || hour < 0 || dayOfMonth < 0 || month < 0 || dayOfWeek < 0) {
    throw new Error(`failed to resolve zoned time parts for timezone "${tz}"`);
  }
  return { minute, hour, dayOfMonth, month, dayOfWeek };
};
|
|
15445
|
+
// A cron field matches a value when it is a wildcard or explicitly lists it.
var matchesField = (field, value) => {
  if (field.any)
    return true;
  return field.values.has(value);
};
|
|
15446
|
+
// Standard cron day semantics: when BOTH day-of-month and day-of-week are
// restricted, a date matches if EITHER restriction matches; otherwise the
// single restricted field (if any) decides; no restrictions always matches.
var matchesDay = (spec, dayOfMonth, dayOfWeek) => {
  const domRestricted = !spec.dayOfMonth.any;
  const dowRestricted = !spec.dayOfWeek.any;
  const domHit = !domRestricted || spec.dayOfMonth.values.has(dayOfMonth);
  const dowHit = !dowRestricted || spec.dayOfWeek.values.has(dayOfWeek);
  if (!domRestricted && !dowRestricted)
    return true;
  if (!domRestricted)
    return dowHit;
  if (!dowRestricted)
    return domHit;
  return domHit || dowHit;
};
|
|
15459
|
+
// True when the timestamp's zoned minute, hour, month and day all satisfy the
// parsed cron spec in the given timezone.
var matchesCron = (spec, timestampMs, tz) => {
  const parts = toZonedParts(timestampMs, tz);
  if (!matchesField(spec.minute, parts.minute))
    return false;
  if (!matchesField(spec.hour, parts.hour))
    return false;
  if (!matchesField(spec.month, parts.month))
    return false;
  return matchesDay(spec, parts.dayOfMonth, parts.dayOfWeek);
};
|
|
15463
|
+
// Scan forward minute-by-minute for the first timestamp strictly after
// `afterTimestampMs` that matches the cron expression in timezone `tz`.
// Bounded by MAX_LOOKAHEAD_MINUTES to guarantee termination for
// never-matching expressions (e.g. Feb 30).
var nextCronTimestamp = (cron, tz, afterTimestampMs) => {
  const spec = parseCron(cron);
  let candidateTs = floorToMinute(afterTimestampMs) + 60000;
  let remaining = MAX_LOOKAHEAD_MINUTES;
  while (remaining > 0) {
    if (matchesCron(spec, candidateTs, tz))
      return candidateTs;
    candidateTs += 60000;
    remaining -= 1;
  }
  throw new Error(`unable to find next cron execution within lookahead window for "${cron}" (${tz})`);
};
|
|
15473
|
+
// Validate an IANA timezone name by constructing (and caching) the formatter
// for it; Intl.DateTimeFormat throws RangeError for unknown timezone names,
// so an invalid `tz` surfaces as an exception here.
var assertValidTimeZone = (tz) => {
  getZonedFormatter(tz);
};
|
|
15476
|
+
|
|
15477
|
+
// src/scheduler.ts
|
|
15478
|
+
// Scheduler tuning defaults. All *_MS values are milliseconds.
var DEFAULT_PREFIX6 = "sync:scheduler";
var DAY_MS4 = 24 * 60 * 60 * 1000;
// Leader election: lease duration and how often the leader refreshes it.
var DEFAULT_LEASE_MS3 = 5000;
var DEFAULT_HEARTBEAT_MS = 500;
// Dispatch loop cadence and per-tick limits.
var DEFAULT_TICK_MS = 500;
var DEFAULT_BATCH_SIZE = 200;
var DEFAULT_MAX_SUBMITS_PER_TICK = 500;
// Job-submission retry count and exponential-backoff bounds.
var DEFAULT_SUBMIT_RETRIES = 3;
var DEFAULT_SUBMIT_BACKOFF_BASE_MS = 100;
var DEFAULT_SUBMIT_BACKOFF_MAX_MS = 2000;
// Retention for the per-slot job key (used for dispatch idempotency).
var DEFAULT_SCHEDULED_JOB_KEY_TTL_MS = 90 * DAY_MS4;
// Cap on dispatch dead-letter-queue entries kept in Redis.
var DEFAULT_DISPATCH_DLQ_MAX_ENTRIES = 5000;
// Misfire policy applied when a schedule's slot lies in the past.
var DEFAULT_MISFIRE = "skip";
var DEFAULT_MAX_CATCH_UP_RUNS = 100;
// After this many consecutive dispatch failures the slot is abandoned.
var DEFAULT_MAX_CONSECUTIVE_DISPATCH_FAILURES = 5;
// When true, a due schedule with no registered job handler halts dispatch.
var DEFAULT_STRICT_HANDLERS = true;
|
|
15494
|
+
var UPSERT_SCRIPT = `
|
|
15495
|
+
local raw = redis.call("GET", KEYS[1])
|
|
15496
|
+
local incomingRaw = ARGV[1]
|
|
15497
|
+
local firstRunAt = tonumber(ARGV[2])
|
|
15498
|
+
local scheduleId = ARGV[3]
|
|
15499
|
+
local now = tonumber(ARGV[4])
|
|
15500
|
+
|
|
15501
|
+
local created = 1
|
|
15502
|
+
local incoming = cjson.decode(incomingRaw)
|
|
15503
|
+
|
|
15504
|
+
if raw then
|
|
15505
|
+
created = 0
|
|
15506
|
+
local ok, existing = pcall(cjson.decode, raw)
|
|
15507
|
+
if ok then
|
|
15508
|
+
incoming.createdAt = tonumber(existing.createdAt) or incoming.createdAt
|
|
15509
|
+
|
|
15510
|
+
local shouldResetNext = tostring(existing.cron) ~= tostring(incoming.cron)
|
|
15511
|
+
or tostring(existing.tz) ~= tostring(incoming.tz)
|
|
15512
|
+
|
|
15513
|
+
if shouldResetNext then
|
|
15514
|
+
incoming.nextRunAt = firstRunAt
|
|
15515
|
+
incoming.consecutiveDispatchFailures = 0
|
|
15516
|
+
incoming.lastFailedSlotTs = nil
|
|
15517
|
+
incoming.lastDispatchError = nil
|
|
15518
|
+
else
|
|
15519
|
+
incoming.nextRunAt = tonumber(existing.nextRunAt) or firstRunAt
|
|
15520
|
+
incoming.consecutiveDispatchFailures = tonumber(existing.consecutiveDispatchFailures) or 0
|
|
15521
|
+
incoming.lastFailedSlotTs = tonumber(existing.lastFailedSlotTs) or nil
|
|
15522
|
+
incoming.lastDispatchError = existing.lastDispatchError
|
|
15523
|
+
end
|
|
15524
|
+
else
|
|
15525
|
+
incoming.nextRunAt = firstRunAt
|
|
15526
|
+
incoming.consecutiveDispatchFailures = 0
|
|
15527
|
+
incoming.lastFailedSlotTs = nil
|
|
15528
|
+
incoming.lastDispatchError = nil
|
|
15529
|
+
end
|
|
15530
|
+
else
|
|
15531
|
+
incoming.nextRunAt = firstRunAt
|
|
15532
|
+
incoming.consecutiveDispatchFailures = 0
|
|
15533
|
+
incoming.lastFailedSlotTs = nil
|
|
15534
|
+
incoming.lastDispatchError = nil
|
|
15535
|
+
end
|
|
15536
|
+
|
|
15537
|
+
incoming.updatedAt = now
|
|
15538
|
+
|
|
15539
|
+
redis.call("SET", KEYS[1], cjson.encode(incoming))
|
|
15540
|
+
redis.call("ZADD", KEYS[2], tostring(incoming.nextRunAt), scheduleId)
|
|
15541
|
+
redis.call("SADD", KEYS[3], scheduleId)
|
|
15542
|
+
|
|
15543
|
+
if created == 1 then
|
|
15544
|
+
return 1
|
|
15545
|
+
end
|
|
15546
|
+
return 2
|
|
15547
|
+
`;
|
|
15548
|
+
var UNREGISTER_SCRIPT = `
|
|
15549
|
+
redis.call("DEL", KEYS[1])
|
|
15550
|
+
redis.call("ZREM", KEYS[2], ARGV[1])
|
|
15551
|
+
redis.call("SREM", KEYS[3], ARGV[1])
|
|
15552
|
+
return 1
|
|
15553
|
+
`;
|
|
15554
|
+
var CLEANUP_BROKEN_SCRIPT = `
|
|
15555
|
+
redis.call("DEL", KEYS[1])
|
|
15556
|
+
redis.call("ZREM", KEYS[2], ARGV[1])
|
|
15557
|
+
redis.call("SREM", KEYS[3], ARGV[1])
|
|
15558
|
+
return 1
|
|
15559
|
+
`;
|
|
15560
|
+
var RESCHEDULE_CAS_SCRIPT = `
|
|
15561
|
+
local epochKey = KEYS[4]
|
|
15562
|
+
local raw = redis.call("GET", KEYS[1])
|
|
15563
|
+
local scheduleId = ARGV[1]
|
|
15564
|
+
local expectedEpoch = tonumber(ARGV[2])
|
|
15565
|
+
local expectedNext = ARGV[3]
|
|
15566
|
+
local nextRunAt = tonumber(ARGV[4])
|
|
15567
|
+
local now = tonumber(ARGV[5])
|
|
15568
|
+
|
|
15569
|
+
local currentEpoch = tonumber(redis.call("GET", epochKey) or "0")
|
|
15570
|
+
if currentEpoch ~= expectedEpoch then
|
|
15571
|
+
return -2
|
|
15572
|
+
end
|
|
15573
|
+
|
|
15574
|
+
if not raw then
|
|
15575
|
+
redis.call("ZREM", KEYS[2], scheduleId)
|
|
15576
|
+
redis.call("SREM", KEYS[3], scheduleId)
|
|
15577
|
+
return 0
|
|
15578
|
+
end
|
|
15579
|
+
|
|
15580
|
+
local ok, schedule = pcall(cjson.decode, raw)
|
|
15581
|
+
if not ok then
|
|
15582
|
+
redis.call("DEL", KEYS[1])
|
|
15583
|
+
redis.call("ZREM", KEYS[2], scheduleId)
|
|
15584
|
+
redis.call("SREM", KEYS[3], scheduleId)
|
|
15585
|
+
return 0
|
|
15586
|
+
end
|
|
15587
|
+
|
|
15588
|
+
if tonumber(schedule.nextRunAt) ~= tonumber(expectedNext) then
|
|
15589
|
+
return -1
|
|
15590
|
+
end
|
|
15591
|
+
|
|
15592
|
+
schedule.nextRunAt = nextRunAt
|
|
15593
|
+
schedule.updatedAt = now
|
|
15594
|
+
|
|
15595
|
+
redis.call("SET", KEYS[1], cjson.encode(schedule))
|
|
15596
|
+
redis.call("ZADD", KEYS[2], tostring(nextRunAt), scheduleId)
|
|
15597
|
+
redis.call("SADD", KEYS[3], scheduleId)
|
|
15598
|
+
|
|
15599
|
+
return 1
|
|
15600
|
+
`;
|
|
15601
|
+
var PUSH_DISPATCH_DLQ_SCRIPT = `
|
|
15602
|
+
redis.call("LPUSH", KEYS[1], ARGV[1])
|
|
15603
|
+
redis.call("LTRIM", KEYS[1], "0", ARGV[2])
|
|
15604
|
+
return 1
|
|
15605
|
+
`;
|
|
15606
|
+
var RECORD_FAILURE_CAS_SCRIPT = `
|
|
15607
|
+
local epochKey = KEYS[4]
|
|
15608
|
+
local raw = redis.call("GET", KEYS[1])
|
|
15609
|
+
local scheduleId = ARGV[1]
|
|
15610
|
+
local expectedEpoch = tonumber(ARGV[2])
|
|
15611
|
+
local expectedNext = tonumber(ARGV[3])
|
|
15612
|
+
local failedSlotTs = tonumber(ARGV[4])
|
|
15613
|
+
local nextRunAt = tonumber(ARGV[5])
|
|
15614
|
+
local failures = tonumber(ARGV[6])
|
|
15615
|
+
local shouldAdvance = tonumber(ARGV[7])
|
|
15616
|
+
local errorMessage = ARGV[8]
|
|
15617
|
+
local now = tonumber(ARGV[9])
|
|
15618
|
+
|
|
15619
|
+
local currentEpoch = tonumber(redis.call("GET", epochKey) or "0")
|
|
15620
|
+
if currentEpoch ~= expectedEpoch then
|
|
15621
|
+
return -2
|
|
15622
|
+
end
|
|
15623
|
+
|
|
15624
|
+
if not raw then
|
|
15625
|
+
redis.call("ZREM", KEYS[2], scheduleId)
|
|
15626
|
+
redis.call("SREM", KEYS[3], scheduleId)
|
|
15627
|
+
return 0
|
|
15628
|
+
end
|
|
15629
|
+
|
|
15630
|
+
local ok, schedule = pcall(cjson.decode, raw)
|
|
15631
|
+
if not ok then
|
|
15632
|
+
redis.call("DEL", KEYS[1])
|
|
15633
|
+
redis.call("ZREM", KEYS[2], scheduleId)
|
|
15634
|
+
redis.call("SREM", KEYS[3], scheduleId)
|
|
15635
|
+
return 0
|
|
15636
|
+
end
|
|
15637
|
+
|
|
15638
|
+
if tonumber(schedule.nextRunAt) ~= expectedNext then
|
|
15639
|
+
return -1
|
|
15640
|
+
end
|
|
15641
|
+
|
|
15642
|
+
schedule.consecutiveDispatchFailures = failures
|
|
15643
|
+
schedule.lastFailedSlotTs = failedSlotTs
|
|
15644
|
+
schedule.lastDispatchError = errorMessage
|
|
15645
|
+
schedule.updatedAt = now
|
|
15646
|
+
|
|
15647
|
+
if shouldAdvance == 1 then
|
|
15648
|
+
schedule.nextRunAt = nextRunAt
|
|
15649
|
+
schedule.consecutiveDispatchFailures = 0
|
|
15650
|
+
end
|
|
15651
|
+
|
|
15652
|
+
redis.call("SET", KEYS[1], cjson.encode(schedule))
|
|
15653
|
+
redis.call("ZADD", KEYS[2], tostring(schedule.nextRunAt), scheduleId)
|
|
15654
|
+
redis.call("SADD", KEYS[3], scheduleId)
|
|
15655
|
+
return 1
|
|
15656
|
+
`;
|
|
15657
|
+
// Normalize any thrown value into an Error instance.
var asError4 = (error48) => {
  if (error48 instanceof Error)
    return error48;
  return new Error(String(error48));
};
|
|
15658
|
+
// Invoke the optional user-supplied metric hook, swallowing anything it
// throws so that observability can never break the scheduler itself.
var safeMetric = (onMetric, metric) => {
  if (onMetric) {
    try {
      onMetric(metric);
    } catch {
      // a throwing metric callback must not disturb the caller
    }
  }
};
|
|
15665
|
+
// Decode a stored schedule JSON string; returns null for missing, empty, or
// corrupt payloads so callers can treat all three uniformly.
var parseSchedule = (raw) => {
  if (!raw)
    return null;
  let decoded = null;
  try {
    decoded = JSON.parse(raw);
  } catch {
    // corrupt JSON is treated the same as a missing schedule
  }
  return decoded;
};
|
|
15674
|
+
// Project the public, read-only view of a stored schedule record, dropping
// internal fields (input, meta, failure counters, etc.).
var asInfo = ({ id, cron, tz, misfire, maxCatchUpRuns, jobId, nextRunAt, createdAt, updatedAt }) => ({
  id,
  cron,
  tz,
  misfire,
  maxCatchUpRuns,
  jobId,
  nextRunAt,
  createdAt,
  updatedAt
});
|
|
15685
|
+
// Decide which slot timestamps to dispatch for a due schedule and what its
// next wake-up time becomes, according to the schedule's misfire policy.
// Returns null when the schedule is not yet due.
var computeDispatchPlan = (schedule, nowMs) => {
  const { nextRunAt, misfire, cron, tz } = schedule;
  if (nextRunAt > nowMs)
    return null;
  switch (misfire) {
    case "skip":
      // Drop every missed slot; resume at the next occurrence after now.
      return {
        slots: [],
        nextRunAt: nextCronTimestamp(cron, tz, nowMs)
      };
    case "catch_up_one":
      // Run exactly one missed slot, then resume after now.
      return {
        slots: [nextRunAt],
        nextRunAt: nextCronTimestamp(cron, tz, nowMs)
      };
    default: {
      // Any other policy: replay every missed slot up to now, capped at
      // maxCatchUpRuns; the next wake-up is the first slot past the cap/now.
      const slots = [];
      let cursor = nextRunAt;
      const cap = Math.max(1, schedule.maxCatchUpRuns);
      while (cursor <= nowMs && slots.length < cap) {
        slots.push(cursor);
        cursor = nextCronTimestamp(cron, tz, cursor);
      }
      return {
        slots,
        nextRunAt: cursor
      };
    }
  }
};
|
|
15712
|
+
var scheduler = (config2) => {
|
|
15713
|
+
const prefix = config2.prefix ?? DEFAULT_PREFIX6;
|
|
15714
|
+
const leaseMs = Math.max(500, config2.leader?.leaseMs ?? DEFAULT_LEASE_MS3);
|
|
15715
|
+
const heartbeatMs = Math.max(100, config2.leader?.heartbeatMs ?? DEFAULT_HEARTBEAT_MS);
|
|
15716
|
+
const tickMs = Math.max(50, config2.dispatch?.tickMs ?? DEFAULT_TICK_MS);
|
|
15717
|
+
const batchSize = Math.max(1, config2.dispatch?.batchSize ?? DEFAULT_BATCH_SIZE);
|
|
15718
|
+
const maxSubmitsPerTick = Math.max(1, config2.dispatch?.maxSubmitsPerTick ?? DEFAULT_MAX_SUBMITS_PER_TICK);
|
|
15719
|
+
const submitRetries = Math.max(0, config2.dispatch?.submitRetries ?? DEFAULT_SUBMIT_RETRIES);
|
|
15720
|
+
const submitBackoffBaseMs = Math.max(10, config2.dispatch?.submitBackoffBaseMs ?? DEFAULT_SUBMIT_BACKOFF_BASE_MS);
|
|
15721
|
+
const submitBackoffMaxMs = Math.max(submitBackoffBaseMs, config2.dispatch?.submitBackoffMaxMs ?? DEFAULT_SUBMIT_BACKOFF_MAX_MS);
|
|
15722
|
+
const scheduledJobKeyTtlMs = Math.max(60000, config2.dispatch?.scheduledJobKeyTtlMs ?? DEFAULT_SCHEDULED_JOB_KEY_TTL_MS);
|
|
15723
|
+
const dlqMaxEntries = Math.max(1, config2.dispatch?.dlqMaxEntries ?? DEFAULT_DISPATCH_DLQ_MAX_ENTRIES);
|
|
15724
|
+
const maxConsecutiveDispatchFailures = Math.max(1, config2.dispatch?.maxConsecutiveDispatchFailures ?? DEFAULT_MAX_CONSECUTIVE_DISPATCH_FAILURES);
|
|
15725
|
+
const strictHandlers = config2.strictHandlers ?? DEFAULT_STRICT_HANDLERS;
|
|
15726
|
+
const scheduleKey = (scheduleId) => `${prefix}:${config2.id}:schedule:${scheduleId}`;
|
|
15727
|
+
const dueKey = `${prefix}:${config2.id}:due`;
|
|
15728
|
+
const indexKey = `${prefix}:${config2.id}:index`;
|
|
15729
|
+
const dispatchDlqKey = `${prefix}:${config2.id}:dispatch:dlq`;
|
|
15730
|
+
const leaderEpochKey = `${prefix}:${config2.id}:leader:epoch`;
|
|
15731
|
+
const leaderMutex = mutex({
|
|
15732
|
+
id: `${config2.id}:leader`,
|
|
15733
|
+
prefix: `${prefix}:leader`,
|
|
15734
|
+
defaultTtl: leaseMs,
|
|
15735
|
+
retryCount: 0
|
|
15736
|
+
});
|
|
15737
|
+
const jobsById = new Map;
|
|
15738
|
+
const scheduleToJobId = new Map;
|
|
15739
|
+
const metricsState = {
|
|
15740
|
+
isLeader: false,
|
|
15741
|
+
leaderEpoch: 0,
|
|
15742
|
+
leaderChanges: 0,
|
|
15743
|
+
dispatchSubmitted: 0,
|
|
15744
|
+
dispatchFailed: 0,
|
|
15745
|
+
dispatchRetried: 0,
|
|
15746
|
+
dispatchSkipped: 0,
|
|
15747
|
+
dispatchDlq: 0,
|
|
15748
|
+
tickErrors: 0,
|
|
15749
|
+
lastTickAt: null
|
|
15750
|
+
};
|
|
15751
|
+
let running = false;
|
|
15752
|
+
let loopPromise = null;
|
|
15753
|
+
let currentLeaderLock = null;
|
|
15754
|
+
let currentLeaderEpoch = null;
|
|
15755
|
+
let lastHeartbeatAt = 0;
|
|
15756
|
+
const setLeader = (next, reason) => {
|
|
15757
|
+
if (metricsState.isLeader === next)
|
|
15758
|
+
return;
|
|
15759
|
+
metricsState.isLeader = next;
|
|
15760
|
+
metricsState.leaderChanges += 1;
|
|
15761
|
+
if (next) {
|
|
15762
|
+
safeMetric(config2.onMetric, { type: "leader_acquired", ts: Date.now() });
|
|
15763
|
+
return;
|
|
15764
|
+
}
|
|
15765
|
+
metricsState.leaderEpoch = currentLeaderEpoch ?? metricsState.leaderEpoch;
|
|
15766
|
+
safeMetric(config2.onMetric, { type: "leader_lost", ts: Date.now(), reason: reason ?? "stop" });
|
|
15767
|
+
};
|
|
15768
|
+
const tryAcquireLeadership = async () => {
|
|
15769
|
+
if (currentLeaderLock)
|
|
15770
|
+
return;
|
|
15771
|
+
const acquired = await leaderMutex.acquire("active", leaseMs);
|
|
15772
|
+
if (!acquired)
|
|
15773
|
+
return;
|
|
15774
|
+
currentLeaderLock = acquired;
|
|
15775
|
+
currentLeaderEpoch = Number(await redis6.incr(leaderEpochKey));
|
|
15776
|
+
metricsState.leaderEpoch = currentLeaderEpoch;
|
|
15777
|
+
lastHeartbeatAt = Date.now();
|
|
15778
|
+
setLeader(true);
|
|
15779
|
+
};
|
|
15780
|
+
const maintainLeadership = async () => {
|
|
15781
|
+
if (!currentLeaderLock)
|
|
15782
|
+
return;
|
|
15783
|
+
const nowMs = Date.now();
|
|
15784
|
+
if (nowMs - lastHeartbeatAt < heartbeatMs)
|
|
15785
|
+
return;
|
|
15786
|
+
const ok = await leaderMutex.extend(currentLeaderLock, leaseMs);
|
|
15787
|
+
lastHeartbeatAt = nowMs;
|
|
15788
|
+
if (ok) {
|
|
15789
|
+
if (currentLeaderEpoch !== null) {
|
|
15790
|
+
const latestEpoch = Number(await redis6.get(leaderEpochKey) ?? "0");
|
|
15791
|
+
if (latestEpoch === currentLeaderEpoch)
|
|
15792
|
+
return;
|
|
15793
|
+
} else {
|
|
15794
|
+
return;
|
|
15795
|
+
}
|
|
15796
|
+
}
|
|
15797
|
+
currentLeaderLock = null;
|
|
15798
|
+
currentLeaderEpoch = null;
|
|
15799
|
+
setLeader(false, "extend_failed");
|
|
15800
|
+
};
|
|
15801
|
+
const relinquishLeadership = async () => {
|
|
15802
|
+
if (!currentLeaderLock)
|
|
15803
|
+
return;
|
|
15804
|
+
try {
|
|
15805
|
+
await leaderMutex.release(currentLeaderLock);
|
|
15806
|
+
} catch {}
|
|
15807
|
+
currentLeaderLock = null;
|
|
15808
|
+
currentLeaderEpoch = null;
|
|
15809
|
+
setLeader(false, "extend_failed");
|
|
15810
|
+
};
|
|
15811
|
+
const ensureLeadership = async (cfg) => {
|
|
15812
|
+
if (!currentLeaderLock || currentLeaderEpoch === null)
|
|
15813
|
+
return false;
|
|
15814
|
+
const nowMs = Date.now();
|
|
15815
|
+
const shouldRefresh = (cfg?.forceRefresh ?? false) || nowMs - lastHeartbeatAt >= heartbeatMs;
|
|
15816
|
+
if (!shouldRefresh)
|
|
15817
|
+
return metricsState.isLeader;
|
|
15818
|
+
const ok = await leaderMutex.extend(currentLeaderLock, leaseMs);
|
|
15819
|
+
lastHeartbeatAt = nowMs;
|
|
15820
|
+
if (ok) {
|
|
15821
|
+
const latestEpoch = Number(await redis6.get(leaderEpochKey) ?? "0");
|
|
15822
|
+
if (latestEpoch === currentLeaderEpoch)
|
|
15823
|
+
return true;
|
|
15824
|
+
}
|
|
15825
|
+
currentLeaderLock = null;
|
|
15826
|
+
currentLeaderEpoch = null;
|
|
15827
|
+
setLeader(false, "extend_failed");
|
|
15828
|
+
return false;
|
|
15829
|
+
};
|
|
15830
|
+
const cleanupBrokenSchedule = async (scheduleId) => {
|
|
15831
|
+
await redis6.send("EVAL", [
|
|
15832
|
+
CLEANUP_BROKEN_SCRIPT,
|
|
15833
|
+
"3",
|
|
15834
|
+
scheduleKey(scheduleId),
|
|
15835
|
+
dueKey,
|
|
15836
|
+
indexKey,
|
|
15837
|
+
scheduleId
|
|
15838
|
+
]);
|
|
15839
|
+
scheduleToJobId.delete(scheduleId);
|
|
15840
|
+
};
|
|
15841
|
+
const rescheduleCas = async (schedule, nextRunAt, nowMs) => {
|
|
15842
|
+
if (currentLeaderEpoch === null)
|
|
15843
|
+
return "stale";
|
|
15844
|
+
const result = await redis6.send("EVAL", [
|
|
15845
|
+
RESCHEDULE_CAS_SCRIPT,
|
|
15846
|
+
"4",
|
|
15847
|
+
scheduleKey(schedule.id),
|
|
15848
|
+
dueKey,
|
|
15849
|
+
indexKey,
|
|
15850
|
+
leaderEpochKey,
|
|
15851
|
+
schedule.id,
|
|
15852
|
+
String(currentLeaderEpoch),
|
|
15853
|
+
String(schedule.nextRunAt),
|
|
15854
|
+
String(nextRunAt),
|
|
15855
|
+
String(nowMs)
|
|
15856
|
+
]);
|
|
15857
|
+
const code = Number(result);
|
|
15858
|
+
if (code === 1)
|
|
15859
|
+
return "ok";
|
|
15860
|
+
if (code === 0)
|
|
15861
|
+
return "missing";
|
|
15862
|
+
return "stale";
|
|
15863
|
+
};
|
|
15864
|
+
const pushDispatchDlq = async (cfg) => {
|
|
15865
|
+
const payload = JSON.stringify({
|
|
15866
|
+
...cfg,
|
|
15867
|
+
ts: Date.now(),
|
|
15868
|
+
schedulerId: config2.id,
|
|
15869
|
+
leaderEpoch: currentLeaderEpoch
|
|
15870
|
+
});
|
|
15871
|
+
await redis6.send("EVAL", [
|
|
15872
|
+
PUSH_DISPATCH_DLQ_SCRIPT,
|
|
15873
|
+
"1",
|
|
15874
|
+
dispatchDlqKey,
|
|
15875
|
+
payload,
|
|
15876
|
+
String(dlqMaxEntries - 1)
|
|
15877
|
+
]);
|
|
15878
|
+
metricsState.dispatchDlq += 1;
|
|
15879
|
+
safeMetric(config2.onMetric, {
|
|
15880
|
+
type: "dispatch_dlq",
|
|
15881
|
+
ts: Date.now(),
|
|
15882
|
+
scheduleId: cfg.scheduleId,
|
|
15883
|
+
slotTs: cfg.slotTs,
|
|
15884
|
+
message: cfg.message
|
|
15885
|
+
});
|
|
15886
|
+
};
|
|
15887
|
+
const submitWithRetry = async (cfg) => {
|
|
15888
|
+
await retry(async () => {
|
|
15889
|
+
if (!await ensureLeadership({ forceRefresh: true })) {
|
|
15890
|
+
throw new Error("leadership lost during dispatch");
|
|
15891
|
+
}
|
|
15892
|
+
return await cfg.jobHandle.submit({
|
|
15893
|
+
input: cfg.schedule.input,
|
|
15894
|
+
key: `${cfg.schedule.id}:${cfg.slotTs}`,
|
|
15895
|
+
keyTtlMs: scheduledJobKeyTtlMs,
|
|
15896
|
+
at: cfg.slotTs,
|
|
15897
|
+
meta: {
|
|
15898
|
+
...cfg.schedule.meta ?? {},
|
|
15899
|
+
scheduleId: cfg.schedule.id,
|
|
15900
|
+
scheduleSlotTs: cfg.slotTs,
|
|
15901
|
+
schedulerId: config2.id
|
|
15902
|
+
}
|
|
15903
|
+
});
|
|
15904
|
+
}, {
|
|
15905
|
+
attempts: submitRetries + 1,
|
|
15906
|
+
minDelayMs: submitBackoffBaseMs,
|
|
15907
|
+
maxDelayMs: submitBackoffMaxMs,
|
|
15908
|
+
factor: 2,
|
|
15909
|
+
jitter: 0.25,
|
|
15910
|
+
retryIf: (error48) => {
|
|
15911
|
+
const err = asError4(error48);
|
|
15912
|
+
if (err.name === "ZodError")
|
|
15913
|
+
return false;
|
|
15914
|
+
if (err.message === "leadership lost during dispatch")
|
|
15915
|
+
return false;
|
|
15916
|
+
metricsState.dispatchRetried += 1;
|
|
15917
|
+
return true;
|
|
15918
|
+
}
|
|
15919
|
+
});
|
|
15920
|
+
};
|
|
15921
|
+
const recordDispatchFailure = async (cfg) => {
|
|
15922
|
+
if (currentLeaderEpoch === null)
|
|
15923
|
+
return "stale";
|
|
15924
|
+
const sameSlot = cfg.schedule.lastFailedSlotTs === cfg.failedSlotTs;
|
|
15925
|
+
const failures = sameSlot ? cfg.schedule.consecutiveDispatchFailures + 1 : 1;
|
|
15926
|
+
const shouldAdvance = cfg.deterministic || failures >= maxConsecutiveDispatchFailures;
|
|
15927
|
+
const nextRunAt = shouldAdvance ? nextCronTimestamp(cfg.schedule.cron, cfg.schedule.tz, cfg.failedSlotTs) : cfg.schedule.nextRunAt;
|
|
15928
|
+
const result = Number(await redis6.send("EVAL", [
|
|
15929
|
+
RECORD_FAILURE_CAS_SCRIPT,
|
|
15930
|
+
"4",
|
|
15931
|
+
scheduleKey(cfg.schedule.id),
|
|
15932
|
+
dueKey,
|
|
15933
|
+
indexKey,
|
|
15934
|
+
leaderEpochKey,
|
|
15935
|
+
cfg.schedule.id,
|
|
15936
|
+
String(currentLeaderEpoch),
|
|
15937
|
+
String(cfg.schedule.nextRunAt),
|
|
15938
|
+
String(cfg.failedSlotTs),
|
|
15939
|
+
String(nextRunAt),
|
|
15940
|
+
String(failures),
|
|
15941
|
+
shouldAdvance ? "1" : "0",
|
|
15942
|
+
cfg.message,
|
|
15943
|
+
String(Date.now())
|
|
15944
|
+
]));
|
|
15945
|
+
if (result === 1 && shouldAdvance) {
|
|
15946
|
+
safeMetric(config2.onMetric, {
|
|
15947
|
+
type: "dispatch_advanced_after_failures",
|
|
15948
|
+
ts: Date.now(),
|
|
15949
|
+
scheduleId: cfg.schedule.id,
|
|
15950
|
+
slotTs: cfg.failedSlotTs,
|
|
15951
|
+
failures
|
|
15952
|
+
});
|
|
15953
|
+
}
|
|
15954
|
+
if (result === 1)
|
|
15955
|
+
return "ok";
|
|
15956
|
+
if (result === 0)
|
|
15957
|
+
return "missing";
|
|
15958
|
+
return "stale";
|
|
15959
|
+
};
|
|
15960
|
+
const dispatchDue = async () => {
|
|
15961
|
+
const nowMs = Date.now();
|
|
15962
|
+
const dueIds = await redis6.send("ZRANGEBYSCORE", [dueKey, "-inf", String(nowMs), "LIMIT", "0", String(batchSize)]);
|
|
15963
|
+
if (!Array.isArray(dueIds) || dueIds.length === 0)
|
|
15964
|
+
return;
|
|
15965
|
+
let submitsRemaining = maxSubmitsPerTick;
|
|
15966
|
+
for (const idRaw of dueIds) {
|
|
15967
|
+
if (!await ensureLeadership())
|
|
15968
|
+
break;
|
|
15969
|
+
const scheduleId = String(idRaw);
|
|
15970
|
+
const raw = await redis6.get(scheduleKey(scheduleId));
|
|
15971
|
+
const schedule = parseSchedule(raw);
|
|
15972
|
+
if (!schedule) {
|
|
15973
|
+
await cleanupBrokenSchedule(scheduleId);
|
|
15974
|
+
continue;
|
|
15975
|
+
}
|
|
15976
|
+
if (schedule.nextRunAt > nowMs) {
|
|
15977
|
+
continue;
|
|
15978
|
+
}
|
|
15979
|
+
const plan = computeDispatchPlan(schedule, nowMs);
|
|
15980
|
+
if (!plan)
|
|
15981
|
+
continue;
|
|
15982
|
+
const jobHandle = jobsById.get(schedule.jobId);
|
|
15983
|
+
if (!jobHandle && plan.slots.length > 0) {
|
|
15984
|
+
metricsState.dispatchSkipped += 1;
|
|
15985
|
+
safeMetric(config2.onMetric, { type: "dispatch_skipped", ts: nowMs, scheduleId: schedule.id, reason: "missing_handler" });
|
|
15986
|
+
if (strictHandlers) {
|
|
15987
|
+
await relinquishLeadership();
|
|
15988
|
+
break;
|
|
15989
|
+
}
|
|
15990
|
+
continue;
|
|
15991
|
+
}
|
|
15992
|
+
let submitFailed = false;
|
|
15993
|
+
let submittedAny = false;
|
|
15994
|
+
let lastSubmittedSlotTs = null;
|
|
15995
|
+
for (const slotTs of plan.slots) {
|
|
15996
|
+
if (submitsRemaining <= 0)
|
|
15997
|
+
break;
|
|
15998
|
+
if (!await ensureLeadership()) {
|
|
15999
|
+
submitFailed = true;
|
|
16000
|
+
break;
|
|
16001
|
+
}
|
|
16002
|
+
try {
|
|
16003
|
+
await submitWithRetry({
|
|
16004
|
+
jobHandle,
|
|
16005
|
+
schedule,
|
|
16006
|
+
slotTs
|
|
16007
|
+
});
|
|
16008
|
+
metricsState.dispatchSubmitted += 1;
|
|
16009
|
+
submitsRemaining -= 1;
|
|
16010
|
+
submittedAny = true;
|
|
16011
|
+
lastSubmittedSlotTs = slotTs;
|
|
16012
|
+
safeMetric(config2.onMetric, {
|
|
16013
|
+
type: "dispatch_submitted",
|
|
16014
|
+
ts: Date.now(),
|
|
16015
|
+
scheduleId: schedule.id,
|
|
16016
|
+
slotTs,
|
|
16017
|
+
jobId: schedule.jobId
|
|
16018
|
+
});
|
|
16019
|
+
} catch (error48) {
|
|
16020
|
+
submitFailed = true;
|
|
16021
|
+
metricsState.dispatchFailed += 1;
|
|
16022
|
+
const err = asError4(error48);
|
|
16023
|
+
await pushDispatchDlq({
|
|
16024
|
+
scheduleId: schedule.id,
|
|
16025
|
+
slotTs,
|
|
16026
|
+
message: err.message
|
|
16027
|
+
});
|
|
16028
|
+
await recordDispatchFailure({
|
|
16029
|
+
schedule,
|
|
16030
|
+
failedSlotTs: slotTs,
|
|
16031
|
+
message: err.message,
|
|
16032
|
+
deterministic: err.name === "ZodError"
|
|
16033
|
+
});
|
|
16034
|
+
safeMetric(config2.onMetric, {
|
|
16035
|
+
type: "dispatch_failed",
|
|
16036
|
+
ts: Date.now(),
|
|
16037
|
+
scheduleId: schedule.id,
|
|
16038
|
+
message: err.message
|
|
16039
|
+
});
|
|
16040
|
+
break;
|
|
16041
|
+
}
|
|
16042
|
+
}
|
|
16043
|
+
if (submitFailed)
|
|
16044
|
+
continue;
|
|
16045
|
+
if (submitsRemaining <= 0) {
|
|
16046
|
+
if (submittedAny && lastSubmittedSlotTs !== null) {
|
|
16047
|
+
if (!await ensureLeadership({ forceRefresh: true }))
|
|
16048
|
+
break;
|
|
16049
|
+
const partialNextRunAt = nextCronTimestamp(schedule.cron, schedule.tz, lastSubmittedSlotTs);
|
|
16050
|
+
const partialCas = await rescheduleCas(schedule, partialNextRunAt, Date.now());
|
|
16051
|
+
if (partialCas !== "ok") {
|
|
16052
|
+
metricsState.dispatchSkipped += 1;
|
|
16053
|
+
safeMetric(config2.onMetric, {
|
|
16054
|
+
type: "dispatch_skipped",
|
|
16055
|
+
ts: Date.now(),
|
|
16056
|
+
scheduleId: schedule.id,
|
|
16057
|
+
reason: "cas_stale"
|
|
16058
|
+
});
|
|
16059
|
+
}
|
|
16060
|
+
scheduleToJobId.set(schedule.id, schedule.jobId);
|
|
16061
|
+
}
|
|
16062
|
+
break;
|
|
16063
|
+
}
|
|
16064
|
+
if (!await ensureLeadership({ forceRefresh: true }))
|
|
16065
|
+
break;
|
|
16066
|
+
const cas = await rescheduleCas(schedule, plan.nextRunAt, Date.now());
|
|
16067
|
+
if (cas !== "ok") {
|
|
16068
|
+
metricsState.dispatchSkipped += 1;
|
|
16069
|
+
safeMetric(config2.onMetric, {
|
|
16070
|
+
type: "dispatch_skipped",
|
|
16071
|
+
ts: Date.now(),
|
|
16072
|
+
scheduleId: schedule.id,
|
|
16073
|
+
reason: "cas_stale"
|
|
16074
|
+
});
|
|
16075
|
+
}
|
|
16076
|
+
scheduleToJobId.set(schedule.id, schedule.jobId);
|
|
16077
|
+
}
|
|
16078
|
+
};
|
|
16079
|
+
const loop = async () => {
|
|
16080
|
+
while (running) {
|
|
16081
|
+
try {
|
|
16082
|
+
await tryAcquireLeadership();
|
|
16083
|
+
await maintainLeadership();
|
|
16084
|
+
if (currentLeaderLock) {
|
|
16085
|
+
await dispatchDue();
|
|
16086
|
+
}
|
|
16087
|
+
metricsState.lastTickAt = Date.now();
|
|
16088
|
+
} catch (error48) {
|
|
16089
|
+
metricsState.tickErrors += 1;
|
|
16090
|
+
safeMetric(config2.onMetric, {
|
|
16091
|
+
type: "tick_error",
|
|
16092
|
+
ts: Date.now(),
|
|
16093
|
+
message: asError4(error48).message
|
|
16094
|
+
});
|
|
16095
|
+
}
|
|
16096
|
+
await sleep4(tickMs);
|
|
16097
|
+
}
|
|
16098
|
+
};
|
|
16099
|
+
// Registers (or updates) a cron schedule and binds it to a job definition.
// The schedule is persisted atomically via UPSERT_SCRIPT; local maps
// (jobsById, scheduleToJobId) are kept in sync. Returns { created, updated }.
const register = async (cfg) => {
  const tz = cfg.tz ?? "UTC";
  assertValidTimeZone(tz);
  // Let the job validate its input eagerly, before anything is persisted.
  cfg.job.validateInput?.(cfg.input);
  const nowMs = Date.now();
  const misfire = cfg.misfire ?? DEFAULT_MISFIRE;
  // Clamp to at least one catch-up run.
  const maxCatchUpRuns = Math.max(1, cfg.maxCatchUpRuns ?? DEFAULT_MAX_CATCH_UP_RUNS);
  const firstRunAt = nextCronTimestamp(cfg.cron, tz, nowMs);
  // Previous stored schedule (if any) — used below to detect job re-binding.
  const previous = parseSchedule(await redis6.get(scheduleKey(cfg.id)));
  const stored = {
    id: cfg.id,
    cron: cfg.cron,
    tz,
    misfire,
    maxCatchUpRuns,
    jobId: cfg.job.id,
    input: cfg.input,
    meta: cfg.meta,
    createdAt: nowMs,
    updatedAt: nowMs,
    nextRunAt: firstRunAt,
    consecutiveDispatchFailures: 0
  };
  // UPSERT_SCRIPT result: 1 = created, 2 = updated (per the checks below).
  const upsertResult = Number(await redis6.send("EVAL", [
    UPSERT_SCRIPT,
    "3",
    scheduleKey(cfg.id),
    dueKey,
    indexKey,
    JSON.stringify(stored),
    String(firstRunAt),
    cfg.id,
    String(nowMs)
  ]));
  const created = upsertResult === 1;
  const updated = upsertResult === 2;
  jobsById.set(cfg.job.id, cfg.job);
  scheduleToJobId.set(cfg.id, cfg.job.id);
  // If this schedule switched to a different job, drop the old job definition
  // unless some other schedule still references it.
  if (previous && previous.jobId !== cfg.job.id) {
    let stillUsed = false;
    for (const [sid, jid] of scheduleToJobId.entries()) {
      if (sid !== cfg.id && jid === previous.jobId) {
        stillUsed = true;
        break;
      }
    }
    if (!stillUsed)
      jobsById.delete(previous.jobId);
  }
  safeMetric(config2.onMetric, {
    type: "schedule_registered",
    ts: Date.now(),
    scheduleId: cfg.id,
    created
  });
  // Emit a dedicated metric for pure updates.
  if (updated && !created) {
    safeMetric(config2.onMetric, {
      type: "schedule_updated",
      ts: Date.now(),
      scheduleId: cfg.id
    });
  }
  return { created, updated };
};
|
|
16163
|
+
// Removes a schedule from Redis and from the local maps, and garbage-collects
// the bound job definition when no other schedule still references it.
const unregister = async (cfg) => {
  // Capture the job binding before it is dropped from the map.
  const mappedJob = scheduleToJobId.get(cfg.id);
  await redis6.send("EVAL", [
    UNREGISTER_SCRIPT,
    "3",
    scheduleKey(cfg.id),
    dueKey,
    indexKey,
    cfg.id
  ]);
  safeMetric(config2.onMetric, {
    type: "schedule_unregistered",
    ts: Date.now(),
    scheduleId: cfg.id
  });
  scheduleToJobId.delete(cfg.id);
  if (mappedJob) {
    const stillUsed = [...scheduleToJobId.values()].some((jid) => jid === mappedJob);
    if (!stillUsed) {
      jobsById.delete(mappedJob);
    }
  }
};
|
|
16191
|
+
// Loads one schedule by id; resolves null when missing or unparsable.
const get = async (cfg) => {
  const parsed = parseSchedule(await redis6.get(scheduleKey(cfg.id)));
  return parsed ? asInfo(parsed) : null;
};
|
|
16198
|
+
// Lists all registered schedules, sorted by id for deterministic output.
// Corrupt or missing entries are silently skipped.
const list = async () => {
  const idsRaw = await redis6.send("SMEMBERS", [indexKey]);
  if (!Array.isArray(idsRaw) || idsRaw.length === 0) {
    return [];
  }
  const ids = idsRaw.map((v) => String(v));
  const values = await redis6.send("MGET", ids.map((id) => scheduleKey(id)));
  if (!Array.isArray(values)) {
    return [];
  }
  const out = values
    .map((raw) => parseSchedule(typeof raw === "string" ? raw : null))
    .filter(Boolean)
    .map((parsed) => asInfo(parsed));
  out.sort((a, b) => a.id.localeCompare(b.id));
  return out;
};
|
|
16216
|
+
// Starts the tick loop. Idempotent: calling while already running is a no-op.
const start = () => {
  if (running) {
    return;
  }
  running = true;
  loopPromise = loop();
};
|
|
16222
|
+
// Stops the tick loop, waits for the in-flight tick, and best-effort releases
// the leadership lock so another instance can take over promptly.
const stop = async () => {
  if (!running) {
    return;
  }
  running = false;
  await loopPromise;
  loopPromise = null;
  if (currentLeaderLock) {
    try {
      await leaderMutex.release(currentLeaderLock);
    } catch {
      // Release is best-effort; the lock expires on its own.
    }
    currentLeaderLock = null;
    currentLeaderEpoch = null;
  }
  setLeader(false, "stop");
};
|
|
16237
|
+
const metrics = () => ({ ...metricsState });
|
|
16238
|
+
return {
|
|
16239
|
+
id: config2.id,
|
|
16240
|
+
start,
|
|
16241
|
+
stop,
|
|
16242
|
+
register,
|
|
16243
|
+
unregister,
|
|
16244
|
+
get,
|
|
16245
|
+
list,
|
|
16246
|
+
metrics
|
|
16247
|
+
};
|
|
16248
|
+
};
|
|
16249
|
+
// src/ephemeral.ts
// Ephemeral key/value store on Redis: TTL-bound entries per (tenant, store id)
// with a change-event stream and lazy reconciliation of expired entries.
var {redis: redis7, RedisClient: RedisClient3 } = globalThis.Bun;
var DEFAULT_PREFIX7 = "sync:e";
var DEFAULT_TENANT3 = "default";
// Capacity / payload guard rails (overridable via config.limits).
var DEFAULT_MAX_ENTRIES = 1e4;
var DEFAULT_MAX_PAYLOAD_BYTES = 4 * 1024;
// Event-stream retention: by age (MINID trim) and by approximate length (MAXLEN trim).
var DEFAULT_EVENT_RETENTION_MS = 5 * 60 * 1000;
var DEFAULT_EVENT_MAXLEN = 50000;
// Default BLOCK timeout for reader XREADs.
var DEFAULT_TIMEOUT_MS2 = 30000;
// Reconciliation batching and per-tenant throttling.
var DEFAULT_RECONCILE_BATCH_SIZE = 200;
var DEFAULT_RECONCILE_INTERVAL_MS = 250;
// Upper bound on the reconcile-throttle map to cap memory use.
var MAX_RECONCILE_TENANTS = 1000;
var MAX_KEY_BYTES = 512;
var textEncoder3 = new TextEncoder;
|
|
16263
|
+
var UPSERT_SCRIPT2 = `
|
|
16264
|
+
local now = tonumber(ARGV[1])
|
|
16265
|
+
local ttlMs = tonumber(ARGV[2])
|
|
16266
|
+
local dataRaw = ARGV[3]
|
|
16267
|
+
local logicalKey = ARGV[4]
|
|
16268
|
+
local maxEntries = tonumber(ARGV[5])
|
|
16269
|
+
local trimMinId = ARGV[6]
|
|
16270
|
+
local maxEventLen = tonumber(ARGV[7])
|
|
16271
|
+
|
|
16272
|
+
local ttlKey = KEYS[4] .. string.len(logicalKey) .. ":" .. logicalKey
|
|
16273
|
+
|
|
16274
|
+
local exists = redis.call("HEXISTS", KEYS[2], logicalKey)
|
|
16275
|
+
if exists == 0 then
|
|
16276
|
+
local count = tonumber(redis.call("HLEN", KEYS[2]))
|
|
16277
|
+
if count >= maxEntries then
|
|
16278
|
+
return "__ERR_CAPACITY__"
|
|
16279
|
+
end
|
|
16280
|
+
end
|
|
16281
|
+
|
|
16282
|
+
local decodeOk, data = pcall(cjson.decode, dataRaw)
|
|
16283
|
+
if not decodeOk then
|
|
16284
|
+
return "__ERR_PAYLOAD__"
|
|
16285
|
+
end
|
|
16286
|
+
|
|
16287
|
+
local version = tostring(redis.call("INCR", KEYS[1]))
|
|
16288
|
+
local updatedAt = now
|
|
16289
|
+
local expiresAt = now + ttlMs
|
|
16290
|
+
|
|
16291
|
+
local entry = {
|
|
16292
|
+
key = logicalKey,
|
|
16293
|
+
data = data,
|
|
16294
|
+
version = version,
|
|
16295
|
+
updatedAt = updatedAt,
|
|
16296
|
+
expiresAt = expiresAt,
|
|
16297
|
+
}
|
|
16298
|
+
|
|
16299
|
+
local encoded = cjson.encode(entry)
|
|
16300
|
+
redis.call("HSET", KEYS[2], logicalKey, encoded)
|
|
16301
|
+
redis.call("ZADD", KEYS[3], tostring(expiresAt), logicalKey)
|
|
16302
|
+
redis.call("SET", ttlKey, "1", "PX", tostring(ttlMs))
|
|
16303
|
+
|
|
16304
|
+
redis.call(
|
|
16305
|
+
"XADD",
|
|
16306
|
+
KEYS[5],
|
|
16307
|
+
"*",
|
|
16308
|
+
"type",
|
|
16309
|
+
"upsert",
|
|
16310
|
+
"key",
|
|
16311
|
+
logicalKey,
|
|
16312
|
+
"version",
|
|
16313
|
+
version,
|
|
16314
|
+
"updatedAt",
|
|
16315
|
+
tostring(updatedAt),
|
|
16316
|
+
"expiresAt",
|
|
16317
|
+
tostring(expiresAt),
|
|
16318
|
+
"payload",
|
|
16319
|
+
dataRaw
|
|
16320
|
+
)
|
|
16321
|
+
|
|
16322
|
+
if trimMinId ~= "" then
|
|
16323
|
+
redis.call("XTRIM", KEYS[5], "MINID", "~", trimMinId)
|
|
16324
|
+
end
|
|
16325
|
+
if maxEventLen > 0 then
|
|
16326
|
+
redis.call("XTRIM", KEYS[5], "MAXLEN", "~", tostring(maxEventLen))
|
|
16327
|
+
end
|
|
16328
|
+
|
|
16329
|
+
return encoded
|
|
16330
|
+
`;
|
|
16331
|
+
var TOUCH_SCRIPT2 = `
|
|
16332
|
+
local now = tonumber(ARGV[1])
|
|
16333
|
+
local ttlMs = tonumber(ARGV[2])
|
|
16334
|
+
local logicalKey = ARGV[3]
|
|
16335
|
+
local trimMinId = ARGV[4]
|
|
16336
|
+
local maxEventLen = tonumber(ARGV[5])
|
|
16337
|
+
|
|
16338
|
+
local ttlKey = KEYS[4] .. string.len(logicalKey) .. ":" .. logicalKey
|
|
16339
|
+
|
|
16340
|
+
local existingRaw = redis.call("HGET", KEYS[2], logicalKey)
|
|
16341
|
+
if not existingRaw then
|
|
16342
|
+
return nil
|
|
16343
|
+
end
|
|
16344
|
+
|
|
16345
|
+
local decodeOk, existing = pcall(cjson.decode, existingRaw)
|
|
16346
|
+
if not decodeOk then
|
|
16347
|
+
redis.call("HDEL", KEYS[2], logicalKey)
|
|
16348
|
+
redis.call("ZREM", KEYS[3], logicalKey)
|
|
16349
|
+
redis.call("DEL", ttlKey)
|
|
16350
|
+
return nil
|
|
16351
|
+
end
|
|
16352
|
+
|
|
16353
|
+
local version = tostring(redis.call("INCR", KEYS[1]))
|
|
16354
|
+
local expiresAt = now + ttlMs
|
|
16355
|
+
|
|
16356
|
+
existing.version = version
|
|
16357
|
+
existing.updatedAt = now
|
|
16358
|
+
existing.expiresAt = expiresAt
|
|
16359
|
+
|
|
16360
|
+
redis.call("HSET", KEYS[2], logicalKey, cjson.encode(existing))
|
|
16361
|
+
redis.call("ZADD", KEYS[3], tostring(expiresAt), logicalKey)
|
|
16362
|
+
redis.call("SET", ttlKey, "1", "PX", tostring(ttlMs))
|
|
16363
|
+
|
|
16364
|
+
redis.call(
|
|
16365
|
+
"XADD",
|
|
16366
|
+
KEYS[5],
|
|
16367
|
+
"*",
|
|
16368
|
+
"type",
|
|
16369
|
+
"touch",
|
|
16370
|
+
"key",
|
|
16371
|
+
logicalKey,
|
|
16372
|
+
"version",
|
|
16373
|
+
version,
|
|
16374
|
+
"expiresAt",
|
|
16375
|
+
tostring(expiresAt)
|
|
16376
|
+
)
|
|
16377
|
+
|
|
16378
|
+
if trimMinId ~= "" then
|
|
16379
|
+
redis.call("XTRIM", KEYS[5], "MINID", "~", trimMinId)
|
|
16380
|
+
end
|
|
16381
|
+
if maxEventLen > 0 then
|
|
16382
|
+
redis.call("XTRIM", KEYS[5], "MAXLEN", "~", tostring(maxEventLen))
|
|
16383
|
+
end
|
|
16384
|
+
|
|
16385
|
+
return cjson.encode({ version = version, expiresAt = expiresAt })
|
|
16386
|
+
`;
|
|
16387
|
+
var REMOVE_SCRIPT = `
|
|
16388
|
+
local now = tonumber(ARGV[1])
|
|
16389
|
+
local logicalKey = ARGV[2]
|
|
16390
|
+
local reason = ARGV[3]
|
|
16391
|
+
local trimMinId = ARGV[4]
|
|
16392
|
+
local maxEventLen = tonumber(ARGV[5])
|
|
16393
|
+
|
|
16394
|
+
local ttlKey = KEYS[4] .. string.len(logicalKey) .. ":" .. logicalKey
|
|
16395
|
+
|
|
16396
|
+
local existingRaw = redis.call("HGET", KEYS[2], logicalKey)
|
|
16397
|
+
if not existingRaw then
|
|
16398
|
+
return 0
|
|
16399
|
+
end
|
|
16400
|
+
|
|
16401
|
+
redis.call("HDEL", KEYS[2], logicalKey)
|
|
16402
|
+
redis.call("ZREM", KEYS[3], logicalKey)
|
|
16403
|
+
redis.call("DEL", ttlKey)
|
|
16404
|
+
|
|
16405
|
+
local version = tostring(redis.call("INCR", KEYS[1]))
|
|
16406
|
+
if reason ~= "" then
|
|
16407
|
+
redis.call(
|
|
16408
|
+
"XADD",
|
|
16409
|
+
KEYS[5],
|
|
16410
|
+
"*",
|
|
16411
|
+
"type",
|
|
16412
|
+
"delete",
|
|
16413
|
+
"key",
|
|
16414
|
+
logicalKey,
|
|
16415
|
+
"version",
|
|
16416
|
+
version,
|
|
16417
|
+
"reason",
|
|
16418
|
+
reason,
|
|
16419
|
+
"deletedAt",
|
|
16420
|
+
tostring(now)
|
|
16421
|
+
)
|
|
16422
|
+
else
|
|
16423
|
+
redis.call(
|
|
16424
|
+
"XADD",
|
|
16425
|
+
KEYS[5],
|
|
16426
|
+
"*",
|
|
16427
|
+
"type",
|
|
16428
|
+
"delete",
|
|
16429
|
+
"key",
|
|
16430
|
+
logicalKey,
|
|
16431
|
+
"version",
|
|
16432
|
+
version,
|
|
16433
|
+
"deletedAt",
|
|
16434
|
+
tostring(now)
|
|
16435
|
+
)
|
|
16436
|
+
end
|
|
16437
|
+
|
|
16438
|
+
if trimMinId ~= "" then
|
|
16439
|
+
redis.call("XTRIM", KEYS[5], "MINID", "~", trimMinId)
|
|
16440
|
+
end
|
|
16441
|
+
if maxEventLen > 0 then
|
|
16442
|
+
redis.call("XTRIM", KEYS[5], "MAXLEN", "~", tostring(maxEventLen))
|
|
16443
|
+
end
|
|
16444
|
+
|
|
16445
|
+
return 1
|
|
16446
|
+
`;
|
|
16447
|
+
var RECONCILE_SCRIPT = `
|
|
16448
|
+
local now = tonumber(ARGV[1])
|
|
16449
|
+
local batch = tonumber(ARGV[2])
|
|
16450
|
+
local trimMinId = ARGV[3]
|
|
16451
|
+
local maxEventLen = tonumber(ARGV[4])
|
|
16452
|
+
|
|
16453
|
+
local due = redis.call("ZRANGEBYSCORE", KEYS[3], "-inf", tostring(now), "LIMIT", "0", tostring(batch))
|
|
16454
|
+
local expired = 0
|
|
16455
|
+
|
|
16456
|
+
for _, logicalKey in ipairs(due) do
|
|
16457
|
+
local ttlKey = KEYS[4] .. string.len(logicalKey) .. ":" .. logicalKey
|
|
16458
|
+
local ttlExists = redis.call("EXISTS", ttlKey)
|
|
16459
|
+
if ttlExists == 0 then
|
|
16460
|
+
redis.call("ZREM", KEYS[3], logicalKey)
|
|
16461
|
+
|
|
16462
|
+
local existingRaw = redis.call("HGET", KEYS[2], logicalKey)
|
|
16463
|
+
if existingRaw then
|
|
16464
|
+
redis.call("HDEL", KEYS[2], logicalKey)
|
|
16465
|
+
local version = tostring(redis.call("INCR", KEYS[1]))
|
|
16466
|
+
redis.call(
|
|
16467
|
+
"XADD",
|
|
16468
|
+
KEYS[5],
|
|
16469
|
+
"*",
|
|
16470
|
+
"type",
|
|
16471
|
+
"expire",
|
|
16472
|
+
"key",
|
|
16473
|
+
logicalKey,
|
|
16474
|
+
"version",
|
|
16475
|
+
version,
|
|
16476
|
+
"expiredAt",
|
|
16477
|
+
tostring(now)
|
|
16478
|
+
)
|
|
16479
|
+
expired = expired + 1
|
|
16480
|
+
end
|
|
16481
|
+
end
|
|
16482
|
+
end
|
|
16483
|
+
|
|
16484
|
+
if expired > 0 and trimMinId ~= "" then
|
|
16485
|
+
redis.call("XTRIM", KEYS[5], "MINID", "~", trimMinId)
|
|
16486
|
+
end
|
|
16487
|
+
if expired > 0 and maxEventLen > 0 then
|
|
16488
|
+
redis.call("XTRIM", KEYS[5], "MAXLEN", "~", tostring(maxEventLen))
|
|
16489
|
+
end
|
|
16490
|
+
|
|
16491
|
+
return expired
|
|
16492
|
+
`;
|
|
16493
|
+
var asError5 = (error48) => error48 instanceof Error ? error48 : new Error(String(error48));
|
|
16494
|
+
// Closes a Redis client, swallowing close errors; no-op when not connected.
var safeClose3 = (client) => {
  if (!client.connected) {
    return;
  }
  try {
    client.close();
  } catch {
    // Closing is best-effort.
  }
};
|
|
16501
|
+
// Runs a Lua script via EVAL; every arg is stringified for the wire protocol.
var evalScript3 = async (script, keys, args) => {
  const stringArgs = args.map((value) => String(value));
  const payload = [script, String(keys.length), ...keys, ...stringArgs];
  return await redis7.send("EVAL", payload);
};
|
|
16504
|
+
// Performs a blocking XREAD on a throwaway client so the connection can be
// torn down when `signal` aborts. Resolves null (instead of throwing) when
// the read was cut short by an abort.
var blockingReadWithTemporaryClient2 = async (args, signal) => {
  if (signal?.aborted) {
    return null;
  }
  const client = new RedisClient3;
  const onAbort = () => safeClose3(client);
  signal?.addEventListener("abort", onAbort, { once: true });
  try {
    if (!client.connected) {
      await client.connect();
    }
    return await client.send("XREAD", args);
  } catch (err) {
    // An abort surfaces as a connection error — report it as "no data".
    if (signal?.aborted) {
      return null;
    }
    throw asError5(err);
  } finally {
    signal?.removeEventListener("abort", onAbort);
    safeClose3(client);
  }
};
|
|
16527
|
+
// Extracts the first [id, fields] pair from an XRANGE/XREVRANGE reply,
// or null when the reply is empty or malformed.
var parseFirstRangeEntry = (raw) => {
  if (!Array.isArray(raw) || raw.length === 0) {
    return null;
  }
  const [head] = raw;
  if (!Array.isArray(head) || head.length < 2) {
    return null;
  }
  const [id, fieldArray] = head;
  if (typeof id !== "string") {
    return null;
  }
  return { id, fields: fieldArrayToObject(fieldArray) };
};
|
|
16541
|
+
|
|
16542
|
+
/** Thrown when an upsert would exceed the store's maxEntries limit. */
class EphemeralCapacityError extends Error {
  constructor(message = "ephemeral store capacity reached") {
    super(message);
    this.name = EphemeralCapacityError.name;
  }
}
|
|
16548
|
+
|
|
16549
|
+
/** Thrown when a serialized payload exceeds the store's maxPayloadBytes limit. */
class EphemeralPayloadTooLargeError extends Error {
  constructor(message) {
    super(message);
    this.name = EphemeralPayloadTooLargeError.name;
  }
}
|
|
16555
|
+
// Parses a number from a raw stream-field value (number or numeric string).
// Returns null for anything that is not a finite number.
// Fix: `Number("")` and `Number("   ")` coerce to 0, so a blank field would
// previously be read as timestamp 0 (the epoch) instead of being rejected;
// blank strings now return null like any other non-numeric input.
var parseOptionalNumber = (value) => {
  if (typeof value === "number")
    return Number.isFinite(value) ? value : null;
  if (typeof value === "string") {
    if (value.trim() === "")
      return null;
    const num = Number(value);
    return Number.isFinite(num) ? num : null;
  }
  return null;
};
|
|
16564
|
+
var encodeSegment = (value) => encodeURIComponent(value);
|
|
16565
|
+
// Validates an id/tenant identifier: non-empty, at most 256 characters.
// Throws with `label` in the message on violation.
var assertIdentifier = (value, label) => {
  if (value.length === 0) {
    throw new Error(`${label} must be non-empty`);
  }
  if (value.length > 256) {
    throw new Error(`${label} too long (max 256 chars)`);
  }
};
|
|
16571
|
+
// Validates a logical entry key: non-empty, at most MAX_KEY_BYTES UTF-8 bytes.
var assertLogicalKey = (value) => {
  if (value.length === 0) {
    throw new Error("key must be non-empty");
  }
  const byteLength = textEncoder3.encode(value).byteLength;
  if (byteLength > MAX_KEY_BYTES) {
    throw new Error(`key exceeds max length (${MAX_KEY_BYTES} bytes)`);
  }
};
|
|
16578
|
+
// Creates an ephemeral (TTL-bound) key/value store. `config2.schema` is an
// object exposing `safeParse` (zod-style) used to validate every payload.
// Entries live in Redis per tenant; every mutation is mirrored to an event
// stream consumed via `reader()`.
var ephemeral = (config2) => {
  if (!Number.isFinite(config2.ttlMs) || config2.ttlMs <= 0) {
    throw new Error("ttlMs must be > 0");
  }
  assertIdentifier(config2.id, "config.id");
  const defaultTenant = config2.tenantId ?? DEFAULT_TENANT3;
  assertIdentifier(defaultTenant, "tenantId");
  // Limits fall back to module defaults when not configured.
  const maxEntries = config2.limits?.maxEntries ?? DEFAULT_MAX_ENTRIES;
  const maxPayloadBytes = config2.limits?.maxPayloadBytes ?? DEFAULT_MAX_PAYLOAD_BYTES;
  const eventRetentionMs = config2.limits?.eventRetentionMs ?? DEFAULT_EVENT_RETENTION_MS;
  const eventMaxLen = config2.limits?.eventMaxLen ?? DEFAULT_EVENT_MAXLEN;
|
|
16589
|
+
// Applies the store's default tenant and validates the resulting identifier.
const resolveTenant = (tenantId) => {
  const effective = tenantId ?? defaultTenant;
  assertIdentifier(effective, "tenantId");
  return effective;
};
|
|
16594
|
+
// Derives the Redis key names for one (tenant, store) pair. Tenant and store
// id are URI-encoded so user input cannot inject ":" key separators.
const keysForTenant = (tenantId) => {
  const base = [DEFAULT_PREFIX7, encodeSegment(tenantId), encodeSegment(config2.id)].join(":");
  return {
    seq: base + ":seq",
    state: base + ":state",
    expirations: base + ":exp",
    ttlPrefix: base + ":ttl:",
    events: base + ":events"
  };
};
|
|
16604
|
+
const trimMinId = () => `${Date.now() - eventRetentionMs}-0`;
|
|
16605
|
+
// Decodes one stored hash value back into a public entry object.
// Returns null when the JSON is corrupt or the payload no longer satisfies
// the configured schema (e.g. after a schema change).
const parseStoredEntry = (raw) => {
  try {
    const parsed = JSON.parse(raw);
    const validated = config2.schema.safeParse(parsed.data);
    if (!validated.success)
      return null;
    return {
      key: parsed.key,
      value: validated.data,
      version: String(parsed.version),
      updatedAt: Number(parsed.updatedAt),
      expiresAt: Number(parsed.expiresAt)
    };
  } catch {
    // Corrupt JSON — treat as absent.
    return null;
  }
};
|
|
16622
|
+
// Decodes an "upsert" stream entry into a public event. Returns null when the
// payload is missing, not valid JSON, fails schema validation, or the
// timestamp fields are not numeric.
const parseUpsertEvent = (entry) => {
  const rawPayload = entry.fields.payload;
  if (!rawPayload)
    return null;
  try {
    const payload = JSON.parse(rawPayload);
    const parsed = config2.schema.safeParse(payload);
    if (!parsed.success)
      return null;
    const updatedAt = parseOptionalNumber(entry.fields.updatedAt);
    const expiresAt = parseOptionalNumber(entry.fields.expiresAt);
    if (updatedAt === null || expiresAt === null)
      return null;
    return {
      type: "upsert",
      cursor: entry.id,
      entry: {
        key: entry.fields.key ?? "",
        value: parsed.data,
        version: entry.fields.version ?? "",
        updatedAt,
        expiresAt
      }
    };
  } catch {
    return null;
  }
};
|
|
16650
|
+
// Decodes a raw stream entry into a typed event based on its "type" field
// (upsert | touch | delete | expire). Unknown or malformed entries yield
// null and are skipped by the reader.
const parseEvent = (entry) => {
  const type = entry.fields.type;
  if (type === "upsert")
    return parseUpsertEvent(entry);
  if (type === "touch") {
    const expiresAt = parseOptionalNumber(entry.fields.expiresAt);
    if (expiresAt === null)
      return null;
    return {
      type,
      cursor: entry.id,
      key: entry.fields.key ?? "",
      version: entry.fields.version ?? "",
      expiresAt
    };
  }
  if (type === "delete") {
    const deletedAt = parseOptionalNumber(entry.fields.deletedAt);
    if (deletedAt === null)
      return null;
    return {
      type,
      cursor: entry.id,
      key: entry.fields.key ?? "",
      version: entry.fields.version ?? "",
      deletedAt,
      // reason is only present when the deleter supplied one.
      reason: entry.fields.reason
    };
  }
  if (type === "expire") {
    const expiredAt = parseOptionalNumber(entry.fields.expiredAt);
    if (expiredAt === null)
      return null;
    return {
      type,
      cursor: entry.id,
      key: entry.fields.key ?? "",
      version: entry.fields.version ?? "",
      expiredAt
    };
  }
  return null;
};
|
|
16693
|
+
// Per-tenant timestamp of the last reconcile pass (used for throttling below).
const lastReconcileByTenant = new Map;
// Runs one bounded expiration sweep for a tenant; returns the expired count.
const runReconcile = async (keys, now2) => {
  const raw = await evalScript3(RECONCILE_SCRIPT, [keys.seq, keys.state, keys.expirations, keys.ttlPrefix, keys.events], [now2, DEFAULT_RECONCILE_BATCH_SIZE, trimMinId(), eventMaxLen]);
  return Number(raw ?? 0);
};
|
|
16698
|
+
// Throttled reconcile: at most one sweep per tenant per
// DEFAULT_RECONCILE_INTERVAL_MS unless `force` is set. The throttle map is
// capped at MAX_RECONCILE_TENANTS by evicting the oldest-inserted tenant.
const maybeRunReconcile = async (tenantId, keys, force = false) => {
  const now2 = Date.now();
  if (!force) {
    const last = lastReconcileByTenant.get(tenantId) ?? 0;
    if (now2 - last < DEFAULT_RECONCILE_INTERVAL_MS)
      return;
  }
  // Bound memory: Map iteration order is insertion order, so the first key
  // is the oldest entry.
  if (!lastReconcileByTenant.has(tenantId) && lastReconcileByTenant.size >= MAX_RECONCILE_TENANTS) {
    const first = lastReconcileByTenant.keys().next().value;
    if (first)
      lastReconcileByTenant.delete(first);
  }
  lastReconcileByTenant.set(tenantId, now2);
  await runReconcile(keys, now2);
};
|
|
16713
|
+
// Drains all currently-due expirations: sweeps in batches until a pass
// returns less than a full batch (capped at 50 passes as a safety valve).
const runFullReconcile = async (keys) => {
  for (let passes = 0; passes < 50; ) {
    const expiredCount = await runReconcile(keys, Date.now());
    if (expiredCount < DEFAULT_RECONCILE_BATCH_SIZE) {
      break;
    }
    passes += 1;
    await Bun.sleep(1);
  }
};
|
|
16723
|
+
// Id of the newest event in the stream, or "0-0" for an empty stream.
const latestCursor = async (eventsKey) => {
  const reply = await redis7.send("XREVRANGE", [eventsKey, "+", "-", "COUNT", "1"]);
  return parseFirstRangeEntry(reply)?.id ?? "0-0";
};
|
|
16728
|
+
// Id of the oldest event at or after `cursor`, or null when none exists.
const firstAtOrAfterCursor = async (eventsKey, cursor) => {
  const reply = await redis7.send("XRANGE", [eventsKey, cursor, "+", "COUNT", "1"]);
  return parseFirstRangeEntry(reply)?.id ?? null;
};
|
|
16732
|
+
// Validates and stores one entry, emitting an "upsert" event.
// Throws EphemeralCapacityError / EphemeralPayloadTooLargeError on limit
// violations and the schema's error on invalid values. Returns the stored
// entry as parsed back from Redis.
const upsert = async (cfg) => {
  assertLogicalKey(cfg.key);
  const tenantId = resolveTenant(cfg.tenantId);
  const keys = keysForTenant(tenantId);
  // Opportunistic, throttled cleanup of expired entries for this tenant.
  await maybeRunReconcile(tenantId, keys);
  const ttlMs = cfg.ttlMs ?? config2.ttlMs;
  if (!Number.isFinite(ttlMs) || ttlMs <= 0) {
    throw new Error("ttlMs must be > 0");
  }
  const parsed = config2.schema.safeParse(cfg.value);
  if (!parsed.success)
    throw parsed.error;
  // Enforce the payload-size limit on the serialized UTF-8 byte length.
  const payloadRaw = JSON.stringify(parsed.data);
  const payloadBytes = textEncoder3.encode(payloadRaw).byteLength;
  if (payloadBytes > maxPayloadBytes) {
    throw new EphemeralPayloadTooLargeError(`payload exceeds limit (${maxPayloadBytes} bytes)`);
  }
  // The Lua script signals failures via sentinel strings (checked below).
  const raw = await evalScript3(UPSERT_SCRIPT2, [keys.seq, keys.state, keys.expirations, keys.ttlPrefix, keys.events], [Date.now(), ttlMs, payloadRaw, cfg.key, maxEntries, trimMinId(), eventMaxLen]);
  if (raw === "__ERR_CAPACITY__") {
    throw new EphemeralCapacityError(`maxEntries (${maxEntries}) reached`);
  }
  if (raw === "__ERR_PAYLOAD__") {
    throw new Error("invalid payload encoding");
  }
  const storedRaw = typeof raw === "string" ? raw : String(raw ?? "");
  const entry = parseStoredEntry(storedRaw);
  if (!entry)
    throw new Error("failed to parse stored entry");
  return entry;
};
|
|
16762
|
+
// Extends the TTL of an existing entry without changing its payload.
// Resolves { ok: false } when the key does not exist (or the stored value is
// corrupt); otherwise { ok: true, version, expiresAt }.
const touch = async (cfg) => {
  assertLogicalKey(cfg.key);
  const tenantId = resolveTenant(cfg.tenantId);
  const keys = keysForTenant(tenantId);
  await maybeRunReconcile(tenantId, keys);
  const ttlMs = cfg.ttlMs ?? config2.ttlMs;
  if (!Number.isFinite(ttlMs) || ttlMs <= 0) {
    throw new Error("ttlMs must be > 0");
  }
  // TOUCH_SCRIPT2 returns nil when the key is absent, otherwise an encoded
  // { version, expiresAt } pair.
  const raw = await evalScript3(TOUCH_SCRIPT2, [keys.seq, keys.state, keys.expirations, keys.ttlPrefix, keys.events], [Date.now(), ttlMs, cfg.key, trimMinId(), eventMaxLen]);
  if (!raw)
    return { ok: false };
  try {
    const parsed = JSON.parse(typeof raw === "string" ? raw : String(raw));
    return { ok: true, version: String(parsed.version), expiresAt: Number(parsed.expiresAt) };
  } catch {
    // Unparsable reply — report as "not touched" rather than throwing.
    return { ok: false };
  }
};
|
|
16781
|
+
// Deletes a logical key (emitting a "delete" event with the optional reason).
// Resolves true when an entry was actually removed.
const remove = async (cfg) => {
  assertLogicalKey(cfg.key);
  const tenantId = resolveTenant(cfg.tenantId);
  const keys = keysForTenant(tenantId);
  await maybeRunReconcile(tenantId, keys);
  const removed = await evalScript3(
    REMOVE_SCRIPT,
    [keys.seq, keys.state, keys.expirations, keys.ttlPrefix, keys.events],
    [Date.now(), cfg.key, cfg.reason ?? "", trimMinId(), eventMaxLen]
  );
  return Number(removed) > 0;
};
|
|
16789
|
+
// Returns all live entries for a tenant (after fully draining expirations),
// sorted by key, together with the event-stream cursor to resume from.
const snapshot = async (cfg = {}) => {
  const tenantId = resolveTenant(cfg.tenantId);
  const keys = keysForTenant(tenantId);
  // Expire everything that is due first so the snapshot is consistent.
  await runFullReconcile(keys);
  const rawEntries = await redis7.hvals(keys.state);
  const entries = rawEntries
    .map((raw) => parseStoredEntry(String(raw)))
    .filter(Boolean);
  entries.sort((a, b) => a.key.localeCompare(b.key));
  return {
    entries,
    cursor: await latestCursor(keys.events)
  };
};
|
|
16807
|
+
// Creates an event reader for one tenant. `after` resumes from a previous
// cursor ("$" or absent = only new events). Exposes:
//   recv(cfg)   — next event or null (honors wait/timeoutMs/signal)
//   stream(cfg) — async generator over events.
// Emits a one-shot "overflow" event when the requested resume point has been
// trimmed out of the stream, then continues from the live tail.
const reader = (readerCfg = {}) => {
  const tenantId = resolveTenant(readerCfg.tenantId);
  const keys = keysForTenant(tenantId);
  let cursor = readerCfg.after ?? "$";
  // Pending overflow notification, delivered once by the next recv().
  let overflowPending = null;
  let replayChecked = false;
  let anchored = false;
  // Dedicated connection for blocking XREADs (lazily created, reused).
  let blockingClient = null;
  const resetBlockingClient = () => {
    if (!blockingClient)
      return;
    safeClose3(blockingClient);
    blockingClient = null;
  };
  const ensureBlockingClient = async () => {
    if (blockingClient?.connected)
      return blockingClient;
    resetBlockingClient();
    blockingClient = new RedisClient3;
    await blockingClient.connect();
    return blockingClient;
  };
  // One-time check whether the `after` cursor is still present in the stream;
  // if it was trimmed away, queue an overflow event and jump to the live tail.
  const checkReplayGap = async () => {
    if (replayChecked)
      return;
    replayChecked = true;
    const after = readerCfg.after;
    if (!after || after === "$")
      return;
    const firstAtOrAfter = await firstAtOrAfterCursor(keys.events, after);
    if (!firstAtOrAfter)
      return;
    // "0-0" means "replay everything" — any existing entry implies a gap;
    // otherwise the exact `after` id must still exist.
    if (after === "0-0" || firstAtOrAfter !== after) {
      const liveCursor = await latestCursor(keys.events);
      overflowPending = {
        type: "overflow",
        cursor: liveCursor,
        after,
        firstAvailable: firstAtOrAfter
      };
      cursor = liveCursor;
    }
  };
  // Resolve the "$" placeholder to a concrete stream id once, so later reads
  // do not silently skip events published between calls.
  const anchorLiveCursor = async () => {
    if (anchored)
      return;
    anchored = true;
    if (cursor !== "$")
      return;
    cursor = await latestCursor(keys.events);
  };
  const recv = async (cfg = {}) => {
    await anchorLiveCursor();
    await maybeRunReconcile(tenantId, keys);
    await checkReplayGap();
    if (overflowPending) {
      const event = overflowPending;
      overflowPending = null;
      return event;
    }
    const wait = cfg.wait ?? true;
    const timeoutMs = cfg.timeoutMs ?? DEFAULT_TIMEOUT_MS2;
    const args = wait ? ["COUNT", "1", "BLOCK", timeoutMs.toString(), "STREAMS", keys.events, cursor] : ["COUNT", "1", "STREAMS", keys.events, cursor];
    // Three read paths: abortable (temporary client), blocking (persistent
    // dedicated client), or non-blocking on the shared connection.
    const result = cfg.signal ? await blockingReadWithTemporaryClient2(args, cfg.signal) : wait ? await (async () => {
      const client = await ensureBlockingClient();
      try {
        return await client.send("XREAD", args);
      } catch (error48) {
        // Drop the (possibly broken) client so the next call reconnects.
        resetBlockingClient();
        throw asError5(error48);
      }
    })() : await redis7.send("XREAD", args);
    const entry = parseFirstStreamEntry(result);
    if (!entry)
      return null;
    // Advance the cursor even when the entry does not decode, so a malformed
    // event cannot wedge the reader.
    cursor = entry.id;
    const parsed = parseEvent(entry);
    if (!parsed)
      return null;
    return parsed;
  };
  // Async generator over events. In waiting mode transport errors are retried
  // indefinitely (until aborted); in non-waiting mode it stops when drained.
  const stream = async function* (cfg = {}) {
    const wait = cfg.wait ?? true;
    try {
      while (!cfg.signal?.aborted) {
        const event = wait ? await retry(async () => await recv(cfg), {
          attempts: Number.POSITIVE_INFINITY,
          signal: cfg.signal,
          retryIf: isRetryableTransportError
        }) : await recv(cfg);
        if (event) {
          yield event;
          continue;
        }
        if (!wait)
          break;
      }
    } finally {
      // Always release the dedicated blocking connection.
      resetBlockingClient();
    }
  };
  return { recv, stream };
};
|
|
16910
|
+
// Public API of the ephemeral store.
return {
  upsert,
  touch,
  remove,
  snapshot,
  reader
};
};
|
|
15058
16918
|
export {
|
|
15059
16919
|
topic,
|
|
16920
|
+
scheduler,
|
|
16921
|
+
retry,
|
|
15060
16922
|
ratelimit,
|
|
15061
16923
|
queue,
|
|
15062
16924
|
mutex,
|
|
15063
16925
|
job,
|
|
16926
|
+
isRetryableTransportError,
|
|
16927
|
+
ephemeral,
|
|
15064
16928
|
RateLimitError,
|
|
15065
|
-
LockError
|
|
16929
|
+
LockError,
|
|
16930
|
+
EphemeralPayloadTooLargeError,
|
|
16931
|
+
EphemeralCapacityError,
|
|
16932
|
+
DEFAULT_RETRY_OPTIONS
|
|
15066
16933
|
};
|