@aria-cli/wireguard 1.0.36 → 1.0.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.d.ts +59 -0
- package/index.js +52 -52
- package/npm/darwin-arm64/package.json +18 -0
- package/npm/darwin-arm64/wireguard.darwin-arm64.node +0 -0
- package/npm/darwin-x64/package.json +18 -0
- package/npm/darwin-x64/wireguard.darwin-x64.node +0 -0
- package/npm/linux-arm64-gnu/package.json +18 -0
- package/npm/linux-arm64-gnu/wireguard.linux-arm64-gnu.node +0 -0
- package/npm/linux-x64-gnu/package.json +18 -0
- package/npm/linux-x64-gnu/wireguard.linux-x64-gnu.node +0 -0
- package/npm/win32-x64-msvc/package.json +18 -0
- package/npm/win32-x64-msvc/wireguard.win32-x64-msvc.node +0 -0
- package/package.json +11 -16
- package/wireguard.darwin-arm64.node +0 -0
- package/wireguard.darwin-x64.node +0 -0
- package/wireguard.linux-arm64-gnu.node +0 -0
- package/wireguard.linux-x64-gnu.node +0 -0
- package/wireguard.win32-x64-msvc.node +0 -0
- package/dist/.aria-build-stamp.json +0 -4
- package/dist/bootstrap-authority.js +0 -47
- package/dist/bootstrap-tls.js +0 -69
- package/dist/db-owner-fencing.js +0 -44
- package/dist/derp-relay.js +0 -311
- package/dist/index.js +0 -100
- package/dist/nat.js +0 -397
- package/dist/network-state-store.js +0 -248
- package/dist/network.js +0 -3391
- package/dist/peer-discovery.js +0 -486
- package/dist/resilient-tunnel.js +0 -389
- package/dist/route-ownership.js +0 -79
- package/dist/tunnel.js +0 -474
package/dist/resilient-tunnel.js
DELETED
|
@@ -1,389 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
/**
|
|
3
|
-
* ResilientTunnel — wraps SecureTunnel with dead peer detection, auto-reconnection,
|
|
4
|
-
* and outbound message queuing.
|
|
5
|
-
*
|
|
6
|
-
* State machine:
|
|
7
|
-
* CONNECTING → HANDSHAKING → CONNECTED → DISCONNECTED → RECONNECTING → HANDSHAKING → CONNECTED
|
|
8
|
-
* ↓
|
|
9
|
-
* DEAD (max retries exhausted)
|
|
10
|
-
*/
|
|
11
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
|
-
exports.ResilientTunnel = void 0;
|
|
13
|
-
const node_events_1 = require("node:events");
|
|
14
|
-
const tunnel_js_1 = require("./tunnel.js");
|
|
15
|
-
class ResilientTunnel extends node_events_1.EventEmitter {
    // Options forwarded verbatim to every SecureTunnel instance we create.
    options;
    // Current inner SecureTunnel, or null while disconnected/stopped.
    tunnel = null;
    // State machine value; see the module header for the transition diagram.
    _state = "connecting";
    // Outbound messages buffered while not connected:
    // entries are { data, enqueuedAt, sendAttempts } (see enqueue()).
    queue = [];
    // Running byte total of queued payloads, bounded by MAX_QUEUE_BYTES.
    queueBytes = 0;
    // Consecutive failed reconnect attempts; reset only in finalizeReconnect().
    reconnectAttempts = 0;
    reconnectTimer = null;
    deadPeerTimer = null;
    // Timestamp (ms) of the last observed traffic from the peer.
    lastPacketAt = 0;
    // Lifetime count of successful reconnections (reported via getStats()).
    reconnections = 0;
    // True between a reconnect's socket bind and proof of peer liveness;
    // gates promoteReconnectReady()/requestSessionProof().
    awaitingReplayReadiness = false;
    // Set once by stop(); permanently disables the tunnel.
    stopped = false;
    static MAX_QUEUE_SIZE = 1000;
    static MAX_QUEUE_BYTES = 1_048_576; // 1MB
    static MAX_RECONNECT_ATTEMPTS = 10;
    // Safety-net timeout: fires only if neither peerActivity events nor the
    // WG state machine's own REJECT_AFTER_TIME (180s) detect the dead peer.
    // Must be longer than REJECT_AFTER_TIME to avoid premature disconnection
    // when keepalives can't reach us (e.g. stale endpoint from STUN/WAN IP).
    static DEAD_PEER_TIMEOUT_MS = 300_000; // 5 minutes
    static MAX_BACKOFF_MS = 60_000;
    static BASE_BACKOFF_MS = 1_000;
    /** @param options SecureTunnel options, stored and reused for every reconnect. */
    constructor(options) {
        super();
        this.options = options;
    }
    /** Start the resilient tunnel — creates underlying SecureTunnel and waits for remote proof before CONNECTED */
    async start() {
        if (this.stopped)
            throw new Error("Tunnel has been stopped");
        this.setState("connecting");
        const port = await this.createAndStartTunnel();
        // A "handshake" event during createAndStartTunnel() may already have
        // promoted us to "connected"; only probe if we are still connecting.
        if (this._state === "connecting") {
            this.setState("handshaking");
            this.requestSessionProof();
        }
        return port;
    }
    /** Send plaintext through the tunnel. Queues if not yet connected or reconnecting, throws if dead. */
    sendPlaintext(data) {
        if (this._state === "dead") {
            throw new Error("Tunnel is dead — max reconnection attempts exhausted");
        }
        if (this._state === "connecting" ||
            this._state === "handshaking" ||
            this._state === "reconnecting" ||
            this._state === "disconnected") {
            this.enqueue(data);
            // Don't call requestSessionProof() here — the initial handshake was
            // already triggered by start(). Re-probing while a handshake is in
            // progress risks calling encrypt() after the native tunnel processed
            // a peer's init (state=None), which starts a NEW handshake that
            // overwrites the pending initiator state in Handshake::previous.
            // boringtun internally queues data sent via encrypt() during handshake.
            return;
        }
        if (this._state !== "connected" || !this.tunnel) {
            throw new Error(`Cannot send in state: ${this._state}`);
        }
        // Capture the tunnel reference: a synchronous disconnect inside
        // sendPlaintext() can swap this.tunnel out from under us.
        const activeTunnel = this.tunnel;
        try {
            activeTunnel.sendPlaintext(data);
        }
        catch (error) {
            // Preserve the payload for replay after reconnect, then surface the error.
            if (!this.stopped) {
                this.enqueue(data);
            }
            throw error;
        }
        // If the send synchronously triggered a disconnect, preserve the payload
        // for replay on the next reconnect instead of dropping the in-flight frame.
        if (!this.stopped && this._state !== "connected") {
            this.enqueue(data);
            return;
        }
        if (!this.stopped && this.tunnel !== activeTunnel) {
            this.enqueue(data);
        }
    }
    /** Stop the tunnel permanently */
    stop() {
        this.stopped = true;
        this.clearTimers();
        if (this.tunnel) {
            this.tunnel.removeAllListeners();
            this.tunnel.stop();
            this.tunnel = null;
        }
        this.queue = [];
        this.queueBytes = 0;
    }
    /** Whether the tunnel is actively connected */
    get isActive() {
        return this._state === "connected";
    }
    /** Current tunnel state */
    getState() {
        return this._state;
    }
    /** Stats including reconnection count and queue depth */
    getStats() {
        return {
            state: this._state,
            reconnections: this.reconnections,
            reconnectAttempts: this.reconnectAttempts,
            queueDepth: this.queue.length,
            queueBytes: this.queueBytes,
        };
    }
    /** Get the underlying SecureTunnel (for event wiring, e.g. "plaintext") */
    getInnerTunnel() {
        return this.tunnel;
    }
    // ── Internal ─────────────────────────────────────────────────────
    /** Transition the state machine and emit "stateChange"; no-op if unchanged. */
    setState(newState) {
        const prev = this._state;
        if (prev === newState)
            return;
        this._state = newState;
        this.emit("stateChange", newState, prev);
    }
    /**
     * Nudge WireGuard into handshaking by sending an empty frame.
     * Only valid while handshaking, or while reconnecting with
     * awaitingReplayReadiness set; best-effort (failures are swallowed).
     */
    requestSessionProof() {
        if (this.stopped || !this.tunnel) {
            return;
        }
        if (this._state !== "handshaking" &&
            !(this._state === "reconnecting" && this.awaitingReplayReadiness)) {
            return;
        }
        try {
            // Empty payload is a no-op at the application layer but still forces
            // WireGuard to emit handshake traffic. This primes the route without
            // falsely promoting the tunnel to CONNECTED.
            this.tunnel.sendPlaintext(Buffer.alloc(0));
        }
        catch {
            // Best-effort only: later outbound queueing or reconnect backoff can retry.
        }
    }
    /**
     * Replace any existing inner tunnel with a fresh SecureTunnel, wire its
     * events, and start it. Returns the bound UDP port; on failure the new
     * tunnel is fully disposed before the error is rethrown.
     */
    async createAndStartTunnel() {
        if (this.tunnel) {
            this.tunnel.removeAllListeners();
            this.tunnel.stop();
            this.tunnel = null;
        }
        const tunnel = new tunnel_js_1.SecureTunnel(this.options);
        this.tunnel = tunnel;
        // Wire events from inner tunnel
        tunnel.on("plaintext", (data) => {
            this.lastPacketAt = Date.now();
            this.resetDeadPeerTimer();
            this.promoteReconnectReady();
            this.emit("plaintext", data);
        });
        tunnel.on("handshake", () => {
            this.lastPacketAt = Date.now();
            this.resetDeadPeerTimer();
            this.promoteInitialReady();
            this.promoteReconnectReady();
            this.emit("handshake");
        });
        // WG keepalive responses produce write_to_tunnel with empty data.
        // Without this, the dead-peer timer fires after 75s of no real
        // plaintext, even though the peer is alive and exchanging keepalives.
        tunnel.on("peerActivity", () => {
            this.lastPacketAt = Date.now();
            this.resetDeadPeerTimer();
        });
        tunnel.on("error", (err) => {
            this.emit("error", err);
            if (!this.stopped &&
                (this._state === "handshaking" ||
                    this._state === "connected" ||
                    this._state === "reconnecting")) {
                this.handleDisconnect();
            }
        });
        tunnel.on("close", () => {
            if (!this.stopped &&
                (this._state === "handshaking" ||
                    this._state === "connected" ||
                    this._state === "reconnecting")) {
                this.handleDisconnect();
            }
        });
        try {
            const port = await tunnel.start();
            return port;
        }
        catch (err) {
            // CRITICAL: Clean up listeners on failure to prevent accumulation
            // after N failed reconnections (N×4 orphaned listeners → OOM)
            tunnel.removeAllListeners();
            tunnel.stop();
            this.tunnel = null;
            throw err;
        }
    }
    /** Detach listeners, stop, and drop the current inner tunnel (if any). */
    disposeCurrentTunnel() {
        if (!this.tunnel) {
            return;
        }
        this.tunnel.removeAllListeners();
        this.tunnel.stop();
        this.tunnel = null;
    }
    /** Tear down the inner tunnel, enter "disconnected", and schedule a reconnect. */
    handleDisconnect() {
        this.awaitingReplayReadiness = false;
        this.clearDeadPeerTimer();
        this.disposeCurrentTunnel();
        this.setState("disconnected");
        this.attemptReconnect();
    }
    /** Promote the FIRST connection to "connected" once a handshake completes. */
    promoteInitialReady() {
        if (this.stopped)
            return;
        if (this._state !== "connecting" && this._state !== "handshaking") {
            return;
        }
        this.setState("connected");
        this.resetDeadPeerTimer();
        this.flushQueue();
    }
    /** Promote a RECONNECT to "connected" once the peer proves liveness. */
    promoteReconnectReady() {
        if (!this.awaitingReplayReadiness || this.stopped) {
            return;
        }
        this.finalizeReconnect();
    }
    /** Commit a successful reconnect: reset counters, flush queue, emit "reconnected". */
    finalizeReconnect() {
        this.awaitingReplayReadiness = false;
        this.reconnectAttempts = 0;
        this.reconnections++;
        this.setState("connected");
        this.resetDeadPeerTimer();
        this.flushQueue();
        this.emit("reconnected");
    }
    /**
     * Schedule one reconnect attempt with exponential backoff
     * (BASE_BACKOFF_MS * 2^attempts, capped at MAX_BACKOFF_MS). Transitions
     * to "dead" once MAX_RECONNECT_ATTEMPTS is exhausted. Idempotent while a
     * timer is already pending.
     */
    attemptReconnect() {
        if (this.stopped)
            return;
        if (this.reconnectTimer)
            return;
        if (this.reconnectAttempts >= ResilientTunnel.MAX_RECONNECT_ATTEMPTS) {
            this.setState("dead");
            this.emit("dead");
            return;
        }
        this.setState("reconnecting");
        const backoff = Math.min(ResilientTunnel.BASE_BACKOFF_MS * Math.pow(2, this.reconnectAttempts), ResilientTunnel.MAX_BACKOFF_MS);
        this.reconnectAttempts++;
        this.reconnectTimer = setTimeout(async () => {
            this.reconnectTimer = null;
            if (this.stopped)
                return;
            try {
                this.awaitingReplayReadiness = true;
                await this.createAndStartTunnel();
                // NOTE: Do NOT reset reconnectAttempts here. createAndStartTunnel()
                // succeeding only means the UDP socket bound — the handshake has NOT
                // completed yet. Reset happens in finalizeReconnect() when the peer
                // proves liveness. Without this, an unreachable peer causes an infinite
                // reconnect loop: start succeeds → error fires → counter resets → repeat.
                if (this._state === "connected") {
                    return;
                }
                this.requestSessionProof();
            }
            catch {
                this.awaitingReplayReadiness = false;
                // Reconnect failed — try again
                this.attemptReconnect();
            }
        }, backoff);
    }
    /** (Re)arm the dead-peer safety net; only runs while "connected". */
    resetDeadPeerTimer() {
        this.clearDeadPeerTimer();
        if (this.stopped || this._state !== "connected")
            return;
        this.deadPeerTimer = setTimeout(() => {
            if (this._state === "connected" && !this.stopped) {
                this.handleDisconnect();
            }
        }, ResilientTunnel.DEAD_PEER_TIMEOUT_MS);
    }
    /** Cancel the dead-peer timer if armed. */
    clearDeadPeerTimer() {
        if (this.deadPeerTimer) {
            clearTimeout(this.deadPeerTimer);
            this.deadPeerTimer = null;
        }
    }
    /** Cancel both the dead-peer and reconnect timers. */
    clearTimers() {
        this.clearDeadPeerTimer();
        if (this.reconnectTimer) {
            clearTimeout(this.reconnectTimer);
            this.reconnectTimer = null;
        }
    }
    /**
     * Buffer an outbound payload for replay after reconnect. Drops the NEW
     * message (drop-newest policy) and emits "queueOverflow" when either the
     * count or byte cap would be exceeded.
     */
    enqueue(data) {
        // Drop expired messages first
        this.purgeExpired();
        // Check capacity
        if (this.queue.length >= ResilientTunnel.MAX_QUEUE_SIZE) {
            process.stderr.write(`[ResilientTunnel] dropping message, queue overflow (maxSize: ${this.queue.length}/${ResilientTunnel.MAX_QUEUE_SIZE})\n`);
            this.emit("queueOverflow", { reason: "maxSize", dropped: 1 });
            return;
        }
        if (this.queueBytes + data.length > ResilientTunnel.MAX_QUEUE_BYTES) {
            process.stderr.write(`[ResilientTunnel] dropping message, queue overflow (maxBytes: ${this.queueBytes + data.length}/${ResilientTunnel.MAX_QUEUE_BYTES})\n`);
            this.emit("queueOverflow", { reason: "maxBytes", dropped: 1 });
            return;
        }
        this.queue.push({ data, enqueuedAt: Date.now(), sendAttempts: 0 });
        this.queueBytes += data.length;
    }
    /**
     * Drop queued messages whose ttl has elapsed and emit "messagesExpired".
     * NOTE(review): enqueue() in this file never sets msg.ttl, so expiry is
     * inert unless ttl is injected elsewhere — confirm against callers.
     */
    purgeExpired() {
        const now = Date.now();
        const before = this.queue.length;
        this.queue = this.queue.filter((msg) => {
            if (msg.ttl !== undefined && now - msg.enqueuedAt > msg.ttl) {
                this.queueBytes -= msg.data.length;
                return false;
            }
            return true;
        });
        const expired = before - this.queue.length;
        if (expired > 0) {
            this.emit("messagesExpired", { count: expired });
        }
    }
    /**
     * Replay the buffered queue in FIFO order over the current tunnel. On a
     * send failure (or if a send knocks us out of "connected"), the failed
     * message is retried up to MAX_SEND_ATTEMPTS and everything after it is
     * requeued at the front, preserving order; emits "queueFlushPartialFailure".
     */
    flushQueue() {
        if (!this.tunnel || this._state !== "connected")
            return;
        // Purge expired before flushing
        this.purgeExpired();
        const toSend = this.queue.splice(0);
        this.queueBytes = 0;
        const MAX_SEND_ATTEMPTS = 3;
        const requeueFrom = (startIndex, currentMessage) => {
            const replayQueue = [];
            if (currentMessage) {
                currentMessage.sendAttempts++;
                if (currentMessage.sendAttempts < MAX_SEND_ATTEMPTS) {
                    replayQueue.push(currentMessage);
                }
            }
            replayQueue.push(...toSend.slice(startIndex + (currentMessage ? 1 : 0)));
            if (replayQueue.length > 0) {
                this.queue.unshift(...replayQueue);
                this.queueBytes = replayQueue.reduce((sum, message) => sum + message.data.length, this.queueBytes);
                this.emit("queueFlushPartialFailure", {
                    failed: replayQueue.length,
                    total: toSend.length,
                });
            }
        };
        for (let index = 0; index < toSend.length; index += 1) {
            const msg = toSend[index];
            try {
                this.tunnel.sendPlaintext(msg.data);
            }
            catch {
                requeueFrom(index, msg);
                return;
            }
            if (this._state !== "connected") {
                requeueFrom(index, msg);
                return;
            }
        }
    }
}
|
|
388
|
-
exports.ResilientTunnel = ResilientTunnel;
|
|
389
|
-
//# sourceMappingURL=resilient-tunnel.js.map
|
package/dist/route-ownership.js
DELETED
|
@@ -1,79 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.getDirectRouteKey = getDirectRouteKey;
|
|
4
|
-
exports.resolveDirectRouteOwnership = resolveDirectRouteOwnership;
|
|
5
|
-
const network_runtime_1 = require("@aria-cli/tools/network-runtime");
|
|
6
|
-
// Authoritativeness ranking for route-claim statuses: higher value wins when
// compareRouteClaims arbitrates between claims for the same endpoint.
const STATUS_PRECEDENCE = {
    pending_verification: 5,
    pending_tunnel: 4,
    active: 3,
    pending: 2,
    revoked: 1,
};
|
13
|
-
/**
 * Derive the canonical "host:port" key identifying a claim's direct route.
 * Returns null when the canonicalized endpoint lacks a host or a finite
 * numeric port.
 */
function getDirectRouteKey(input) {
    const canonical = (0, network_runtime_1.canonicalizeAuthoritativeDirectEndpoint)(input);
    const host = canonical.endpointHost;
    const port = canonical.endpointPort;
    const usable = Boolean(host) && typeof port === "number" && Number.isFinite(port);
    return usable ? `${host}:${port}` : null;
}
|
|
20
|
-
/**
 * Comparator ordering route claims from most to least authoritative:
 * higher STATUS_PRECEDENCE first, then higher endpointRevision (default 0),
 * then most recently updated (falling back to createdAt), then most recently
 * created, with a stable publicKey tie-break last.
 */
function compareRouteClaims(a, b) {
    const byStatus = STATUS_PRECEDENCE[b.status] - STATUS_PRECEDENCE[a.status];
    if (byStatus !== 0) {
        return byStatus;
    }
    const byRevision = (b.endpointRevision ?? 0) - (a.endpointRevision ?? 0);
    if (byRevision !== 0) {
        return byRevision;
    }
    const byUpdated = (b.updatedAt ?? b.createdAt) - (a.updatedAt ?? a.createdAt);
    if (byUpdated !== 0) {
        return byUpdated;
    }
    const byCreated = b.createdAt - a.createdAt;
    if (byCreated !== 0) {
        return byCreated;
    }
    return a.publicKey.localeCompare(b.publicKey);
}
|
40
|
-
/**
 * Decide, for every claim, whether it currently owns its direct route or has
 * been superseded by a more authoritative claim for the same endpoint.
 * Claims with no derivable route key are trivially "current".
 * @returns Map keyed by claim publicKey → ownership decision.
 */
function resolveDirectRouteOwnership(claims) {
    const decisions = new Map();
    const buckets = new Map();
    // Partition: keyless claims resolve immediately; the rest are grouped
    // per route key so contention can be arbitrated bucket by bucket.
    for (const claim of claims) {
        const routeKey = getDirectRouteKey(claim);
        if (routeKey === null) {
            decisions.set(claim.publicKey, {
                routeKey: null,
                ownership: "current",
                ownerPublicKey: claim.publicKey,
            });
        }
        else {
            const bucket = buckets.get(routeKey);
            if (bucket) {
                bucket.push(claim);
            }
            else {
                buckets.set(routeKey, [claim]);
            }
        }
    }
    for (const [routeKey, bucket] of buckets) {
        // Rank a copy so the input bucket is never mutated.
        const ranked = [...bucket].sort(compareRouteClaims);
        if (ranked.length === 0) {
            continue;
        }
        const [owner, ...losers] = ranked;
        decisions.set(owner.publicKey, {
            routeKey,
            ownership: "current",
            ownerPublicKey: owner.publicKey,
        });
        for (const loser of losers) {
            decisions.set(loser.publicKey, {
                routeKey,
                ownership: "superseded",
                ownerPublicKey: owner.publicKey,
                supersededByPublicKey: owner.publicKey,
            });
        }
    }
    return decisions;
}
|
79
|
-
//# sourceMappingURL=route-ownership.js.map
|