@unicitylabs/nostr-js-sdk 0.4.0 → 0.5.0-dev.2
- package/dist/browser/index.js +370 -55
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +370 -55
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +370 -55
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +370 -55
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2
```diff
@@ -9538,6 +9538,16 @@
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
```
```diff
@@ -9577,6 +9587,30 @@
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
```
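A minimal rotation flow using the two accessors above; `LocalKeyManager` and `newSecretKey` are hypothetical stand-ins for whatever `KeyManager` implementation the application uses:

```ts
// Rotate the signing key without tearing down relay connections.
const previous = client.getKeyManager();
client.setKeyManager(new LocalKeyManager(newSecretKey)); // hypothetical impl
// From here on, publishes, NIP-42 AUTH responses, and the keepalive
// ping's authors filter all use the new key; already-open
// subscriptions keep streaming and are not re-issued.
console.log('rotated from', previous.getPublicKeyHex(),
            'to', client.getKeyManager().getPublicKeyHex());
```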
```diff
@@ -9620,13 +9654,6 @@
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
```
```diff
@@ -9663,11 +9690,41 @@
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            //   A) createWebSocket resolves AFTER the timeout fired.
+            //   B) createWebSocket resolves BEFORE the timeout, but
+            //      `onopen` fires AFTER the timeout fired.
+            //   C) createWebSocket resolves and `onopen` fires BEFORE the
+            //      timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             createWebSocket(url)
                 .then((socket) => {
+                if (timedOut) {
+                    // Caller already saw the rejection. Discard the late
+                    // socket so we don't leak it.
+                    try {
+                        socket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                    return;
+                }
+                pendingSocket = socket;
                 const relay = {
                     url,
                     socket,
```
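The same guard pattern, reduced to its skeleton as a sketch; `openSocket` is a hypothetical stand-in for the SDK's `createWebSocket` factory, not part of the diff:

```ts
declare function openSocket(url: string): Promise<WebSocket>;

function connectWithTimeout(url: string, ms: number): Promise<WebSocket> {
    return new Promise((resolve, reject) => {
        let timedOut = false;
        let pending: WebSocket | null = null;
        const timer = setTimeout(() => {
            timedOut = true;                  // set the flag first,
            pending?.close(1000, 'timeout');  // then reap any half-open socket
            reject(new Error(`timeout connecting to ${url}`));
        }, ms);
        openSocket(url).then((sock) => {
            if (timedOut) {                   // race A: factory resolved late
                sock.close(1000, 'timeout');
                return;
            }
            pending = sock;
            sock.onopen = () => {
                if (timedOut) {               // race B: onopen fired late
                    sock.close(1000, 'timeout');
                    return;
                }
                clearTimeout(timer);          // race C: success path
                resolve(sock);
            };
        });
    });
}
```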
```diff
@@ -9677,9 +9734,30 @@
                     reconnectTimer: null,
                     pingTimer: null,
                     lastPongTime: Date.now(),
+                    unansweredPings: 0,
                     wasConnected: existingRelay?.wasConnected ?? false,
+                    // Reset on every new connection: a relay's per-connection
+                    // sub-slot accounting is fresh, so previously-rejected REQs
+                    // should be re-issued on the new socket.
+                    closedSubIds: new Set(),
+                    eosedSubIds: new Set(),
                 };
                 socket.onopen = () => {
+                    // The `.then` block already guards against a socket
+                    // arriving after the connection timeout, but the socket
+                    // can also be created BEFORE the timeout while
+                    // `onopen` fires AFTER the timeout has rejected the
+                    // outer promise. Without this second guard we'd register
+                    // the relay, start a pingTimer, and resubscribe — orphan
+                    // background resources the caller can't see or clean up
+                    // because their connect() call already saw a rejection.
+                    if (timedOut) {
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
                     clearTimeout(timeoutId);
                     relay.connected = true;
                     relay.reconnectAttempts = 0; // Reset on successful connection
```
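The per-relay record now carries a ping counter and two marker Sets. A rough TypeScript shape for orientation; the field names come from the diff, the exact types are assumptions:

```ts
interface RelayConnection {
    url: string;
    socket: WebSocket;
    connected: boolean;
    reconnectTimer: ReturnType<typeof setTimeout> | null;
    pingTimer: ReturnType<typeof setInterval> | null;
    lastPongTime: number;
    unansweredPings: number;    // ++ per ping REQ, zeroed on any inbound frame
    wasConnected: boolean;
    closedSubIds: Set<string>;  // sub_ids this relay terminally CLOSED
    eosedSubIds: Set<string>;   // sub_ids this relay has EOSE'd
}
```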
```diff
@@ -9704,10 +9782,11 @@
                 socket.onmessage = (event) => {
                     try {
                         const data = extractMessageData(event);
-                        // Update last pong time on any message (relay is alive)
+                        // Update last pong time and reset unanswered pings on any message (relay is alive)
                         const r = this.relays.get(url);
                         if (r) {
                             r.lastPongTime = Date.now();
+                            r.unansweredPings = 0;
                         }
                         this.handleRelayMessage(url, data);
                     }
```
```diff
@@ -9719,9 +9798,40 @@
                     const wasConnected = relay.connected;
                     relay.connected = false;
                     this.stopPingTimer(url);
+                    // Pre-onopen close: TCP handshake failure or relay
+                    // immediately closed the WS during the upgrade. Without
+                    // this, the connectToRelay promise stays pending until
+                    // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                    // lets the caller see the failure promptly and retry.
+                    if (!wasConnected && !timedOut) {
+                        timedOut = true;
+                        clearTimeout(timeoutId);
+                        reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                    }
                     if (wasConnected) {
                         const reason = event?.reason || 'Connection closed';
                         this.emitConnectionEvent('disconnect', url, reason);
+                        // Re-trigger the all-done check on every active sub.
+                        // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                        // from listener callbacks (EOSE / CLOSED via onError);
+                        // a socket that drops without sending either would
+                        // otherwise leave the query hanging until
+                        // queryTimeoutMs even though the disconnected relay no
+                        // longer counts toward "still pending" relays. Firing
+                        // a synthetic onError gives every active sub a chance
+                        // to re-evaluate now that the relay set has shrunk.
+                        // Include the relay URL so listeners in a multi-relay
+                        // client can attribute which relay dropped.
+                        const inflight = Array.from(this.subscriptions.entries());
+                        for (const [subId, sub] of inflight) {
+                            try {
+                                sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                            }
+                            catch {
+                                // Ignore listener errors — we're notifying
+                                // best-effort.
+                            }
+                        }
                     }
                     if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                         this.scheduleReconnect(url);
```
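A listener that uses the URL-bearing synthetic message might look like this sketch; `client` and `filter` are assumed, and the message format is the one built in the hunk above:

```ts
client.subscribe(filter, {
    onEvent: (event) => { /* handle event */ },
    onError: (subId, message) => {
        // Synthetic drop notifications embed the relay URL, e.g.
        //   "Relay disconnected (wss://relay.example.com): Connection closed"
        if (message.startsWith('Relay disconnected (')) {
            console.warn(`${subId}: one relay dropped; others may still deliver`);
        }
    },
});
```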
```diff
@@ -9733,7 +9843,11 @@
                         reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                     }
                 };
-                this.relays
+                // Note: we do NOT register the relay in `this.relays` here —
+                // only after `onopen` fires successfully. Registering eagerly
+                // (before onopen) would leak the relay into the global map
+                // even when the connection setup times out and the caller's
+                // promise has already rejected.
             })
                 .catch((error) => {
                 clearTimeout(timeoutId);
```
```diff
@@ -9791,11 +9905,16 @@
                 this.stopPingTimer(url);
                 return;
             }
-            // Check if we've received any message recently
             const timeSinceLastPong = Date.now() - relay.lastPongTime;
-            if (timeSinceLastPong > this.pingIntervalMs * 2) {
-                //
-
+            if (timeSinceLastPong > this.pingIntervalMs * 2 && relay.unansweredPings >= 2) {
+                // No inbound message for 2x the ping interval AND we've sent at least 2 pings
+                // without any response — the connection is truly stale.
+                // The unanswered pings gate handles browser tab throttling: on the first tick
+                // after waking, unansweredPings is 0, so we send a ping and wait. If the relay
+                // is alive it responds (resetting the counter). If dead, subsequent ticks
+                // increment the counter until it reaches the threshold, even under sustained
+                // throttling where intervals are irregular.
+                console.warn(`Relay ${url} appears stale (no response for ${timeSinceLastPong}ms, ${relay.unansweredPings} unanswered pings), reconnecting...`);
                 this.stopPingTimer(url);
                 try {
                     relay.socket.close();
```
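A standalone restatement of the staleness gate, using the default 30 s ping interval from the diff:

```ts
function isStale(timeSinceLastPong: number, unansweredPings: number,
                 pingIntervalMs = 30_000): boolean {
    return timeSinceLastPong > pingIntervalMs * 2 && unansweredPings >= 2;
}

// Tab throttled for 10 minutes, relay actually alive: first tick after
// waking sees 0 unanswered pings, so we ping and wait instead of
// reconnecting; the reply resets both gates.
isStale(600_000, 0); // false
// Relay actually dead, two pings already unanswered: close and reconnect.
isStale(90_000, 2);  // true
```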
```diff
@@ -9805,17 +9924,26 @@
                 }
                 return;
             }
-            // Send a subscription request as a ping (relays respond with EOSE)
-            //
-            //
+            // Send a subscription request as a ping (relays respond with EOSE).
+            // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+            // with no kinds/authors/#p will, after EOSE, stream every event the
+            // relay receives (NIP-01 live tail), saturating the connection and
+            // exhausting per-connection subscription slots on busy relays.
+            // Scoping by `authors:[self]` keeps the live tail empty in practice
+            // (the relay would only forward our own future events).
             try {
-                const
+                const selfPubkey = this.keyManager.getPublicKeyHex();
                 // First close any existing ping subscription to ensure we don't accumulate
-                const closeMessage = JSON.stringify(['CLOSE',
+                const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
                 relay.socket.send(closeMessage);
                 // Then send the new ping request (limit:1 ensures relay sends EOSE)
-                const pingMessage = JSON.stringify([
+                const pingMessage = JSON.stringify([
+                    'REQ',
+                    PING_SUB_ID,
+                    { authors: [selfPubkey], limit: 1 },
+                ]);
                 relay.socket.send(pingMessage);
+                relay.unansweredPings++;
             }
             catch {
                 // Send failed, connection likely dead
```
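One keepalive tick, reconstructed as the frames it produces; the pubkey is illustrative, `PING_SUB_ID` is the constant from the diff:

```ts
const PING_SUB_ID = '__nostr-sdk-keepalive__';
const selfPubkey = 'ab12ef90'; // illustrative, normally 64 hex chars

const frames = [
    JSON.stringify(['CLOSE', PING_SUB_ID]),
    JSON.stringify(['REQ', PING_SUB_ID, { authors: [selfPubkey], limit: 1 }]),
];
frames.forEach((f) => console.log('->', f));
// A live relay answers with ["EOSE","__nostr-sdk-keepalive__"]; that
// inbound frame resets unansweredPings to 0 in socket.onmessage.
```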
```diff
@@ -9848,6 +9976,11 @@
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
```
```diff
@@ -9867,7 +10000,7 @@
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
```
```diff
@@ -9881,16 +10014,16 @@
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
```
```diff
@@ -9902,7 +10035,7 @@
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
```
```diff
@@ -9940,11 +10073,23 @@
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
```
```diff
@@ -9961,15 +10106,64 @@
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
+            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
```
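The three accepted `auth-required` shapes, exercised against a standalone copy of the predicate from the hunk above:

```ts
const isAuthRequired = (msg: string): boolean =>
    msg === 'auth-required'
    || msg.startsWith('auth-required:')
    || msg.startsWith('auth-required ');

isAuthRequired('auth-required: we only serve authed REQs'); // true: transient, no marker
isAuthRequired('auth-required');                            // true: bare form
isAuthRequired('rate-limited: slow down');                  // false: terminal, marked closed
```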
```diff
@@ -9994,8 +10188,27 @@
             // Send AUTH response
             const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
             relay.socket.send(message);
-            // Re-send subscriptions after auth (relay may have ignored pre-auth
+            // Re-send subscriptions after auth (relay may have ignored pre-auth
+            // requests). Two separate per-relay markers, two separate decisions:
+            //
+            // - `closedSubIds`: do NOT clear. handleClosedMessage already
+            //   skips the auth-required transient case, so anything in this
+            //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+            //   that AUTH does not relax. The resubscribeAll guard then
+            //   correctly skips terminal-rejected subs on this relay. They
+            //   will be retried on the next reconnect, when onopen creates a
+            //   fresh RelayConnection with empty markers.
+            //
+            // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+            //   with zero events (filter unsatisfiable without auth context);
+            //   post-auth the same filter might match. We must re-arm the
+            //   local "still waiting" state so any in-flight
+            //   queryWithFirstSeenWins doesn't see this relay as already-done
+            //   from a stale marker.
             setTimeout(() => {
+                const r = this.relays.get(relayUrl);
+                if (r)
+                    r.eosedSubIds.clear();
                 this.resubscribeAll(relayUrl);
             }, AUTH_RESUBSCRIBE_DELAY_MS);
         }
```
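The transient auth-required round trip, as a frame-level sketch; the sub_id and reason are illustrative, and kind 22242 is the NIP-42 AUTH event kind:

```ts
// <- ["CLOSED","my-sub","auth-required: please authenticate"]  (no closedSubIds marker)
// <- ["AUTH","<challenge>"]
// -> ["AUTH",{ kind: 22242, /* signed over the challenge */ }]
//    ...AUTH_RESUBSCRIBE_DELAY_MS later, eosedSubIds cleared...
// -> ["REQ","my-sub",{ /* original filter, via resubscribeAll */ }]
```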
```diff
@@ -10015,24 +10228,40 @@
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
            this.emitConnectionEvent('disconnect', url, 'Client disconnected');
        }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot keys first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
```
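Effect on a caller mid-query, as a sketch; `disconnect` is an assumed public name for the teardown shown above, while `queryWithFirstSeenWins` is the method from this diff:

```ts
const pending = client.queryWithFirstSeenWins(filter, (e) => e);
client.disconnect(); // method name assumed
// `pending` now settles almost immediately (with whatever winner was
// gathered so far, or null) instead of waiting out queryTimeoutMs.
```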
```diff
@@ -10234,7 +10463,22 @@
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
```
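Caller-facing effect of the reserved-prefix guard; the argument order is an assumption inferred from the overload names in the diff, and `filter`/`listener` are placeholders:

```ts
// Throws: the prefix is reserved for SDK-internal subscriptions.
client.subscribe('__nostr-sdk-keepalive__', filter, listener);
// Error: Subscription ID "__nostr-sdk-keepalive__" uses the reserved
// "__nostr-sdk-" prefix — pick a different id.

// Fine: any id outside the reserved namespace.
const subId = client.subscribe('my-feed', filter, listener);
```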
```diff
@@ -10252,12 +10496,19 @@
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === OPEN
+            if (relay.connected && relay.socket?.readyState === OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
```
```diff
@@ -10283,12 +10534,45 @@
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+            // listener anticipates synchronous-callback hypothetical paths.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
```
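The first-seen-wins tie-break in `pickWinner` stays deterministic even when two authors arrive with the same timestamp. A standalone restatement of the logic above, with an assumed `Entry` shape:

```ts
type Entry = { firstSeen: number; latestEvent: unknown };

function pickWinner(authors: Map<string, Entry>): Entry | null {
    let winner: Entry | null = null;
    let winnerPubkey = '';
    for (const [pubkey, entry] of authors) {
        if (!winner
            || entry.firstSeen < winner.firstSeen
            // Same timestamp: lower pubkey wins, so relay delivery order
            // cannot flip the result between runs.
            || (entry.firstSeen === winner.firstSeen && pubkey < winnerPubkey)) {
            winner = entry;
            winnerPubkey = pubkey;
        }
    }
    return winner;
}
```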
```diff
@@ -10307,24 +10591,55 @@
                         }
                     }
                 },
-
-
-
-
-
-
-
-
-
-
-
-
+                // EOSE means *this relay* has finished delivering stored
+                // events. In a multi-relay client we must not settle yet — a
+                // slower relay may still be about to deliver matching events.
+                // Settle only when every connected relay has either EOSE'd
+                // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                // allDone is trivially true with one relay.)
+                onEndOfStoredEvents: (id) => {
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
+                    }
+                },
+                // Subscription error from the SDK — fires from three paths
+                // that all need the same "is it time to settle?" check:
+                //   1. Relay sent CLOSED for this sub. In a multi-relay
+                //      client the same sub_id may still be alive on a
+                //      healthy relay; settling on the first CLOSED would
+                //      prematurely abort a query other relays could
+                //      satisfy. handleClosedMessage records the rejection
+                //      on the sending relay's closedSubIds before invoking
+                //      us, so we can decide via allRelaysDoneFor.
+                //   2. Relay disconnected mid-query (socket.onclose →
+                //      synthetic onError). The relay no longer counts as
+                //      connected, so allRelaysDoneFor excludes it.
+                //   3. Client disconnected (disconnect() → synthetic
+                //      onError). All relays are torn down, allRelaysDoneFor
+                //      sees zero connected and settles.
+                onError: (id, message) => {
+                    console.warn(`Subscription error on ${id}: ${message}`);
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
                     }
-
+                    // else: keep waiting for EOSE / CLOSED from remaining
+                    // relays or the overall query timeout.
                 },
             });
         });
     }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.
```
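Putting the settlement pieces together: a standalone restatement of `allRelaysDoneFor` over plain data, showing the two-relay cases the comments describe (the `RelayState` shape is an assumption, not the SDK's type):

```ts
type RelayState = { connected: boolean; eosed: Set<string>; closed: Set<string> };

function allRelaysDoneFor(relays: RelayState[], subId: string): boolean {
    const connected = relays.filter((r) => r.connected);
    if (connected.length === 0) return true; // nothing left to wait for
    return connected.every((r) => r.eosed.has(subId) || r.closed.has(subId));
}

const a: RelayState = { connected: true, eosed: new Set(['q1']), closed: new Set() };
const b: RelayState = { connected: true, eosed: new Set(), closed: new Set(['q1']) };
console.log(allRelaysDoneFor([a, b], 'q1')); // true: a EOSE'd, b CLOSED'd
b.closed.clear();
console.log(allRelaysDoneFor([a, b], 'q1')); // false: b still pending, keep waiting
b.connected = false;
console.log(allRelaysDoneFor([a, b], 'q1')); // true: disconnected relays don't count
```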
|