@unicitylabs/nostr-js-sdk 0.4.0 → 0.5.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.js +370 -55
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +370 -55
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +370 -55
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +370 -55
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2
package/dist/browser/index.js
CHANGED
@@ -9532,6 +9532,16 @@ const DEFAULT_QUERY_TIMEOUT_MS = 5000;
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
@@ -9571,6 +9581,30 @@ class NostrClient {
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
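A brief usage sketch of the new key-rotation surface. Only `setKeyManager`/`getKeyManager` are confirmed by this diff; the import path assumes `NostrClient` is exported from the package root, and the constructor and `connect()` shapes are assumptions for illustration:

```js
import { NostrClient } from '@unicitylabs/nostr-js-sdk';

// `initialKeyManager` / `rotatedKeyManager` are placeholder KeyManager
// instances; how they are constructed is not shown in this diff.
const client = new NostrClient(initialKeyManager); // assumed constructor shape
await client.connect('wss://relay.example.com');   // connect() exists; exact signature assumed

// Rotate keys without dropping the socket. Future publishes, NIP-42 AUTH
// responses, and each keepalive REQ's authors filter now use the new key;
// in-flight subscriptions are NOT re-issued or re-keyed.
client.setKeyManager(rotatedKeyManager);
console.assert(client.getKeyManager() === rotatedKeyManager);
```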
@@ -9614,13 +9648,6 @@ class NostrClient {
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
@@ -9657,11 +9684,41 @@ class NostrClient {
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            // A) createWebSocket resolves AFTER the timeout fired.
+            // B) createWebSocket resolves BEFORE the timeout, but
+            //    `onopen` fires AFTER the timeout fired.
+            // C) createWebSocket resolves and `onopen` fires BEFORE the
+            //    timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             createWebSocket(url)
                 .then((socket) => {
+                if (timedOut) {
+                    // Caller already saw the rejection. Discard the late
+                    // socket so we don't leak it.
+                    try {
+                        socket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                    return;
+                }
+                pendingSocket = socket;
                 const relay = {
                     url,
                     socket,
@@ -9671,9 +9728,30 @@ class NostrClient {
                     reconnectTimer: null,
                     pingTimer: null,
                     lastPongTime: Date.now(),
+                    unansweredPings: 0,
                     wasConnected: existingRelay?.wasConnected ?? false,
+                    // Reset on every new connection: a relay's per-connection
+                    // sub-slot accounting is fresh, so previously-rejected REQs
+                    // should be re-issued on the new socket.
+                    closedSubIds: new Set(),
+                    eosedSubIds: new Set(),
                 };
                 socket.onopen = () => {
+                    // The `.then` block already guards against a socket
+                    // arriving after the connection timeout, but the socket
+                    // can also be created BEFORE the timeout while
+                    // `onopen` fires AFTER the timeout has rejected the
+                    // outer promise. Without this second guard we'd register
+                    // the relay, start a pingTimer, and resubscribe — orphan
+                    // background resources the caller can't see or clean up
+                    // because their connect() call already saw a rejection.
+                    if (timedOut) {
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
                     clearTimeout(timeoutId);
                     relay.connected = true;
                     relay.reconnectAttempts = 0; // Reset on successful connection
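The two `timedOut` guards above distill to a reusable pattern. A minimal standalone sketch, assuming a `createWebSocket(url)` factory (the stub and the `connectWithTimeout` name are illustrative, not the SDK's internals):

```js
// Illustrative stub; the SDK's real createWebSocket is not shown here.
const createWebSocket = (url) => Promise.resolve(new WebSocket(url));

function connectWithTimeout(url, timeoutMs) {
  return new Promise((resolve, reject) => {
    let timedOut = false;
    let pendingSocket = null;
    const timeoutId = setTimeout(() => {
      timedOut = true;
      if (pendingSocket) pendingSocket.close(1000, 'Connection setup timed out');
      reject(new Error(`Connection to ${url} timed out`));
    }, timeoutMs);
    createWebSocket(url)
      .then((socket) => {
        if (timedOut) { socket.close(1000, 'Connection setup timed out'); return; } // race (A)
        pendingSocket = socket;
        socket.onopen = () => {
          if (timedOut) { socket.close(1000, 'Connection setup timed out'); return; } // race (B)
          clearTimeout(timeoutId); // race (C): the success path
          resolve(socket);
        };
      })
      .catch((error) => { clearTimeout(timeoutId); reject(error); });
  });
}
```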
@@ -9698,10 +9776,11 @@ class NostrClient {
                 socket.onmessage = (event) => {
                     try {
                         const data = extractMessageData(event);
-                        // Update last pong time on any message (relay is alive)
+                        // Update last pong time and reset unanswered pings on any message (relay is alive)
                         const r = this.relays.get(url);
                         if (r) {
                             r.lastPongTime = Date.now();
+                            r.unansweredPings = 0;
                         }
                         this.handleRelayMessage(url, data);
                     }
@@ -9713,9 +9792,40 @@ class NostrClient {
                     const wasConnected = relay.connected;
                     relay.connected = false;
                     this.stopPingTimer(url);
+                    // Pre-onopen close: TCP handshake failure or relay
+                    // immediately closed the WS during the upgrade. Without
+                    // this, the connectToRelay promise stays pending until
+                    // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                    // lets the caller see the failure promptly and retry.
+                    if (!wasConnected && !timedOut) {
+                        timedOut = true;
+                        clearTimeout(timeoutId);
+                        reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                    }
                     if (wasConnected) {
                         const reason = event?.reason || 'Connection closed';
                         this.emitConnectionEvent('disconnect', url, reason);
+                        // Re-trigger the all-done check on every active sub.
+                        // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                        // from listener callbacks (EOSE / CLOSED via onError);
+                        // a socket that drops without sending either would
+                        // otherwise leave the query hanging until
+                        // queryTimeoutMs even though the disconnected relay no
+                        // longer counts toward "still pending" relays. Firing
+                        // a synthetic onError gives every active sub a chance
+                        // to re-evaluate now that the relay set has shrunk.
+                        // Include the relay URL so listeners in a multi-relay
+                        // client can attribute which relay dropped.
+                        const inflight = Array.from(this.subscriptions.entries());
+                        for (const [subId, sub] of inflight) {
+                            try {
+                                sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                            }
+                            catch {
+                                // Ignore listener errors — we're notifying
+                                // best-effort.
+                            }
+                        }
                     }
                     if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                         this.scheduleReconnect(url);
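Because the synthetic message embeds the relay URL, a multi-relay listener can attribute the drop. A small consumer sketch (the parsing regex is illustrative; the `Relay disconnected (<url>): <reason>` shape is taken verbatim from the hunk above):

```js
const listener = {
  onEvent: (event) => { /* handle events */ },
  onError: (subId, message) => {
    // Splits the synthetic "Relay disconnected (<url>): <reason>" string.
    const m = /^Relay disconnected \((.+)\): (.*)$/.exec(message);
    if (m) {
      console.warn(`sub ${subId}: relay ${m[1]} dropped (${m[2]}); others may still deliver`);
    }
  },
};

listener.onError('sub-1', 'Relay disconnected (wss://relay-b.example): Connection closed');
```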
@@ -9727,7 +9837,11 @@ class NostrClient {
                     reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                 }
             };
-            this.relays
+            // Note: we do NOT register the relay in `this.relays` here —
+            // only after `onopen` fires successfully. Registering eagerly
+            // (before onopen) would leak the relay into the global map
+            // even when the connection setup times out and the caller's
+            // promise has already rejected.
             })
             .catch((error) => {
                 clearTimeout(timeoutId);
@@ -9785,11 +9899,16 @@ class NostrClient {
                 this.stopPingTimer(url);
                 return;
             }
-            // Check if we've received any message recently
             const timeSinceLastPong = Date.now() - relay.lastPongTime;
-            if (timeSinceLastPong > this.pingIntervalMs * 2) {
-                //
-
+            if (timeSinceLastPong > this.pingIntervalMs * 2 && relay.unansweredPings >= 2) {
+                // No inbound message for 2x the ping interval AND we've sent at least 2 pings
+                // without any response — the connection is truly stale.
+                // The unanswered pings gate handles browser tab throttling: on the first tick
+                // after waking, unansweredPings is 0, so we send a ping and wait. If the relay
+                // is alive it responds (resetting the counter). If dead, subsequent ticks
+                // increment the counter until it reaches the threshold, even under sustained
+                // throttling where intervals are irregular.
+                console.warn(`Relay ${url} appears stale (no response for ${timeSinceLastPong}ms, ${relay.unansweredPings} unanswered pings), reconnecting...`);
                 this.stopPingTimer(url);
                 try {
                     relay.socket.close();
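The staleness predicate distilled, with the tab-throttling scenario the comment describes (the helper name and sample values are illustrative):

```js
function isStale(relay, pingIntervalMs, now = Date.now()) {
  const silentFor = now - relay.lastPongTime;
  return silentFor > pingIntervalMs * 2 && relay.unansweredPings >= 2;
}

// A browser tab throttled for 5 minutes: lastPongTime is ancient, but no
// ping has gone unanswered yet, so the first tick pings instead of closing.
const relay = { lastPongTime: Date.now() - 5 * 60_000, unansweredPings: 0 };
console.log(isStale(relay, 30_000)); // false: send a ping and wait
relay.unansweredPings = 2;           // two later ticks got no reply
console.log(isStale(relay, 30_000)); // true: the connection really is dead
```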
@@ -9799,17 +9918,26 @@ class NostrClient {
                 }
                 return;
             }
-            // Send a subscription request as a ping (relays respond with EOSE)
-            //
-            //
+            // Send a subscription request as a ping (relays respond with EOSE).
+            // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+            // with no kinds/authors/#p will, after EOSE, stream every event the
+            // relay receives (NIP-01 live tail), saturating the connection and
+            // exhausting per-connection subscription slots on busy relays.
+            // Scoping by `authors:[self]` keeps the live tail empty in practice
+            // (the relay would only forward our own future events).
            try {
-                const
+                const selfPubkey = this.keyManager.getPublicKeyHex();
                 // First close any existing ping subscription to ensure we don't accumulate
-                const closeMessage = JSON.stringify(['CLOSE',
+                const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
                 relay.socket.send(closeMessage);
                 // Then send the new ping request (limit:1 ensures relay sends EOSE)
-                const pingMessage = JSON.stringify([
+                const pingMessage = JSON.stringify([
+                    'REQ',
+                    PING_SUB_ID,
+                    { authors: [selfPubkey], limit: 1 },
+                ]);
                 relay.socket.send(pingMessage);
+                relay.unansweredPings++;
             }
             catch {
                 // Send failed, connection likely dead
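What the keepalive tick now puts on the wire, reconstructed from the hunk above (the pubkey is a placeholder):

```js
const PING_SUB_ID = '__nostr-sdk-keepalive__';
const selfPubkey = 'a'.repeat(64); // placeholder 32-byte hex pubkey

// 1) Retire any previous keepalive sub so slots don't accumulate.
console.log(JSON.stringify(['CLOSE', PING_SUB_ID]));
// ["CLOSE","__nostr-sdk-keepalive__"]

// 2) Tightly scoped REQ: limit:1 forces an EOSE, authors:[self] keeps the
//    post-EOSE live tail empty on NIP-01 relays.
console.log(JSON.stringify(['REQ', PING_SUB_ID, { authors: [selfPubkey], limit: 1 }]));
// ["REQ","__nostr-sdk-keepalive__",{"authors":["aaa...a"],"limit":1}]
```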
@@ -9842,6 +9970,11 @@ class NostrClient {
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
@@ -9861,7 +9994,7 @@ class NostrClient {
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
@@ -9875,16 +10008,16 @@ class NostrClient {
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
@@ -9896,7 +10029,7 @@ class NostrClient {
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
@@ -9934,11 +10067,23 @@ class NostrClient {
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
@@ -9955,15 +10100,64 @@ class NostrClient {
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
+            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
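The three accepted `auth-required` shapes, restated as a standalone predicate with sample reasons:

```js
const isAuthRequired = (message) =>
  message === 'auth-required'
  || message.startsWith('auth-required:')
  || message.startsWith('auth-required ');

console.log(isAuthRequired('auth-required: this relay wants NIP-42 AUTH first')); // true (transient)
console.log(isAuthRequired('auth-required'));                                     // true (bare form)
console.log(isAuthRequired('rate-limited: slow down'));                           // false (terminal; lands in closedSubIds)
```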
@@ -9988,8 +10182,27 @@ class NostrClient {
         // Send AUTH response
         const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
         relay.socket.send(message);
-        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // requests). Two separate per-relay markers, two separate decisions:
+        //
+        // - `closedSubIds`: do NOT clear. handleClosedMessage already
+        //   skips the auth-required transient case, so anything in this
+        //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+        //   that AUTH does not relax. The resubscribeAll guard then
+        //   correctly skips terminal-rejected subs on this relay. They
+        //   will be retried on the next reconnect, when onopen creates a
+        //   fresh RelayConnection with empty markers.
+        //
+        // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+        //   with zero events (filter unsatisfiable without auth context);
+        //   post-auth the same filter might match. We must re-arm the
+        //   local "still waiting" state so any in-flight
+        //   queryWithFirstSeenWins doesn't see this relay as already-done
+        //   from a stale marker.
         setTimeout(() => {
+            const r = this.relays.get(relayUrl);
+            if (r)
+                r.eosedSubIds.clear();
             this.resubscribeAll(relayUrl);
         }, AUTH_RESUBSCRIBE_DELAY_MS);
     }
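The asymmetric treatment of the two marker sets, reduced to a sketch (the helper name is illustrative; the relay object mirrors the RelayConnection fields this diff adds):

```js
function onAuthAccepted(relay) {
  // Pre-auth EOSE markers may be stale, since the same filter can match
  // after AUTH, so re-arm the "still waiting" state.
  relay.eosedSubIds.clear();
  // closedSubIds is deliberately left alone: auth-required rejections never
  // land there, so every entry is terminal and AUTH cannot relax it.
}

const relay = { eosedSubIds: new Set(['sub-a']), closedSubIds: new Set(['sub-b']) };
onAuthAccepted(relay);
console.log([...relay.eosedSubIds]);  // []        (will be re-proven post-auth)
console.log([...relay.closedSubIds]); // ['sub-b'] (still skipped until reconnect)
```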
@@ -10009,24 +10222,40 @@ class NostrClient {
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
             this.emitConnectionEvent('disconnect', url, 'Client disconnected');
         }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot keys first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
@@ -10228,7 +10457,22 @@ class NostrClient {
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
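The guard distilled to a runnable form (the standalone function name is illustrative; the error text matches the hunk above):

```js
function assertNotReserved(subscriptionId) {
  if (subscriptionId.startsWith('__nostr-sdk-')) {
    throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
  }
}

assertNotReserved('my-app-feed'); // fine (user namespace)
try {
  assertNotReserved('__nostr-sdk-custom');
} catch (e) {
  console.warn(e.message); // the reserved namespace belongs to SDK internals like PING_SUB_ID
}
```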
@@ -10246,12 +10490,19 @@ class NostrClient {
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === OPEN
+            if (relay.connected && relay.socket?.readyState === OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
@@ -10277,12 +10528,45 @@ class NostrClient {
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+            // listener anticipates synchronous-callback hypothetical paths.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
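The winner selection with its deterministic tie-break, run on synthetic data (timestamps and pubkeys invented for the demo):

```js
const authors = new Map([
  ['f2e1', { firstSeen: 1700000005, latestEvent: { id: 'latecomer' } }],
  ['0b42', { firstSeen: 1700000001, latestEvent: { id: 'early-higher-key' } }],
  ['0a99', { firstSeen: 1700000001, latestEvent: { id: 'early-lower-key' } }],
]);
let winnerEntry = null;
let winnerPubkey = '';
for (const [pubkey, entry] of authors) {
  if (!winnerEntry
      || entry.firstSeen < winnerEntry.firstSeen
      || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
    winnerEntry = entry;
    winnerPubkey = pubkey;
  }
}
// Earliest firstSeen wins; an exact tie breaks on the lexicographically
// smaller pubkey, so any relay delivery order yields the same winner.
console.log(winnerPubkey, winnerEntry.latestEvent.id); // 0a99 early-lower-key
```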
@@ -10301,24 +10585,55 @@ class NostrClient {
                         }
                     }
                 },
-
-
-
-
-
-
-
-
-
-
-
-
+                // EOSE means *this relay* has finished delivering stored
+                // events. In a multi-relay client we must not settle yet — a
+                // slower relay may still be about to deliver matching events.
+                // Settle only when every connected relay has either EOSE'd
+                // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                // allDone is trivially true with one relay.)
+                onEndOfStoredEvents: (id) => {
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
+                    }
+                },
+                // Subscription error from the SDK — fires from three paths
+                // that all need the same "is it time to settle?" check:
+                // 1. Relay sent CLOSED for this sub. In a multi-relay
+                //    client the same sub_id may still be alive on a
+                //    healthy relay; settling on the first CLOSED would
+                //    prematurely abort a query other relays could
+                //    satisfy. handleClosedMessage records the rejection
+                //    on the sending relay's closedSubIds before invoking
+                //    us, so we can decide via allRelaysDoneFor.
+                // 2. Relay disconnected mid-query (socket.onclose →
+                //    synthetic onError). The relay no longer counts as
+                //    connected, so allRelaysDoneFor excludes it.
+                // 3. Client disconnected (disconnect() → synthetic
+                //    onError). All relays are torn down, allRelaysDoneFor
+                //    sees zero connected and settles.
+                onError: (id, message) => {
+                    console.warn(`Subscription error on ${id}: ${message}`);
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
                     }
-
+                    // else: keep waiting for EOSE / CLOSED from remaining
+                    // relays or the overall query timeout.
                 },
             });
         });
     }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.