@unicitylabs/nostr-js-sdk 0.4.1 → 0.5.0-dev.2
- package/dist/browser/index.js +357 -50
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +357 -50
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +357 -50
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +357 -50
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2

@@ -9538,6 +9538,16 @@
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
@@ -9577,6 +9587,30 @@
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
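
A quick usage sketch of the new accessor pair. Only setKeyManager, getKeyManager, and getPublicKeyHex come from this diff; the stub key manager and the connect() call are illustrative assumptions, not the SDK's documented API.

```js
// Sketch: rotating identities on a live client without dropping sockets.
const makeKeyManager = (pubkeyHex) => ({
    // Minimal stand-in for the SDK's key manager interface; the real one
    // also signs events and NIP-42 AUTH responses.
    getPublicKeyHex: () => pubkeyHex,
});

const client = new NostrClient(makeKeyManager('aa'.repeat(32))); // assumed ctor shape
await client.connect('wss://relay.example.com');                 // assumed method name

// Every key-manager consumer from this point on sees the new key:
// future event signing, AUTH responses, and the keepalive ping filter.
// Subscriptions already on the wire are not re-issued or re-keyed.
client.setKeyManager(makeKeyManager('bb'.repeat(32)));
console.log(client.getKeyManager().getPublicKeyHex()); // 'bbbb…'
```
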
@@ -9620,13 +9654,6 @@
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
@@ -9663,11 +9690,41 @@
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            // A) createWebSocket resolves AFTER the timeout fired.
+            // B) createWebSocket resolves BEFORE the timeout, but
+            //    `onopen` fires AFTER the timeout fired.
+            // C) createWebSocket resolves and `onopen` fires BEFORE the
+            //    timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             createWebSocket(url)
                 .then((socket) => {
+                    if (timedOut) {
+                        // Caller already saw the rejection. Discard the late
+                        // socket so we don't leak it.
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
+                    pendingSocket = socket;
                     const relay = {
                         url,
                         socket,
@@ -9679,8 +9736,28 @@
                         lastPongTime: Date.now(),
                         unansweredPings: 0,
                         wasConnected: existingRelay?.wasConnected ?? false,
+                        // Reset on every new connection: a relay's per-connection
+                        // sub-slot accounting is fresh, so previously-rejected REQs
+                        // should be re-issued on the new socket.
+                        closedSubIds: new Set(),
+                        eosedSubIds: new Set(),
                     };
                     socket.onopen = () => {
+                        // The `.then` block already guards against a socket
+                        // arriving after the connection timeout, but the socket
+                        // can also be created BEFORE the timeout while
+                        // `onopen` fires AFTER the timeout has rejected the
+                        // outer promise. Without this second guard we'd register
+                        // the relay, start a pingTimer, and resubscribe — orphan
+                        // background resources the caller can't see or clean up
+                        // because their connect() call already saw a rejection.
+                        if (timedOut) {
+                            try {
+                                socket.close(1000, 'Connection setup timed out');
+                            }
+                            catch { /* ignore */ }
+                            return;
+                        }
                         clearTimeout(timeoutId);
                         relay.connected = true;
                         relay.reconnectAttempts = 0; // Reset on successful connection
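
The timedOut/pendingSocket pattern above is not SDK-specific; a minimal standalone sketch of the same three-race defense follows. The names (openWithTimeout, makeSocket) are illustrative, and the race labels match the A/B/C comment in the hunk.

```js
// Minimal sketch of the connection-setup race guard, assuming only that
// `makeSocket` is some async WebSocket factory (like createWebSocket above).
function openWithTimeout(makeSocket, url, timeoutMs) {
    return new Promise((resolve, reject) => {
        let timedOut = false;
        let pendingSocket = null;
        const timer = setTimeout(() => {
            timedOut = true;
            // Race B: a socket exists but onopen hasn't fired; close it here.
            pendingSocket?.close(1000, 'Connection setup timed out');
            reject(new Error(`Connection to ${url} timed out`));
        }, timeoutMs);
        makeSocket(url).then((socket) => {
            if (timedOut) {
                // Race A: the factory resolved after the timeout already rejected.
                socket.close(1000, 'Connection setup timed out');
                return;
            }
            pendingSocket = socket;
            socket.onopen = () => {
                if (timedOut) {
                    // Race B again, seen from onopen's side.
                    socket.close(1000, 'Connection setup timed out');
                    return;
                }
                clearTimeout(timer); // Race C: the success path.
                resolve(socket);
            };
        }, reject);
    });
}
```
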
@@ -9721,9 +9798,40 @@
                         const wasConnected = relay.connected;
                         relay.connected = false;
                         this.stopPingTimer(url);
+                        // Pre-onopen close: TCP handshake failure or relay
+                        // immediately closed the WS during the upgrade. Without
+                        // this, the connectToRelay promise stays pending until
+                        // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                        // lets the caller see the failure promptly and retry.
+                        if (!wasConnected && !timedOut) {
+                            timedOut = true;
+                            clearTimeout(timeoutId);
+                            reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                        }
                         if (wasConnected) {
                             const reason = event?.reason || 'Connection closed';
                             this.emitConnectionEvent('disconnect', url, reason);
+                            // Re-trigger the all-done check on every active sub.
+                            // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                            // from listener callbacks (EOSE / CLOSED via onError);
+                            // a socket that drops without sending either would
+                            // otherwise leave the query hanging until
+                            // queryTimeoutMs even though the disconnected relay no
+                            // longer counts toward "still pending" relays. Firing
+                            // a synthetic onError gives every active sub a chance
+                            // to re-evaluate now that the relay set has shrunk.
+                            // Include the relay URL so listeners in a multi-relay
+                            // client can attribute which relay dropped.
+                            const inflight = Array.from(this.subscriptions.entries());
+                            for (const [subId, sub] of inflight) {
+                                try {
+                                    sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                                }
+                                catch {
+                                    // Ignore listener errors — we're notifying
+                                    // best-effort.
+                                }
+                            }
                         }
                         if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                             this.scheduleReconnect(url);
@@ -9735,7 +9843,11 @@
                         reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                     }
                 };
-                    this.relays
+                // Note: we do NOT register the relay in `this.relays` here —
+                // only after `onopen` fires successfully. Registering eagerly
+                // (before onopen) would leak the relay into the global map
+                // even when the connection setup times out and the caller's
+                // promise has already rejected.
             })
                 .catch((error) => {
                     clearTimeout(timeoutId);
@@ -9812,16 +9924,24 @@
             }
             return;
         }
-        // Send a subscription request as a ping (relays respond with EOSE)
-        //
-        //
+        // Send a subscription request as a ping (relays respond with EOSE).
+        // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+        // with no kinds/authors/#p will, after EOSE, stream every event the
+        // relay receives (NIP-01 live tail), saturating the connection and
+        // exhausting per-connection subscription slots on busy relays.
+        // Scoping by `authors:[self]` keeps the live tail empty in practice
+        // (the relay would only forward our own future events).
        try {
-            const
+            const selfPubkey = this.keyManager.getPublicKeyHex();
             // First close any existing ping subscription to ensure we don't accumulate
-            const closeMessage = JSON.stringify(['CLOSE',
+            const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
             relay.socket.send(closeMessage);
             // Then send the new ping request (limit:1 ensures relay sends EOSE)
-            const pingMessage = JSON.stringify([
+            const pingMessage = JSON.stringify([
+                'REQ',
+                PING_SUB_ID,
+                { authors: [selfPubkey], limit: 1 },
+            ]);
             relay.socket.send(pingMessage);
             relay.unansweredPings++;
         }
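
Concretely, these are the frames the keepalive now emits each interval, next to the kind of unscoped filter the comment warns about (a sketch; the pubkey is a placeholder):

```js
// Each ping interval: drop the previous keepalive sub, then re-issue a
// tightly scoped REQ. The relay's EOSE serves as the pong.
const selfPubkey = 'ab'.repeat(32); // placeholder 64-char hex pubkey
const closeFrame = JSON.stringify(['CLOSE', '__nostr-sdk-keepalive__']);
const pingFrame = JSON.stringify([
    'REQ',
    '__nostr-sdk-keepalive__',
    { authors: [selfPubkey], limit: 1 },
]);
// The hazard being avoided: an unscoped filter still gets its EOSE, but
// then keeps matching every event the relay receives as a live tail.
const unscopedPing = JSON.stringify(['REQ', '__nostr-sdk-keepalive__', { limit: 1 }]);
```
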
@@ -9856,6 +9976,11 @@
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
@@ -9875,7 +10000,7 @@
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
@@ -9889,16 +10014,16 @@
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
@@ -9910,7 +10035,7 @@
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
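
The added sub_id type guard takes the same shape in all three handlers; isolated as a predicate below (a simplification: the EVENT handler additionally requires a third element):

```js
// NIP-01 sub_ids are strings; frames carrying anything else are now
// dropped before they can reach the subscription map.
const hasStringSubId = (json) => Array.isArray(json)
    && json.length >= 2 && typeof json[1] === 'string';

console.log(hasStringSubId(['EVENT', 'sub1', { kind: 1 }])); // true: processed
console.log(hasStringSubId(['EVENT', 42, { kind: 1 }]));     // false: ignored
console.log(hasStringSubId(['EOSE', null]));                 // false: ignored
```
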
@@ -9948,11 +10073,23 @@
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
@@ -9969,15 +10106,64 @@
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
+            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
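
The three accepted auth-required spellings, isolated so they can be checked directly (this mirrors the predicate in the hunk above):

```js
const isAuthRequired = (message) => message === 'auth-required'
    || message.startsWith('auth-required:')
    || message.startsWith('auth-required ');

console.log(isAuthRequired('auth-required: need AUTH')); // true: transient, no marker set
console.log(isAuthRequired('auth-required'));            // true: bare form
console.log(isAuthRequired('rate-limited: slow down'));  // false: terminal, marks closedSubIds
```
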
@@ -10002,8 +10188,27 @@
         // Send AUTH response
         const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
         relay.socket.send(message);
-        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // requests). Two separate per-relay markers, two separate decisions:
+        //
+        // - `closedSubIds`: do NOT clear. handleClosedMessage already
+        //   skips the auth-required transient case, so anything in this
+        //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+        //   that AUTH does not relax. The resubscribeAll guard then
+        //   correctly skips terminal-rejected subs on this relay. They
+        //   will be retried on the next reconnect, when onopen creates a
+        //   fresh RelayConnection with empty markers.
+        //
+        // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+        //   with zero events (filter unsatisfiable without auth context);
+        //   post-auth the same filter might match. We must re-arm the
+        //   local "still waiting" state so any in-flight
+        //   queryWithFirstSeenWins doesn't see this relay as already-done
+        //   from a stale marker.
         setTimeout(() => {
+            const r = this.relays.get(relayUrl);
+            if (r)
+                r.eosedSubIds.clear();
             this.resubscribeAll(relayUrl);
         }, AUTH_RESUBSCRIBE_DELAY_MS);
     }
@@ -10023,24 +10228,40 @@
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
             this.emitConnectionEvent('disconnect', url, 'Client disconnected');
         }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot the entries first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
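
What the synthetic notification buys callers, sketched below. `queryNametag` is a hypothetical stand-in for any method built on queryWithFirstSeenWins; only `disconnect()` and the settle-immediately behavior come from this diff.

```js
// Before this change, a query in flight at shutdown waited out the full
// queryTimeoutMs. Now disconnect() fires a synthetic onError per sub,
// allRelaysDoneFor sees zero connected relays, and the promise settles.
const pending = client.queryNametag('alice'); // hypothetical caller
client.disconnect();
console.log(await pending); // null, promptly, with no relays left to answer
```
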
@@ -10242,7 +10463,22 @@
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
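
Seen from user code, the reserved-prefix guard behaves as below (a sketch: the argument order assumes the explicit-subscriptionId overload this method dispatches on):

```js
// Explicit sub_ids may not use the SDK-internal namespace:
client.subscribe('__nostr-sdk-mine', filter, listener);
// → throws: Subscription ID "__nostr-sdk-mine" uses the reserved
//   "__nostr-sdk-" prefix — pick a different id.

client.subscribe('my-feed', filter, listener); // any other id is fine
```
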
@@ -10260,12 +10496,19 @@
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === OPEN
+            if (relay.connected && relay.socket?.readyState === OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
@@ -10291,12 +10534,45 @@
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The comment on `finishWith`
+            // covers the same hypothetical synchronous-callback path.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
@@ -10315,24 +10591,55 @@
                         }
                     }
                 },
-
-
-
-
-
-
-
-
-
-
-
-
+                // EOSE means *this relay* has finished delivering stored
+                // events. In a multi-relay client we must not settle yet — a
+                // slower relay may still be about to deliver matching events.
+                // Settle only when every connected relay has either EOSE'd
+                // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                // allDone is trivially true with one relay.)
+                onEndOfStoredEvents: (id) => {
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
+                    }
+                },
+                // Subscription error from the SDK — fires from three paths
+                // that all need the same "is it time to settle?" check:
+                // 1. Relay sent CLOSED for this sub. In a multi-relay
+                //    client the same sub_id may still be alive on a
+                //    healthy relay; settling on the first CLOSED would
+                //    prematurely abort a query other relays could
+                //    satisfy. handleClosedMessage records the rejection
+                //    on the sending relay's closedSubIds before invoking
+                //    us, so we can decide via allRelaysDoneFor.
+                // 2. Relay disconnected mid-query (socket.onclose →
+                //    synthetic onError). The relay no longer counts as
+                //    connected, so allRelaysDoneFor excludes it.
+                // 3. Client disconnected (disconnect() → synthetic
+                //    onError). All relays are torn down, allRelaysDoneFor
+                //    sees zero connected and settles.
+                onError: (id, message) => {
+                    console.warn(`Subscription error on ${id}: ${message}`);
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
                     }
-
+                    // else: keep waiting for EOSE / CLOSED from remaining
+                    // relays or the overall query timeout.
                 },
             });
         });
     }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.