@unicitylabs/nostr-js-sdk 0.4.0 → 0.5.0-dev.2

@@ -14,6 +14,16 @@ const DEFAULT_QUERY_TIMEOUT_MS = 5000;
  const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
  const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
  const DEFAULT_PING_INTERVAL_MS = 30000;
+ /**
+  * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+  * `__nostr-sdk-` prefix so that user code calling
+  * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+  * cannot collide — a user choosing the literal `"ping"` would
+  * otherwise have their subscription forcibly CLOSE/REQ'd every
+  * ping interval. The leading `__` is a stable convention for
+  * "do not pick this name."
+  */
+ const PING_SUB_ID = '__nostr-sdk-keepalive__';
  /**
   * Delay before resubscribing after NIP-42 authentication.
   * This gives the relay time to process the AUTH response before we send
@@ -53,6 +63,30 @@ export class NostrClient {
          this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
          this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
      }
+     /**
+      * Replace the key manager used for signing and encryption.
+      *
+      * The connection stays alive — but every operation that consults the
+      * key manager from this point on uses the new key, including:
+      * - signing future published events,
+      * - signing NIP-42 AUTH challenge responses,
+      * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+      *   (computed each ping interval),
+      * - any other code path that calls `getPublicKeyHex()` on the
+      *   stored manager.
+      *
+      * Existing in-flight subscriptions are not re-issued or re-keyed.
+      * @param keyManager New key manager
+      */
+     setKeyManager(keyManager) {
+         this.keyManager = keyManager;
+     }
+     /**
+      * Get the current key manager.
+      */
+     getKeyManager() {
+         return this.keyManager;
+     }
      /**
       * Add a connection event listener.
       * @param listener Listener for connection events
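
A minimal usage sketch of the new setter pair (the `client` and `nextKeyManager` values are illustrative stand-ins, not part of this diff; any key manager exposing `getPublicKeyHex()` would do):

    // Rotate the signing identity without dropping the WebSocket connections.
    client.setKeyManager(nextKeyManager);
    // Future publishes, NIP-42 AUTH responses, and the keepalive ping filter
    // now consult the new key; in-flight subscriptions are left untouched.
    console.log(client.getKeyManager().getPublicKeyHex());
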
@@ -96,13 +130,6 @@ export class NostrClient {
              }
          }
      }
-     /**
-      * Get the key manager.
-      * @returns The key manager instance
-      */
-     getKeyManager() {
-         return this.keyManager;
-     }
      /**
       * Get the current query timeout in milliseconds.
       * @returns Query timeout in milliseconds
@@ -139,11 +166,41 @@ export class NostrClient {
              return;
          }
          return new Promise((resolve, reject) => {
+             // The connection-setup timeout has three races to defend
+             // against:
+             //   A) createWebSocket resolves AFTER the timeout fired.
+             //   B) createWebSocket resolves BEFORE the timeout, but
+             //      `onopen` fires AFTER the timeout fired.
+             //   C) createWebSocket resolves and `onopen` fires BEFORE the
+             //      timeout (the success path).
+             // `pendingSocket` lets the timeout proactively close any
+             // socket that's already been created but hasn't fired
+             // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+             // and (B) inside `socket.onopen`.
+             let timedOut = false;
+             let pendingSocket = null;
              const timeoutId = setTimeout(() => {
+                 timedOut = true;
+                 if (pendingSocket) {
+                     try {
+                         pendingSocket.close(1000, 'Connection setup timed out');
+                     }
+                     catch { /* ignore */ }
+                 }
                  reject(new Error(`Connection to ${url} timed out`));
              }, CONNECTION_TIMEOUT_MS);
              createWebSocket(url)
                  .then((socket) => {
+                     if (timedOut) {
+                         // Caller already saw the rejection. Discard the late
+                         // socket so we don't leak it.
+                         try {
+                             socket.close(1000, 'Connection setup timed out');
+                         }
+                         catch { /* ignore */ }
+                         return;
+                     }
+                     pendingSocket = socket;
                      const relay = {
                          url,
                          socket,
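
The same three-race guard, reduced to a standalone sketch (assuming a `createWebSocket` factory that resolves to a browser-style WebSocket; names and constants are illustrative):

    function connectWithTimeout(url, timeoutMs) {
        return new Promise((resolve, reject) => {
            let timedOut = false;     // covers races (A) and (B)
            let pendingSocket = null; // lets the timer close a half-open socket
            const timer = setTimeout(() => {
                timedOut = true;
                if (pendingSocket) {
                    try { pendingSocket.close(1000, 'timeout'); } catch { /* ignore */ }
                }
                reject(new Error(`Connection to ${url} timed out`));
            }, timeoutMs);
            createWebSocket(url).then((socket) => {
                if (timedOut) { // race (A): promise resolved after rejection
                    try { socket.close(1000, 'timeout'); } catch { /* ignore */ }
                    return;
                }
                pendingSocket = socket;
                socket.onopen = () => {
                    if (timedOut) { // race (B): opened after rejection
                        try { socket.close(1000, 'timeout'); } catch { /* ignore */ }
                        return;
                    }
                    clearTimeout(timer); // race (C): the success path
                    resolve(socket);
                };
            }).catch((err) => {
                clearTimeout(timer);
                reject(err);
            });
        });
    }
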
@@ -153,9 +210,30 @@ export class NostrClient {
                          reconnectTimer: null,
                          pingTimer: null,
                          lastPongTime: Date.now(),
+                         unansweredPings: 0,
                          wasConnected: existingRelay?.wasConnected ?? false,
+                         // Reset on every new connection: a relay's per-connection
+                         // sub-slot accounting is fresh, so previously-rejected REQs
+                         // should be re-issued on the new socket.
+                         closedSubIds: new Set(),
+                         eosedSubIds: new Set(),
                      };
                      socket.onopen = () => {
+                         // The `.then` block already guards against a socket
+                         // arriving after the connection timeout, but the socket
+                         // can also be created BEFORE the timeout while
+                         // `onopen` fires AFTER the timeout has rejected the
+                         // outer promise. Without this second guard we'd register
+                         // the relay, start a pingTimer, and resubscribe — orphan
+                         // background resources the caller can't see or clean up
+                         // because their connect() call already saw a rejection.
+                         if (timedOut) {
+                             try {
+                                 socket.close(1000, 'Connection setup timed out');
+                             }
+                             catch { /* ignore */ }
+                             return;
+                         }
                          clearTimeout(timeoutId);
                          relay.connected = true;
                          relay.reconnectAttempts = 0; // Reset on successful connection
@@ -180,10 +258,11 @@ export class NostrClient {
                      socket.onmessage = (event) => {
                          try {
                              const data = extractMessageData(event);
-                             // Update last pong time on any message (relay is alive)
+                             // Update last pong time and reset unanswered pings on any message (relay is alive)
                              const r = this.relays.get(url);
                              if (r) {
                                  r.lastPongTime = Date.now();
+                                 r.unansweredPings = 0;
                              }
                              this.handleRelayMessage(url, data);
                          }
@@ -195,9 +274,40 @@ export class NostrClient {
                          const wasConnected = relay.connected;
                          relay.connected = false;
                          this.stopPingTimer(url);
+                         // Pre-onopen close: TCP handshake failure or relay
+                         // immediately closed the WS during the upgrade. Without
+                         // this, the connectToRelay promise stays pending until
+                         // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                         // lets the caller see the failure promptly and retry.
+                         if (!wasConnected && !timedOut) {
+                             timedOut = true;
+                             clearTimeout(timeoutId);
+                             reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                         }
                          if (wasConnected) {
                              const reason = event?.reason || 'Connection closed';
                              this.emitConnectionEvent('disconnect', url, reason);
+                             // Re-trigger the all-done check on every active sub.
+                             // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                             // from listener callbacks (EOSE / CLOSED via onError);
+                             // a socket that drops without sending either would
+                             // otherwise leave the query hanging until
+                             // queryTimeoutMs even though the disconnected relay no
+                             // longer counts toward "still pending" relays. Firing
+                             // a synthetic onError gives every active sub a chance
+                             // to re-evaluate now that the relay set has shrunk.
+                             // Include the relay URL so listeners in a multi-relay
+                             // client can attribute which relay dropped.
+                             const inflight = Array.from(this.subscriptions.entries());
+                             for (const [subId, sub] of inflight) {
+                                 try {
+                                     sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                                 }
+                                 catch {
+                                     // Ignore listener errors — we're notifying
+                                     // best-effort.
+                                 }
+                             }
                          }
                          if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                              this.scheduleReconnect(url);
@@ -209,7 +319,11 @@ export class NostrClient {
                              reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                          }
                      };
-                     this.relays.set(url, relay);
+                     // Note: we do NOT register the relay in `this.relays` here —
+                     // only after `onopen` fires successfully. Registering eagerly
+                     // (before onopen) would leak the relay into the global map
+                     // even when the connection setup times out and the caller's
+                     // promise has already rejected.
                  })
                  .catch((error) => {
                      clearTimeout(timeoutId);
@@ -267,11 +381,16 @@ export class NostrClient {
                  this.stopPingTimer(url);
                  return;
              }
-             // Check if we've received any message recently
              const timeSinceLastPong = Date.now() - relay.lastPongTime;
-             if (timeSinceLastPong > this.pingIntervalMs * 2) {
-                 // Connection is stale - force close and reconnect
-                 console.warn(`Relay ${url} appears stale (no response for ${timeSinceLastPong}ms), reconnecting...`);
+             if (timeSinceLastPong > this.pingIntervalMs * 2 && relay.unansweredPings >= 2) {
+                 // No inbound message for 2x the ping interval AND we've sent at least 2 pings
+                 // without any response: the connection is truly stale.
+                 // The unanswered-pings gate handles browser tab throttling: on the first tick
+                 // after waking, unansweredPings is 0, so we send a ping and wait. If the relay
+                 // is alive it responds (resetting the counter). If dead, subsequent ticks
+                 // increment the counter until it reaches the threshold, even under sustained
+                 // throttling where intervals are irregular.
+                 console.warn(`Relay ${url} appears stale (no response for ${timeSinceLastPong}ms, ${relay.unansweredPings} unanswered pings), reconnecting...`);
                  this.stopPingTimer(url);
                  try {
                      relay.socket.close();
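
A worked timeline for the two-condition staleness gate (intervals illustrative; with the default pingIntervalMs of 30s the silence threshold is 60s):

    // Tab throttled for minutes, relay actually alive:
    //   tick 1: silence > 60s but unansweredPings = 0 -> not stale; ping sent, counter = 1
    //   relay answers (any message)                   -> lastPongTime reset, counter = 0
    //
    // Relay actually dead:
    //   tick 1: silence > 60s, counter = 0 -> ping sent, counter = 1
    //   tick 2: silence > 60s, counter = 1 -> ping sent, counter = 2
    //   tick 3: silence > 60s, counter = 2 -> declared stale; close and reconnect
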
@@ -281,17 +400,26 @@ export class NostrClient {
                  }
                  return;
              }
-             // Send a subscription request as a ping (relays respond with EOSE)
-             // Use a single fixed subscription ID per relay to avoid accumulating subscriptions
-             // Note: limit:1 is used because some relays don't respond to limit:0
+             // Send a subscription request as a ping (relays respond with EOSE).
+             // The filter MUST be tightly scoped: an open `{ limit: 1 }` filter
+             // with no kinds/authors/#p will, after EOSE, stream every event the
+             // relay receives (NIP-01 live tail), saturating the connection and
+             // exhausting per-connection subscription slots on busy relays.
+             // Scoping by `authors:[self]` keeps the live tail empty in practice
+             // (the relay would only forward our own future events).
              try {
-                 const pingSubId = `ping`;
+                 const selfPubkey = this.keyManager.getPublicKeyHex();
                  // First close any existing ping subscription to ensure we don't accumulate
-                 const closeMessage = JSON.stringify(['CLOSE', pingSubId]);
+                 const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
                  relay.socket.send(closeMessage);
                  // Then send the new ping request (limit:1 ensures relay sends EOSE)
-                 const pingMessage = JSON.stringify(['REQ', pingSubId, { limit: 1 }]);
+                 const pingMessage = JSON.stringify([
+                     'REQ',
+                     PING_SUB_ID,
+                     { authors: [selfPubkey], limit: 1 },
+                 ]);
                  relay.socket.send(pingMessage);
+                 relay.unansweredPings++;
              }
              catch {
                  // Send failed, connection likely dead
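
On the wire, each keepalive tick now produces (pubkey abbreviated):

    ["CLOSE", "__nostr-sdk-keepalive__"]
    ["REQ", "__nostr-sdk-keepalive__", {"authors": ["<self pubkey hex>"], "limit": 1}]

and a healthy relay replies with at most one stored event followed by ["EOSE", "__nostr-sdk-keepalive__"], which onmessage counts as proof of life.
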
@@ -324,6 +452,11 @@ export class NostrClient {
          if (!relay?.socket || !relay.connected)
              return;
          for (const [subId, info] of this.subscriptions) {
+             // Skip subs this relay has previously CLOSED — re-issuing them
+             // just triggers the same rejection in a loop. Other healthy
+             // relays still resubscribe.
+             if (relay.closedSubIds.has(subId))
+                 continue;
              const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
              relay.socket.send(message);
          }
@@ -343,7 +476,7 @@ export class NostrClient {
      /**
       * Handle a message from a relay.
       */
-     handleRelayMessage(_url, message) {
+     handleRelayMessage(relayUrl, message) {
          try {
              const json = JSON.parse(message);
              if (!Array.isArray(json) || json.length < 2)
@@ -357,16 +490,16 @@ export class NostrClient {
                      this.handleOkMessage(json);
                      break;
                  case 'EOSE':
-                     this.handleEOSEMessage(json);
+                     this.handleEOSEMessage(relayUrl, json);
                      break;
                  case 'NOTICE':
                      this.handleNoticeMessage(json);
                      break;
                  case 'CLOSED':
-                     this.handleClosedMessage(json);
+                     this.handleClosedMessage(relayUrl, json);
                      break;
                  case 'AUTH':
-                     this.handleAuthMessage(_url, json);
+                     this.handleAuthMessage(relayUrl, json);
                      break;
              }
          }
@@ -378,7 +511,7 @@ export class NostrClient {
       * Handle EVENT message from relay.
       */
      handleEventMessage(json) {
-         if (json.length < 3)
+         if (json.length < 3 || typeof json[1] !== 'string')
              return;
          const subscriptionId = json[1];
          const eventData = json[2];
@@ -416,11 +549,23 @@ export class NostrClient {
      }
      /**
       * Handle EOSE (End of Stored Events) message from relay.
+      *
+      * Records the per-relay EOSE marker (mirroring closedSubIds) so
+      * queryWithFirstSeenWins can decide when ALL connected relays have
+      * finished — either streamed EOSE or rejected with CLOSED — instead
+      * of settling off the first fast relay's EOSE while a slower relay
+      * is still about to deliver matching events.
       */
-     handleEOSEMessage(json) {
-         if (json.length < 2)
+     handleEOSEMessage(relayUrl, json) {
+         if (json.length < 2 || typeof json[1] !== 'string')
              return;
          const subscriptionId = json[1];
+         if (!this.subscriptions.has(subscriptionId))
+             return;
+         const relay = this.relays.get(relayUrl);
+         if (relay) {
+             relay.eosedSubIds.add(subscriptionId);
+         }
          const subscription = this.subscriptions.get(subscriptionId);
          if (subscription?.listener.onEndOfStoredEvents) {
              subscription.listener.onEndOfStoredEvents(subscriptionId);
@@ -437,15 +582,64 @@ export class NostrClient {
      }
      /**
       * Handle CLOSED message from relay (subscription closed by relay).
+      *
+      * NIP-01 CLOSED frames are terminal for the named subscription **on
+      * the sending relay**. In a multi-relay client the same sub_id may
+      * still be alive on a healthy relay, so we must NOT delete the
+      * global `this.subscriptions` entry here — that would silently drop
+      * EVENT/EOSE frames from the still-healthy relays in
+      * `handleEventMessage` (which consults the global map).
+      *
+      * Instead we record the rejection on the sending relay's
+      * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+      * skip it on this relay only. The listener is notified via
+      * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+      * settle and explicitly `unsubscribe()` if they want to give up
+      * across all relays.
       */
-     handleClosedMessage(json) {
-         if (json.length < 3)
+     handleClosedMessage(relayUrl, json) {
+         // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+         // valid. Dropping such frames was exactly the leak this PR sets out
+         // to fix — no closedSubIds marker and no onError notification means
+         // queries hang until timeout and resubscribe loops persist.
+         if (json.length < 2 || typeof json[1] !== 'string')
              return;
          const subscriptionId = json[1];
-         const message = json[2];
+         // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+         // malicious relay could otherwise spam us with arbitrary sub_ids
+         // and grow `closedSubIds` unbounded over a long-lived connection,
+         // and could pre-emptively block sub_ids we might use later.
+         if (!this.subscriptions.has(subscriptionId))
+             return;
+         const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+         // NIP-42 transient case: relays that require AUTH typically reject
+         // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+         // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+         // this rejection is NOT terminal. If we marked closedSubIds here,
+         // queryWithFirstSeenWins.onError would settle the future
+         // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+         // the sub, and the post-AUTH retry would find nothing to retry.
+         // Listener still gets onError so callers see the reason; we just
+         // don't poison the per-relay state with a transient marker.
+         //
+         // We accept three on-the-wire shapes: `auth-required:...`
+         // (NIP-42 standard with reason), `auth-required ...` (whitespace
+         // separator), and bare `auth-required` (no suffix at all — some
+         // relays / tests).
+         const isAuthRequired = message === 'auth-required'
+             || message.startsWith('auth-required:')
+             || message.startsWith('auth-required ');
+         const relay = this.relays.get(relayUrl);
+         if (relay && !isAuthRequired) {
+             relay.closedSubIds.add(subscriptionId);
+         }
          const subscription = this.subscriptions.get(subscriptionId);
          if (subscription?.listener.onError) {
-             subscription.listener.onError(subscriptionId, `Subscription closed: ${message}`);
+             // Pass the relay's reason through verbatim so callers can
+             // pattern-match on standard prefixes (`auth-required:`,
+             // `rate-limited:`, `blocked:`, etc.) without parsing through
+             // a wrapper string.
+             subscription.listener.onError(subscriptionId, message);
          }
      }
      /**
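
With the reason passed through verbatim, a caller-side listener can branch on the NIP-01 machine-readable prefixes directly (a sketch; the retry helpers are hypothetical):

    client.subscribe(filter, {
        onEvent: (event) => { /* ... */ },
        onError: (subId, reason) => {
            if (reason.startsWith('rate-limited:')) {
                scheduleRetry(subId); // hypothetical backoff helper
            } else if (reason.startsWith('blocked:')) {
                giveUp(subId);        // hypothetical
            }
            // 'auth-required:' rejections are transient: the SDK retries
            // them itself after completing the NIP-42 AUTH exchange.
        },
    });
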
@@ -470,8 +664,27 @@ export class NostrClient {
          // Send AUTH response
          const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
          relay.socket.send(message);
-         // Re-send subscriptions after auth (relay may have ignored pre-auth requests)
+         // Re-send subscriptions after auth (relay may have ignored pre-auth
+         // requests). Two separate per-relay markers, two separate decisions:
+         //
+         // - `closedSubIds`: do NOT clear. handleClosedMessage already
+         //   skips the auth-required transient case, so anything in this
+         //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+         //   that AUTH does not relax. The resubscribeAll guard then
+         //   correctly skips terminal-rejected subs on this relay. They
+         //   will be retried on the next reconnect, when onopen creates a
+         //   fresh RelayConnection with empty markers.
+         //
+         // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+         //   with zero events (filter unsatisfiable without auth context);
+         //   post-auth the same filter might match. We must re-arm the
+         //   local "still waiting" state so any in-flight
+         //   queryWithFirstSeenWins doesn't see this relay as already-done
+         //   from a stale marker.
          setTimeout(() => {
+             const r = this.relays.get(relayUrl);
+             if (r)
+                 r.eosedSubIds.clear();
              this.resubscribeAll(relayUrl);
          }, AUTH_RESUBSCRIBE_DELAY_MS);
      }
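
Sequencing sketch of the post-AUTH retry path on one relay (frame order per the code above; "q1" is an illustrative sub_id):

    // t0   REQ "q1"             -> CLOSED "auth-required: ..." (transient; NOT added to closedSubIds)
    // t0+  AUTH challenge       -> signed AUTH response sent
    // t0+delay  eosedSubIds cleared, then resubscribeAll re-issues REQ "q1"
    //           (it is not in closedSubIds, so the guard lets it through)
    // t1   EVENT ... EOSE "q1"  -> eosedSubIds marked; the query can settle normally
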
@@ -491,24 +704,40 @@ export class NostrClient {
              item.reject(new Error('Client disconnected'));
          }
          this.eventQueue = [];
-         // Close all relay connections and clean up timers
+         // Close all relay connections and clean up timers. Mark every
+         // relay disconnected synchronously BEFORE we notify subscriptions
+         // below, so any listener that consults `allRelaysDoneFor` sees
+         // zero connected relays and settles immediately.
          for (const [url, relay] of this.relays) {
-             // Stop ping timer
+             relay.connected = false;
              if (relay.pingTimer) {
                  clearInterval(relay.pingTimer);
                  relay.pingTimer = null;
              }
-             // Stop reconnect timer
              if (relay.reconnectTimer) {
                  clearTimeout(relay.reconnectTimer);
                  relay.reconnectTimer = null;
              }
-             // Close socket
              if (relay.socket && relay.socket.readyState !== CLOSED) {
                  relay.socket.close(1000, 'Client disconnected');
              }
              this.emitConnectionEvent('disconnect', url, 'Client disconnected');
          }
+         // Notify in-flight subscriptions that we're shutting down.
+         // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+         // 0 connected → trivially true) and settles immediately, sparing
+         // callers the full queryTimeoutMs wait. Snapshot entries first
+         // because the listener may call unsubscribe(), which mutates
+         // this.subscriptions while we iterate.
+         const inflightSubs = Array.from(this.subscriptions.entries());
+         for (const [subId, sub] of inflightSubs) {
+             try {
+                 sub.listener.onError?.(subId, 'Client disconnected');
+             }
+             catch {
+                 // Ignore listener errors — we're tearing down anyway.
+             }
+         }
          this.relays.clear();
          this.subscriptions.clear();
      }
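
Effect on a pending query (sketch; `filter` and `extractResult` are placeholders):

    const pending = client.queryWithFirstSeenWins(filter, extractResult);
    client.disconnect();
    // Every in-flight sub receives a synthetic onError('Client disconnected');
    // allRelaysDoneFor then sees zero connected relays, so `pending` resolves
    // right away with whatever was gathered (or null) instead of waiting out
    // the full queryTimeoutMs.
    const result = await pending;
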
@@ -710,7 +939,22 @@ export class NostrClient {
              filter = filterOrSubId;
              listener = listenerOrFilter;
          }
+         // Reserved prefix for SDK-internal sub_ids (currently just the
+         // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+         // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+         if (subscriptionId.startsWith('__nostr-sdk-')) {
+             throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+         }
          this.subscriptions.set(subscriptionId, { filter, listener });
+         // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+         // before issuing the REQ — otherwise a fresh subscribe with a
+         // sub_id that was previously CLOSED (or was just freshly
+         // EOSE'd) would be skipped or treated as "already done" on
+         // those relays.
+         for (const [, relay] of this.relays) {
+             relay.closedSubIds.delete(subscriptionId);
+             relay.eosedSubIds.delete(subscriptionId);
+         }
          // Send subscription request to all connected relays
          const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
          for (const [, relay] of this.relays) {
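
The reserved-prefix guard in action (sketch, assuming the explicit-subscriptionId overload referenced above):

    // Throws: ids under the reserved "__nostr-sdk-" namespace are refused.
    client.subscribe('__nostr-sdk-mine', filter, listener);

    // Accepted: any id outside the reserved namespace.
    client.subscribe('profile-query-1', filter, listener);
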
@@ -728,12 +972,19 @@ export class NostrClient {
          if (!this.subscriptions.has(subscriptionId))
              return;
          this.subscriptions.delete(subscriptionId);
-         // Send CLOSE to all connected relays
+         // Send CLOSE to all connected relays — except those that already
+         // CLOSED the sub themselves (no point telling the relay something
+         // it told us).
          const message = JSON.stringify(['CLOSE', subscriptionId]);
          for (const [, relay] of this.relays) {
-             if (relay.connected && relay.socket?.readyState === OPEN) {
+             if (relay.connected && relay.socket?.readyState === OPEN
+                 && !relay.closedSubIds.has(subscriptionId)) {
                  relay.socket.send(message);
              }
+             // Drop both per-relay markers now that the sub is gone from
+             // the global map.
+             relay.closedSubIds.delete(subscriptionId);
+             relay.eosedSubIds.delete(subscriptionId);
          }
      }
      /**
@@ -759,12 +1010,45 @@ export class NostrClient {
      queryWithFirstSeenWins(filter, extractResult) {
          return new Promise((resolve) => {
              let subscriptionId = '';
-             const timeoutId = setTimeout(() => {
-                 if (subscriptionId)
-                     this.unsubscribe(subscriptionId);
-                 resolve(null);
-             }, this.queryTimeoutMs);
+             let settled = false;
+             // Declared as `let` and initialized lazily so `finishWith` can be
+             // invoked before the setTimeout call below without hitting the
+             // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+             // listener anticipates synchronous-callback hypothetical paths.)
+             let timeoutId;
+             // Accept an explicit `id` so callers from inside the listener can
+             // pass the sub_id the relay echoed back. This guards against any
+             // future change to subscribe() that would invoke listener
+             // callbacks before its return value is bound to `subscriptionId`
+             // — the closure-captured value would still be `''` and we'd skip
+             // the CLOSE frame, leaking the slot on the relay.
+             const finishWith = (result, id) => {
+                 if (settled)
+                     return;
+                 settled = true;
+                 if (timeoutId !== undefined)
+                     clearTimeout(timeoutId);
+                 const subId = id || subscriptionId;
+                 if (subId)
+                     this.unsubscribe(subId);
+                 resolve(result);
+             };
+             timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
              const authors = new Map();
+             const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+             const pickWinner = () => {
+                 let winnerEntry = null;
+                 let winnerPubkey = '';
+                 for (const [pubkey, entry] of authors) {
+                     if (!winnerEntry
+                         || entry.firstSeen < winnerEntry.firstSeen
+                         || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                         winnerEntry = entry;
+                         winnerPubkey = pubkey;
+                     }
+                 }
+                 return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+             };
              subscriptionId = this.subscribe(filter, {
                  onEvent: (event) => {
                      // Verify signature to prevent relay injection of forged events (#4)
@@ -783,24 +1067,55 @@ export class NostrClient {
                          }
                      }
                  },
-                 onEndOfStoredEvents: () => {
-                     clearTimeout(timeoutId);
-                     this.unsubscribe(subscriptionId);
-                     let winnerEntry = null;
-                     let winnerPubkey = '';
-                     for (const [pubkey, entry] of authors) {
-                         if (!winnerEntry
-                             || entry.firstSeen < winnerEntry.firstSeen
-                             || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
-                             winnerEntry = entry;
-                             winnerPubkey = pubkey;
-                         }
+                 // EOSE means *this relay* has finished delivering stored
+                 // events. In a multi-relay client we must not settle yet — a
+                 // slower relay may still be about to deliver matching events.
+                 // Settle only when every connected relay has either EOSE'd
+                 // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                 // allDone is trivially true with one relay.)
+                 onEndOfStoredEvents: (id) => {
+                     if (allRelaysDone(id)) {
+                         finishWith(pickWinner(), id);
+                     }
+                 },
+                 // Subscription error from the SDK — fires from three paths
+                 // that all need the same "is it time to settle?" check:
+                 //   1. Relay sent CLOSED for this sub. In a multi-relay
+                 //      client the same sub_id may still be alive on a
+                 //      healthy relay; settling on the first CLOSED would
+                 //      prematurely abort a query other relays could
+                 //      satisfy. handleClosedMessage records the rejection
+                 //      on the sending relay's closedSubIds before invoking
+                 //      us, so we can decide via allRelaysDoneFor.
+                 //   2. Relay disconnected mid-query (socket.onclose →
+                 //      synthetic onError). The relay no longer counts as
+                 //      connected, so allRelaysDoneFor excludes it.
+                 //   3. Client disconnected (disconnect() → synthetic
+                 //      onError). All relays are torn down, allRelaysDoneFor
+                 //      sees zero connected and settles.
+                 onError: (id, message) => {
+                     console.warn(`Subscription error on ${id}: ${message}`);
+                     if (allRelaysDone(id)) {
+                         finishWith(pickWinner(), id);
                      }
-                     resolve(winnerEntry ? extractResult(winnerEntry.latestEvent) : null);
+                     // else: keep waiting for EOSE / CLOSED from remaining
+                     // relays or the overall query timeout.
                  },
              });
          });
      }
+     /**
+      * True if every currently-connected relay has finished delivering
+      * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+      * queryWithFirstSeenWins to coordinate multi-relay settlement.
+      */
+     allRelaysDoneFor(subscriptionId) {
+         const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+         // No connected relays at all → nothing to wait for; settle.
+         if (connected.length === 0)
+             return true;
+         return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+     }
      /**
       * Query for a public key by nametag.
       * Uses first-seen-wins anti-hijacking resolution.
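
A settlement trace the new predicate coordinates (three connected relays; sketch):

    // Relay A (fast):  EOSE                  -> eosedSubIds(A) marked; B, C pending -> keep waiting
    // Relay C:         CLOSED "blocked: ..." -> closedSubIds(C) marked; B pending   -> keep waiting
    // Relay B (slow):  EOSE                  -> every connected relay done -> finishWith(pickWinner())
    //
    // If B instead drops mid-query, its socket.onclose fires a synthetic onError,
    // B stops counting as connected, and A plus C alone satisfy allRelaysDoneFor.
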