@unicitylabs/nostr-js-sdk 0.4.1 → 0.5.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.js +357 -50
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +357 -50
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +357 -50
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +357 -50
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2
```diff
@@ -50,6 +50,16 @@ const DEFAULT_QUERY_TIMEOUT_MS = 5000;
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
```
```diff
@@ -89,6 +99,30 @@ class NostrClient {
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
```
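To make the hot-swap semantics above concrete, here is a small self-contained sketch, not the SDK's code: the `KeyManagerLike` interface and the filter shape below are assumptions for illustration only. It shows that later operations read the key manager at call time, so the next keepalive filter picks up the new key.

```ts
// Hypothetical, simplified model of the behaviour described in the diff above.
interface KeyManagerLike {
  getPublicKeyHex(): string;
}

class MiniClient {
  constructor(private keyManager: KeyManagerLike) {}

  // Mirrors the idea of setKeyManager: swap the manager, keep connections.
  setKeyManager(km: KeyManagerLike) {
    this.keyManager = km;
  }

  // Anything computed later (e.g. a keepalive filter) sees the new key.
  buildKeepaliveFilter() {
    return { authors: [this.keyManager.getPublicKeyHex()], limit: 1 };
  }
}

const a: KeyManagerLike = { getPublicKeyHex: () => 'aa'.repeat(32) };
const b: KeyManagerLike = { getPublicKeyHex: () => 'bb'.repeat(32) };

const client = new MiniClient(a);
console.log(client.buildKeepaliveFilter().authors[0]); // key "aa…"
client.setKeyManager(b);
console.log(client.buildKeepaliveFilter().authors[0]); // key "bb…" (next ping uses the new key)
```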
```diff
@@ -132,13 +166,6 @@ class NostrClient {
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
```
```diff
@@ -175,11 +202,41 @@ class NostrClient {
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            // A) createWebSocket resolves AFTER the timeout fired.
+            // B) createWebSocket resolves BEFORE the timeout, but
+            //    `onopen` fires AFTER the timeout fired.
+            // C) createWebSocket resolves and `onopen` fires BEFORE the
+            //    timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             (0, WebSocketAdapter_js_1.createWebSocket)(url)
                 .then((socket) => {
+                if (timedOut) {
+                    // Caller already saw the rejection. Discard the late
+                    // socket so we don't leak it.
+                    try {
+                        socket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                    return;
+                }
+                pendingSocket = socket;
                 const relay = {
                     url,
                     socket,
```
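The guard pattern itself is generic: a `timedOut` flag plus a handle to whatever socket already exists when the deadline fires. Below is a standalone sketch of races A and B under assumed names (`SocketLike`, `connectWithDeadline`); it is not the SDK's `WebSocketAdapter` code.

```ts
// Minimal sketch of the timedOut/pendingSocket guard, assuming a socket
// factory that resolves asynchronously and a WebSocket-like close().
interface SocketLike {
  close(code?: number, reason?: string): void;
  onopen?: () => void;
}

function connectWithDeadline(
  openSocket: () => Promise<SocketLike>,
  deadlineMs: number,
): Promise<SocketLike> {
  return new Promise((resolve, reject) => {
    let timedOut = false;
    let pendingSocket: SocketLike | null = null;

    const timer = setTimeout(() => {
      timedOut = true;
      // Race B: a socket exists but has not opened yet; close it so it
      // cannot become an orphaned background connection.
      pendingSocket?.close(1000, 'Connection setup timed out');
      reject(new Error('timed out'));
    }, deadlineMs);

    openSocket()
      .then((socket) => {
        if (timedOut) {
          // Race A: the factory resolved after the deadline already rejected.
          socket.close(1000, 'Connection setup timed out');
          return;
        }
        pendingSocket = socket;
        socket.onopen = () => {
          if (timedOut) {
            // Race B again, observed from onopen's side.
            socket.close(1000, 'Connection setup timed out');
            return;
          }
          clearTimeout(timer); // Race C: the success path.
          resolve(socket);
        };
      })
      .catch((err) => {
        clearTimeout(timer);
        reject(err);
      });
  });
}
```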
```diff
@@ -191,8 +248,28 @@ class NostrClient {
                     lastPongTime: Date.now(),
                     unansweredPings: 0,
                     wasConnected: existingRelay?.wasConnected ?? false,
+                    // Reset on every new connection: a relay's per-connection
+                    // sub-slot accounting is fresh, so previously-rejected REQs
+                    // should be re-issued on the new socket.
+                    closedSubIds: new Set(),
+                    eosedSubIds: new Set(),
                 };
                 socket.onopen = () => {
+                    // The `.then` block already guards against a socket
+                    // arriving after the connection timeout, but the socket
+                    // can also be created BEFORE the timeout while
+                    // `onopen` fires AFTER the timeout has rejected the
+                    // outer promise. Without this second guard we'd register
+                    // the relay, start a pingTimer, and resubscribe — orphan
+                    // background resources the caller can't see or clean up
+                    // because their connect() call already saw a rejection.
+                    if (timedOut) {
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
                     clearTimeout(timeoutId);
                     relay.connected = true;
                     relay.reconnectAttempts = 0; // Reset on successful connection
```
```diff
@@ -233,9 +310,40 @@ class NostrClient {
                     const wasConnected = relay.connected;
                     relay.connected = false;
                     this.stopPingTimer(url);
+                    // Pre-onopen close: TCP handshake failure or relay
+                    // immediately closed the WS during the upgrade. Without
+                    // this, the connectToRelay promise stays pending until
+                    // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                    // lets the caller see the failure promptly and retry.
+                    if (!wasConnected && !timedOut) {
+                        timedOut = true;
+                        clearTimeout(timeoutId);
+                        reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                    }
                     if (wasConnected) {
                         const reason = event?.reason || 'Connection closed';
                         this.emitConnectionEvent('disconnect', url, reason);
+                        // Re-trigger the all-done check on every active sub.
+                        // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                        // from listener callbacks (EOSE / CLOSED via onError);
+                        // a socket that drops without sending either would
+                        // otherwise leave the query hanging until
+                        // queryTimeoutMs even though the disconnected relay no
+                        // longer counts toward "still pending" relays. Firing
+                        // a synthetic onError gives every active sub a chance
+                        // to re-evaluate now that the relay set has shrunk.
+                        // Include the relay URL so listeners in a multi-relay
+                        // client can attribute which relay dropped.
+                        const inflight = Array.from(this.subscriptions.entries());
+                        for (const [subId, sub] of inflight) {
+                            try {
+                                sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                            }
+                            catch {
+                                // Ignore listener errors — we're notifying
+                                // best-effort.
+                            }
+                        }
                     }
                     if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                         this.scheduleReconnect(url);
```
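The synthetic onError fan-out is a best-effort notify loop: snapshot the subscription map, call each listener inside try/catch, and keep the relay URL in the message so multi-relay callers can attribute the drop. A hedged standalone sketch with assumed listener and map shapes (not the SDK's internal types):

```ts
// Illustrative only: `SubListener` and the map layout are assumptions.
interface SubListener {
  onError?: (subId: string, message: string) => void;
}

function notifyRelayDropped(
  subscriptions: Map<string, { listener: SubListener }>,
  relayUrl: string,
  reason: string,
): void {
  // Snapshot first: a listener may unsubscribe (mutating the map) while
  // we iterate.
  for (const [subId, sub] of Array.from(subscriptions.entries())) {
    try {
      sub.listener.onError?.(subId, `Relay disconnected (${relayUrl}): ${reason}`);
    } catch {
      // Best-effort notification; a throwing listener must not stop the loop.
    }
  }
}
```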
```diff
@@ -247,7 +355,11 @@ class NostrClient {
                         reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                     }
                 };
-                this.relays
+                // Note: we do NOT register the relay in `this.relays` here —
+                // only after `onopen` fires successfully. Registering eagerly
+                // (before onopen) would leak the relay into the global map
+                // even when the connection setup times out and the caller's
+                // promise has already rejected.
             })
                 .catch((error) => {
                 clearTimeout(timeoutId);
```
```diff
@@ -324,16 +436,24 @@ class NostrClient {
             }
             return;
         }
-        // Send a subscription request as a ping (relays respond with EOSE)
-        //
-        //
+        // Send a subscription request as a ping (relays respond with EOSE).
+        // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+        // with no kinds/authors/#p will, after EOSE, stream every event the
+        // relay receives (NIP-01 live tail), saturating the connection and
+        // exhausting per-connection subscription slots on busy relays.
+        // Scoping by `authors:[self]` keeps the live tail empty in practice
+        // (the relay would only forward our own future events).
         try {
-            const
+            const selfPubkey = this.keyManager.getPublicKeyHex();
             // First close any existing ping subscription to ensure we don't accumulate
-            const closeMessage = JSON.stringify(['CLOSE',
+            const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
             relay.socket.send(closeMessage);
             // Then send the new ping request (limit:1 ensures relay sends EOSE)
-            const pingMessage = JSON.stringify([
+            const pingMessage = JSON.stringify([
+                'REQ',
+                PING_SUB_ID,
+                { authors: [selfPubkey], limit: 1 },
+            ]);
             relay.socket.send(pingMessage);
             relay.unansweredPings++;
         }
```
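On the wire the keepalive cycle is just two NIP-01 client frames. A sketch of what they look like with the scoped filter; the helper name and the placeholder pubkey are mine, not part of the package:

```ts
// Sketch of the two frames sent each ping interval: close the previous
// keepalive sub, then re-open it scoped to our own pubkey so the
// post-EOSE live tail stays practically empty.
const PING_SUB_ID = '__nostr-sdk-keepalive__';

function keepaliveFrames(selfPubkey: string): [string, string] {
  const close = JSON.stringify(['CLOSE', PING_SUB_ID]);
  const req = JSON.stringify([
    'REQ',
    PING_SUB_ID,
    { authors: [selfPubkey], limit: 1 }, // scoped: only our own events match
  ]);
  return [close, req];
}

// ["CLOSE","__nostr-sdk-keepalive__"]
// ["REQ","__nostr-sdk-keepalive__",{"authors":["abab…"],"limit":1}]
console.log(keepaliveFrames('ab'.repeat(32)).join('\n'));
```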
```diff
@@ -368,6 +488,11 @@ class NostrClient {
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
```
```diff
@@ -387,7 +512,7 @@ class NostrClient {
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
```
```diff
@@ -401,16 +526,16 @@ class NostrClient {
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
```
```diff
@@ -422,7 +547,7 @@ class NostrClient {
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
```
```diff
@@ -460,11 +585,23 @@ class NostrClient {
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
```
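The per-relay bookkeeping reduces to two sets per relay plus an "all done" predicate over the connected relays. A self-contained sketch; `RelayState` and `allDone` are assumed names, and the real `RelayConnection` carries more fields:

```ts
// Assumed, simplified relay state; the SDK's RelayConnection carries more.
interface RelayState {
  connected: boolean;
  eosedSubIds: Set<string>;
  closedSubIds: Set<string>;
}

// A connected relay is "done" for a sub once it has either EOSE'd it
// (finished streaming stored events) or CLOSED it (rejected it).
function allDone(relays: RelayState[], subId: string): boolean {
  const connected = relays.filter((r) => r.connected);
  if (connected.length === 0) return true; // nothing left to wait for
  return connected.every(
    (r) => r.eosedSubIds.has(subId) || r.closedSubIds.has(subId),
  );
}

// Two relays: the fast one EOSEs first, the slow one is still working.
const fast: RelayState = { connected: true, eosedSubIds: new Set(['q1']), closedSubIds: new Set() };
const slow: RelayState = { connected: true, eosedSubIds: new Set(), closedSubIds: new Set() };
console.log(allDone([fast, slow], 'q1')); // false: keep waiting
slow.closedSubIds.add('q1');              // slow relay rejects the sub
console.log(allDone([fast, slow], 'q1')); // true: settle now
```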
```diff
@@ -481,15 +618,64 @@ class NostrClient {
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
+            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
```
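The transient-versus-terminal decision hinges on classifying the CLOSED reason string. A standalone sketch of that classification; the function name is mine, not the SDK's:

```ts
// Accepts the three observed shapes: "auth-required: <reason>",
// "auth-required <reason>", and bare "auth-required".
function isAuthRequiredReason(message: string): boolean {
  return (
    message === 'auth-required' ||
    message.startsWith('auth-required:') ||
    message.startsWith('auth-required ')
  );
}

// Transient: do not record a terminal per-relay rejection.
console.log(isAuthRequiredReason('auth-required: please AUTH first')); // true
// Terminal: record it so this relay is not re-asked in a loop.
console.log(isAuthRequiredReason('rate-limited: slow down'));          // false
```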
```diff
@@ -514,8 +700,27 @@ class NostrClient {
         // Send AUTH response
         const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
         relay.socket.send(message);
-        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // requests). Two separate per-relay markers, two separate decisions:
+        //
+        // - `closedSubIds`: do NOT clear. handleClosedMessage already
+        //   skips the auth-required transient case, so anything in this
+        //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+        //   that AUTH does not relax. The resubscribeAll guard then
+        //   correctly skips terminal-rejected subs on this relay. They
+        //   will be retried on the next reconnect, when onopen creates a
+        //   fresh RelayConnection with empty markers.
+        //
+        // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+        //   with zero events (filter unsatisfiable without auth context);
+        //   post-auth the same filter might match. We must re-arm the
+        //   local "still waiting" state so any in-flight
+        //   queryWithFirstSeenWins doesn't see this relay as already-done
+        //   from a stale marker.
         setTimeout(() => {
+            const r = this.relays.get(relayUrl);
+            if (r)
+                r.eosedSubIds.clear();
             this.resubscribeAll(relayUrl);
         }, AUTH_RESUBSCRIBE_DELAY_MS);
     }
```
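The two-marker policy after a successful AUTH fits in a few lines. A hedged, self-contained sketch with an assumed marker shape:

```ts
// Assumed, simplified per-relay marker state.
interface RelayMarkers {
  eosedSubIds: Set<string>;
  closedSubIds: Set<string>;
}

// After a successful NIP-42 AUTH: re-arm the "still waiting" EOSE state
// (a pre-auth EOSE may have been empty only because auth was missing),
// but keep terminal CLOSED rejections, which AUTH does not relax.
function onAuthAccepted(relay: RelayMarkers): void {
  relay.eosedSubIds.clear();
  // relay.closedSubIds intentionally left untouched.
}
```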
```diff
@@ -535,24 +740,40 @@ class NostrClient {
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== WebSocketAdapter_js_1.CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
             this.emitConnectionEvent('disconnect', url, 'Client disconnected');
         }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot keys first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
```
```diff
@@ -754,7 +975,22 @@ class NostrClient {
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
```
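From the caller's side the new guard simply means an explicit subscription id may not start with the reserved prefix. A standalone sketch of the check; the helper name is mine, and the thrown error text in the package may be worded differently:

```ts
// Standalone sketch of the reserved-prefix guard on explicit sub_ids.
const RESERVED_PREFIX = '__nostr-sdk-';

function assertValidSubscriptionId(subscriptionId: string): void {
  if (subscriptionId.startsWith(RESERVED_PREFIX)) {
    throw new Error(
      `Subscription ID "${subscriptionId}" uses the reserved "${RESERVED_PREFIX}" prefix`,
    );
  }
}

assertValidSubscriptionId('ping');                      // fine: not namespaced
// assertValidSubscriptionId('__nostr-sdk-keepalive__'); // would throw
```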
```diff
@@ -772,12 +1008,19 @@ class NostrClient {
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === WebSocketAdapter_js_1.OPEN
+            if (relay.connected && relay.socket?.readyState === WebSocketAdapter_js_1.OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
```
```diff
@@ -803,12 +1046,45 @@ class NostrClient {
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+            // listener anticipates synchronous-callback hypothetical paths.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
```
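Two small pieces carry the settlement logic: an idempotent finish helper that can only resolve once, and a deterministic winner pick that breaks first-seen ties by pubkey. A self-contained sketch with assumed entry shapes; the real code tracks more per author:

```ts
// Assumed shape of the per-author bookkeeping.
interface AuthorEntry {
  firstSeen: number;   // ms timestamp of the first matching event seen
  latestEvent: unknown;
}

// Earliest firstSeen wins; equal timestamps fall back to the lexically
// smallest pubkey so the result is deterministic across runs.
function pickWinner(authors: Map<string, AuthorEntry>): AuthorEntry | null {
  let winner: AuthorEntry | null = null;
  let winnerPubkey = '';
  for (const [pubkey, entry] of authors) {
    if (
      !winner ||
      entry.firstSeen < winner.firstSeen ||
      (entry.firstSeen === winner.firstSeen && pubkey < winnerPubkey)
    ) {
      winner = entry;
      winnerPubkey = pubkey;
    }
  }
  return winner;
}

// A settle-once wrapper in the spirit of the diff's `finishWith`.
function once<T>(resolve: (v: T) => void): (v: T) => void {
  let settled = false;
  return (v) => {
    if (settled) return;
    settled = true;
    resolve(v);
  };
}
```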
```diff
@@ -827,24 +1103,55 @@ class NostrClient {
                         }
                     }
                 },
-
-
-
-
-
-
-
-
-
-
-
-
+                // EOSE means *this relay* has finished delivering stored
+                // events. In a multi-relay client we must not settle yet — a
+                // slower relay may still be about to deliver matching events.
+                // Settle only when every connected relay has either EOSE'd
+                // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                // allDone is trivially true with one relay.)
+                onEndOfStoredEvents: (id) => {
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
+                    }
+                },
+                // Subscription error from the SDK — fires from three paths
+                // that all need the same "is it time to settle?" check:
+                // 1. Relay sent CLOSED for this sub. In a multi-relay
+                //    client the same sub_id may still be alive on a
+                //    healthy relay; settling on the first CLOSED would
+                //    prematurely abort a query other relays could
+                //    satisfy. handleClosedMessage records the rejection
+                //    on the sending relay's closedSubIds before invoking
+                //    us, so we can decide via allRelaysDoneFor.
+                // 2. Relay disconnected mid-query (socket.onclose →
+                //    synthetic onError). The relay no longer counts as
+                //    connected, so allRelaysDoneFor excludes it.
+                // 3. Client disconnected (disconnect() → synthetic
+                //    onError). All relays are torn down, allRelaysDoneFor
+                //    sees zero connected and settles.
+                onError: (id, message) => {
+                    console.warn(`Subscription error on ${id}: ${message}`);
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
                     }
-
+                    // else: keep waiting for EOSE / CLOSED from remaining
+                    // relays or the overall query timeout.
                 },
             });
         });
     }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.
```