@unicitylabs/nostr-js-sdk 0.4.1 → 0.5.0-dev.2
- package/dist/browser/index.js +357 -50
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +357 -50
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +357 -50
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +357 -50
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2
@@ -14,6 +14,16 @@ const DEFAULT_QUERY_TIMEOUT_MS = 5000;
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
@@ -53,6 +63,30 @@ export class NostrClient {
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
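For context, a small consumer-side sketch of the new key-rotation hook. Only `setKeyManager`, `getKeyManager`, and `getPublicKeyHex` appear in the diff above; the import path and the `KeyManager` type name are assumptions.

```ts
import { NostrClient, KeyManager } from '@unicitylabs/nostr-js-sdk';

// Hypothetical rotation helper: the connection stays up, but signing,
// NIP-42 AUTH responses and the keepalive ping filter all switch to the
// new key from this point on. Existing subscriptions are not re-issued.
function rotateKey(client: NostrClient, nextKey: KeyManager): void {
  const previous = client.getKeyManager();
  console.log('rotating away from', previous.getPublicKeyHex());
  client.setKeyManager(nextKey);
  console.log('now signing as', client.getKeyManager().getPublicKeyHex());
}
```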
@@ -96,13 +130,6 @@ export class NostrClient {
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
@@ -139,11 +166,41 @@ export class NostrClient {
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            // A) createWebSocket resolves AFTER the timeout fired.
+            // B) createWebSocket resolves BEFORE the timeout, but
+            //    `onopen` fires AFTER the timeout fired.
+            // C) createWebSocket resolves and `onopen` fires BEFORE the
+            //    timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             createWebSocket(url)
                 .then((socket) => {
+                if (timedOut) {
+                    // Caller already saw the rejection. Discard the late
+                    // socket so we don't leak it.
+                    try {
+                        socket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                    return;
+                }
+                pendingSocket = socket;
                 const relay = {
                     url,
                     socket,
@@ -155,8 +212,28 @@ export class NostrClient {
                     lastPongTime: Date.now(),
                     unansweredPings: 0,
                     wasConnected: existingRelay?.wasConnected ?? false,
+                    // Reset on every new connection: a relay's per-connection
+                    // sub-slot accounting is fresh, so previously-rejected REQs
+                    // should be re-issued on the new socket.
+                    closedSubIds: new Set(),
+                    eosedSubIds: new Set(),
                 };
                 socket.onopen = () => {
+                    // The `.then` block already guards against a socket
+                    // arriving after the connection timeout, but the socket
+                    // can also be created BEFORE the timeout while
+                    // `onopen` fires AFTER the timeout has rejected the
+                    // outer promise. Without this second guard we'd register
+                    // the relay, start a pingTimer, and resubscribe — orphan
+                    // background resources the caller can't see or clean up
+                    // because their connect() call already saw a rejection.
+                    if (timedOut) {
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
                     clearTimeout(timeoutId);
                     relay.connected = true;
                     relay.reconnectAttempts = 0; // Reset on successful connection
@@ -197,9 +274,40 @@ export class NostrClient {
                     const wasConnected = relay.connected;
                     relay.connected = false;
                     this.stopPingTimer(url);
+                    // Pre-onopen close: TCP handshake failure or relay
+                    // immediately closed the WS during the upgrade. Without
+                    // this, the connectToRelay promise stays pending until
+                    // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                    // lets the caller see the failure promptly and retry.
+                    if (!wasConnected && !timedOut) {
+                        timedOut = true;
+                        clearTimeout(timeoutId);
+                        reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                    }
                     if (wasConnected) {
                         const reason = event?.reason || 'Connection closed';
                         this.emitConnectionEvent('disconnect', url, reason);
+                        // Re-trigger the all-done check on every active sub.
+                        // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                        // from listener callbacks (EOSE / CLOSED via onError);
+                        // a socket that drops without sending either would
+                        // otherwise leave the query hanging until
+                        // queryTimeoutMs even though the disconnected relay no
+                        // longer counts toward "still pending" relays. Firing
+                        // a synthetic onError gives every active sub a chance
+                        // to re-evaluate now that the relay set has shrunk.
+                        // Include the relay URL so listeners in a multi-relay
+                        // client can attribute which relay dropped.
+                        const inflight = Array.from(this.subscriptions.entries());
+                        for (const [subId, sub] of inflight) {
+                            try {
+                                sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                            }
+                            catch {
+                                // Ignore listener errors — we're notifying
+                                // best-effort.
+                            }
+                        }
                     }
                     if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                         this.scheduleReconnect(url);
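As a rough illustration of how a caller might consume the synthetic notification above: the listener callback names match the diff, while the typing and the handling logic are placeholders.

```ts
// Sketch only: onEvent / onEndOfStoredEvents / onError match the callbacks
// used in the diff; the bodies are illustrative.
const listener = {
  onEvent: (event: unknown): void => {
    // handle a matching event
  },
  onEndOfStoredEvents: (subId: string): void => {
    // one relay finished delivering its stored events for this sub
  },
  onError: (subId: string, message: string): void => {
    if (message.startsWith('Relay disconnected (')) {
      // Synthetic notification: one relay dropped mid-query; other relays
      // may still answer, so this is usually not fatal.
      console.warn(`sub ${subId}: ${message}`);
      return;
    }
    // Otherwise a relay-sent CLOSED reason (auth-required:, rate-limited:, ...)
    console.error(`sub ${subId} rejected: ${message}`);
  },
};
```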
@@ -211,7 +319,11 @@ export class NostrClient {
                         reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                     }
                 };
-                this.relays
+                // Note: we do NOT register the relay in `this.relays` here —
+                // only after `onopen` fires successfully. Registering eagerly
+                // (before onopen) would leak the relay into the global map
+                // even when the connection setup times out and the caller's
+                // promise has already rejected.
             })
                 .catch((error) => {
                 clearTimeout(timeoutId);
@@ -288,16 +400,24 @@ export class NostrClient {
             }
             return;
         }
-        // Send a subscription request as a ping (relays respond with EOSE)
-        //
-        //
+        // Send a subscription request as a ping (relays respond with EOSE).
+        // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+        // with no kinds/authors/#p will, after EOSE, stream every event the
+        // relay receives (NIP-01 live tail), saturating the connection and
+        // exhausting per-connection subscription slots on busy relays.
+        // Scoping by `authors:[self]` keeps the live tail empty in practice
+        // (the relay would only forward our own future events).
         try {
-            const
+            const selfPubkey = this.keyManager.getPublicKeyHex();
             // First close any existing ping subscription to ensure we don't accumulate
-            const closeMessage = JSON.stringify(['CLOSE',
+            const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
             relay.socket.send(closeMessage);
             // Then send the new ping request (limit:1 ensures relay sends EOSE)
-            const pingMessage = JSON.stringify([
+            const pingMessage = JSON.stringify([
+                'REQ',
+                PING_SUB_ID,
+                { authors: [selfPubkey], limit: 1 },
+            ]);
             relay.socket.send(pingMessage);
             relay.unansweredPings++;
         }
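To make the new keepalive concrete, the two frames it now emits each ping interval look roughly like this. Illustration only; `selfPubkey` stands in for `keyManager.getPublicKeyHex()`.

```ts
const PING_SUB_ID = '__nostr-sdk-keepalive__';
const selfPubkey = 'f'.repeat(64); // placeholder 64-char hex pubkey

// 1. Tear down the previous keepalive sub so slots don't accumulate.
const closeFrame = JSON.stringify(['CLOSE', PING_SUB_ID]);

// 2. Re-issue a tightly scoped REQ: authors:[self] keeps the post-EOSE
//    live tail empty, and limit:1 guarantees the relay answers with EOSE.
const reqFrame = JSON.stringify(['REQ', PING_SUB_ID, { authors: [selfPubkey], limit: 1 }]);

console.log(closeFrame); // ["CLOSE","__nostr-sdk-keepalive__"]
console.log(reqFrame);   // ["REQ","__nostr-sdk-keepalive__",{"authors":[...],"limit":1}]
```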
@@ -332,6 +452,11 @@ export class NostrClient {
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
@@ -351,7 +476,7 @@ export class NostrClient {
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
@@ -365,16 +490,16 @@ export class NostrClient {
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
@@ -386,7 +511,7 @@ export class NostrClient {
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
@@ -424,11 +549,23 @@ export class NostrClient {
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
@@ -445,15 +582,64 @@ export class NostrClient {
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
+            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
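The auth-required detection described in the comments reduces to a small predicate. A standalone restatement, accepting the same three on-the-wire shapes as the hunk above:

```ts
// Same three shapes the handler accepts.
function isAuthRequired(reason: string): boolean {
  return (
    reason === 'auth-required' ||
    reason.startsWith('auth-required:') ||
    reason.startsWith('auth-required ')
  );
}

// Transient: the sub is retried after the NIP-42 AUTH exchange.
console.log(isAuthRequired('auth-required: we only serve authenticated users')); // true
// Terminal on this relay: recorded in closedSubIds and skipped on resubscribe.
console.log(isAuthRequired('rate-limited: slow down'));                          // false
```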
@@ -478,8 +664,27 @@ export class NostrClient {
         // Send AUTH response
         const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
         relay.socket.send(message);
-        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // requests). Two separate per-relay markers, two separate decisions:
+        //
+        // - `closedSubIds`: do NOT clear. handleClosedMessage already
+        //   skips the auth-required transient case, so anything in this
+        //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+        //   that AUTH does not relax. The resubscribeAll guard then
+        //   correctly skips terminal-rejected subs on this relay. They
+        //   will be retried on the next reconnect, when onopen creates a
+        //   fresh RelayConnection with empty markers.
+        //
+        // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+        //   with zero events (filter unsatisfiable without auth context);
+        //   post-auth the same filter might match. We must re-arm the
+        //   local "still waiting" state so any in-flight
+        //   queryWithFirstSeenWins doesn't see this relay as already-done
+        //   from a stale marker.
         setTimeout(() => {
+            const r = this.relays.get(relayUrl);
+            if (r)
+                r.eosedSubIds.clear();
             this.resubscribeAll(relayUrl);
         }, AUTH_RESUBSCRIBE_DELAY_MS);
     }
@@ -499,24 +704,40 @@ export class NostrClient {
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
             this.emitConnectionEvent('disconnect', url, 'Client disconnected');
         }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot keys first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
@@ -718,7 +939,22 @@ export class NostrClient {
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
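A usage sketch of the new reserved-prefix guard. The explicit-`subscriptionId` overload of `subscribe()` is shown in the diff; `client`, `filter`, and `listener` below are placeholders for the SDK's own types.

```ts
// `client` stands in for a connected NostrClient instance; `filter` and
// `listener` are placeholders for the SDK's filter and listener types.
declare const client: any;
declare const filter: any;
declare const listener: any;

try {
  // Throws: ids starting with "__nostr-sdk-" are reserved for the SDK
  // (currently only the keepalive PING_SUB_ID).
  client.subscribe('__nostr-sdk-custom', filter, listener);
} catch (err) {
  console.error((err as Error).message);
}

// Any other explicit id is fine, including the literal "ping", which the
// keepalive no longer stomps on now that it uses its own namespaced sub_id.
client.subscribe('ping', filter, listener);
```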
@@ -736,12 +972,19 @@ export class NostrClient {
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === OPEN
+            if (relay.connected && relay.socket?.readyState === OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
@@ -767,12 +1010,45 @@ export class NostrClient {
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+            // listener anticipates synchronous-callback hypothetical paths.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
@@ -791,24 +1067,55 @@ export class NostrClient {
                     }
                 }
             },
-
-
-
-
-
-
-
-
-
-
-
+            // EOSE means *this relay* has finished delivering stored
+            // events. In a multi-relay client we must not settle yet — a
+            // slower relay may still be about to deliver matching events.
+            // Settle only when every connected relay has either EOSE'd
+            // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+            // allDone is trivially true with one relay.)
+            onEndOfStoredEvents: (id) => {
+                if (allRelaysDone(id)) {
+                    finishWith(pickWinner(), id);
+                }
+            },
+            // Subscription error from the SDK — fires from three paths
+            // that all need the same "is it time to settle?" check:
+            // 1. Relay sent CLOSED for this sub. In a multi-relay
+            //    client the same sub_id may still be alive on a
+            //    healthy relay; settling on the first CLOSED would
+            //    prematurely abort a query other relays could
+            //    satisfy. handleClosedMessage records the rejection
+            //    on the sending relay's closedSubIds before invoking
+            //    us, so we can decide via allRelaysDoneFor.
+            // 2. Relay disconnected mid-query (socket.onclose →
+            //    synthetic onError). The relay no longer counts as
+            //    connected, so allRelaysDoneFor excludes it.
+            // 3. Client disconnected (disconnect() → synthetic
+            //    onError). All relays are torn down, allRelaysDoneFor
+            //    sees zero connected and settles.
+            onError: (id, message) => {
+                console.warn(`Subscription error on ${id}: ${message}`);
+                if (allRelaysDone(id)) {
+                    finishWith(pickWinner(), id);
                 }
-
+                // else: keep waiting for EOSE / CLOSED from remaining
+                // relays or the overall query timeout.
             },
         });
     });
 }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.