@unicitylabs/nostr-js-sdk 0.4.0 → 0.5.0-dev.2
- package/dist/browser/index.js +370 -55
- package/dist/browser/index.js.map +1 -1
- package/dist/browser/index.min.js +6 -6
- package/dist/browser/index.min.js.map +1 -1
- package/dist/browser/index.umd.js +370 -55
- package/dist/browser/index.umd.js.map +1 -1
- package/dist/browser/index.umd.min.js +7 -7
- package/dist/browser/index.umd.min.js.map +1 -1
- package/dist/cjs/client/NostrClient.js +370 -55
- package/dist/cjs/client/NostrClient.js.map +1 -1
- package/dist/esm/client/NostrClient.js +370 -55
- package/dist/esm/client/NostrClient.js.map +1 -1
- package/dist/types/client/NostrClient.d.ts +46 -5
- package/dist/types/client/NostrClient.d.ts.map +1 -1
- package/package.json +2 -2
@@ -50,6 +50,16 @@ const DEFAULT_QUERY_TIMEOUT_MS = 5000;
 const DEFAULT_RECONNECT_INTERVAL_MS = 1000;
 const DEFAULT_MAX_RECONNECT_INTERVAL_MS = 30000;
 const DEFAULT_PING_INTERVAL_MS = 30000;
+/**
+ * Internal sub_id reserved for the keepalive REQ. Namespaced with a
+ * `__nostr-sdk-` prefix so that user code calling
+ * {@link NostrClient.subscribe} with an explicit `subscriptionId`
+ * cannot collide — a user choosing the literal `"ping"` would
+ * otherwise have their subscription forcibly CLOSE/REQ'd every
+ * ping interval. The leading `__` is a stable convention for
+ * "do not pick this name."
+ */
+const PING_SUB_ID = '__nostr-sdk-keepalive__';
 /**
  * Delay before resubscribing after NIP-42 authentication.
  * This gives the relay time to process the AUTH response before we send
@@ -89,6 +99,30 @@ class NostrClient {
         this.maxReconnectIntervalMs = options?.maxReconnectIntervalMs ?? DEFAULT_MAX_RECONNECT_INTERVAL_MS;
         this.pingIntervalMs = options?.pingIntervalMs ?? DEFAULT_PING_INTERVAL_MS;
     }
+    /**
+     * Replace the key manager used for signing and encryption.
+     *
+     * The connection stays alive — but every operation that consults the
+     * key manager from this point on uses the new key, including:
+     * - signing future published events,
+     * - signing NIP-42 AUTH challenge responses,
+     * - the `authors:[selfPubkey]` filter on the keepalive ping REQ
+     *   (computed each ping interval),
+     * - any other code path that calls `getPublicKeyHex()` on the
+     *   stored manager.
+     *
+     * Existing in-flight subscriptions are not re-issued or re-keyed.
+     * @param keyManager New key manager
+     */
+    setKeyManager(keyManager) {
+        this.keyManager = keyManager;
+    }
+    /**
+     * Get the current key manager.
+     */
+    getKeyManager() {
+        return this.keyManager;
+    }
     /**
      * Add a connection event listener.
      * @param listener Listener for connection events
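The new `setKeyManager`/`getKeyManager` pair supports key rotation on a live client. A minimal usage sketch (the surrounding setup is assumed; only the two methods and the key manager's `getPublicKeyHex()` come from this diff):

```js
// `client` is a connected NostrClient instance; the key manager only needs to
// expose getPublicKeyHex() as far as the code in this diff is concerned.
function rotateKey(client, newKeyManager) {
    // Connections and existing subscriptions are left untouched; from here on,
    // event signing, NIP-42 AUTH responses, and the keepalive REQ's `authors`
    // filter all use the new key.
    client.setKeyManager(newKeyManager);
    return client.getKeyManager().getPublicKeyHex();
}
```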
@@ -132,13 +166,6 @@ class NostrClient {
             }
         }
     }
-    /**
-     * Get the key manager.
-     * @returns The key manager instance
-     */
-    getKeyManager() {
-        return this.keyManager;
-    }
     /**
      * Get the current query timeout in milliseconds.
      * @returns Query timeout in milliseconds
@@ -175,11 +202,41 @@ class NostrClient {
             return;
         }
         return new Promise((resolve, reject) => {
+            // The connection-setup timeout has three races to defend
+            // against:
+            // A) createWebSocket resolves AFTER the timeout fired.
+            // B) createWebSocket resolves BEFORE the timeout, but
+            //    `onopen` fires AFTER the timeout fired.
+            // C) createWebSocket resolves and `onopen` fires BEFORE the
+            //    timeout (the success path).
+            // `pendingSocket` lets the timeout proactively close any
+            // socket that's already been created but hasn't fired
+            // `onopen` yet. The `timedOut` flag covers (A) inside `.then`
+            // and (B) inside `socket.onopen`.
+            let timedOut = false;
+            let pendingSocket = null;
             const timeoutId = setTimeout(() => {
+                timedOut = true;
+                if (pendingSocket) {
+                    try {
+                        pendingSocket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                }
                 reject(new Error(`Connection to ${url} timed out`));
             }, CONNECTION_TIMEOUT_MS);
             (0, WebSocketAdapter_js_1.createWebSocket)(url)
                 .then((socket) => {
+                if (timedOut) {
+                    // Caller already saw the rejection. Discard the late
+                    // socket so we don't leak it.
+                    try {
+                        socket.close(1000, 'Connection setup timed out');
+                    }
+                    catch { /* ignore */ }
+                    return;
+                }
+                pendingSocket = socket;
                 const relay = {
                     url,
                     socket,
@@ -189,9 +246,30 @@ class NostrClient {
                     reconnectTimer: null,
                     pingTimer: null,
                     lastPongTime: Date.now(),
+                    unansweredPings: 0,
                     wasConnected: existingRelay?.wasConnected ?? false,
+                    // Reset on every new connection: a relay's per-connection
+                    // sub-slot accounting is fresh, so previously-rejected REQs
+                    // should be re-issued on the new socket.
+                    closedSubIds: new Set(),
+                    eosedSubIds: new Set(),
                 };
                 socket.onopen = () => {
+                    // The `.then` block already guards against a socket
+                    // arriving after the connection timeout, but the socket
+                    // can also be created BEFORE the timeout while
+                    // `onopen` fires AFTER the timeout has rejected the
+                    // outer promise. Without this second guard we'd register
+                    // the relay, start a pingTimer, and resubscribe — orphan
+                    // background resources the caller can't see or clean up
+                    // because their connect() call already saw a rejection.
+                    if (timedOut) {
+                        try {
+                            socket.close(1000, 'Connection setup timed out');
+                        }
+                        catch { /* ignore */ }
+                        return;
+                    }
                     clearTimeout(timeoutId);
                     relay.connected = true;
                     relay.reconnectAttempts = 0; // Reset on successful connection
@@ -216,10 +294,11 @@ class NostrClient {
                 socket.onmessage = (event) => {
                     try {
                         const data = (0, WebSocketAdapter_js_1.extractMessageData)(event);
-                        // Update last pong time on any message (relay is alive)
+                        // Update last pong time and reset unanswered pings on any message (relay is alive)
                         const r = this.relays.get(url);
                         if (r) {
                             r.lastPongTime = Date.now();
+                            r.unansweredPings = 0;
                         }
                         this.handleRelayMessage(url, data);
                     }
@@ -231,9 +310,40 @@ class NostrClient {
                     const wasConnected = relay.connected;
                     relay.connected = false;
                     this.stopPingTimer(url);
+                    // Pre-onopen close: TCP handshake failure or relay
+                    // immediately closed the WS during the upgrade. Without
+                    // this, the connectToRelay promise stays pending until
+                    // CONNECTION_TIMEOUT_MS (30s) expires; surfacing it now
+                    // lets the caller see the failure promptly and retry.
+                    if (!wasConnected && !timedOut) {
+                        timedOut = true;
+                        clearTimeout(timeoutId);
+                        reject(new Error(`Connection to ${url} closed during handshake: ${event?.reason || 'no reason'}`));
+                    }
                     if (wasConnected) {
                         const reason = event?.reason || 'Connection closed';
                         this.emitConnectionEvent('disconnect', url, reason);
+                        // Re-trigger the all-done check on every active sub.
+                        // queryWithFirstSeenWins.allRelaysDoneFor only runs
+                        // from listener callbacks (EOSE / CLOSED via onError);
+                        // a socket that drops without sending either would
+                        // otherwise leave the query hanging until
+                        // queryTimeoutMs even though the disconnected relay no
+                        // longer counts toward "still pending" relays. Firing
+                        // a synthetic onError gives every active sub a chance
+                        // to re-evaluate now that the relay set has shrunk.
+                        // Include the relay URL so listeners in a multi-relay
+                        // client can attribute which relay dropped.
+                        const inflight = Array.from(this.subscriptions.entries());
+                        for (const [subId, sub] of inflight) {
+                            try {
+                                sub.listener.onError?.(subId, `Relay disconnected (${url}): ${reason}`);
+                            }
+                            catch {
+                                // Ignore listener errors — we're notifying
+                                // best-effort.
+                            }
+                        }
                     }
                     if (!this.closed && this.autoReconnect && !relay.reconnecting) {
                         this.scheduleReconnect(url);
@@ -245,7 +355,11 @@ class NostrClient {
                         reject(new Error(`Failed to connect to ${url}: ${error.message || 'Unknown error'}`));
                     }
                 };
-                this.relays
+                // Note: we do NOT register the relay in `this.relays` here —
+                // only after `onopen` fires successfully. Registering eagerly
+                // (before onopen) would leak the relay into the global map
+                // even when the connection setup times out and the caller's
+                // promise has already rejected.
             })
                 .catch((error) => {
                 clearTimeout(timeoutId);
@@ -303,11 +417,16 @@ class NostrClient {
                 this.stopPingTimer(url);
                 return;
             }
-            // Check if we've received any message recently
             const timeSinceLastPong = Date.now() - relay.lastPongTime;
-            if (timeSinceLastPong > this.pingIntervalMs * 2) {
-                //
-
+            if (timeSinceLastPong > this.pingIntervalMs * 2 && relay.unansweredPings >= 2) {
+                // No inbound message for 2x the ping interval AND we've sent at least 2 pings
+                // without any response — the connection is truly stale.
+                // The unanswered pings gate handles browser tab throttling: on the first tick
+                // after waking, unansweredPings is 0, so we send a ping and wait. If the relay
+                // is alive it responds (resetting the counter). If dead, subsequent ticks
+                // increment the counter until it reaches the threshold, even under sustained
+                // throttling where intervals are irregular.
+                console.warn(`Relay ${url} appears stale (no response for ${timeSinceLastPong}ms, ${relay.unansweredPings} unanswered pings), reconnecting...`);
                 this.stopPingTimer(url);
                 try {
                     relay.socket.close();
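With the default `pingIntervalMs` of 30000 ms, the stale check above fires only after more than 60 s without any inbound message and at least two unanswered pings. A standalone restatement of that predicate with plain values:

```js
// Restates the staleness condition from the hunk above with the default interval.
const pingIntervalMs = 30000;

function isStale(lastPongTime, unansweredPings, now = Date.now()) {
    const timeSinceLastPong = now - lastPongTime;
    return timeSinceLastPong > pingIntervalMs * 2 && unansweredPings >= 2;
}

// A tab throttled for 90 s that has only sent one ping is not yet considered stale:
console.log(isStale(Date.now() - 90000, 1)); // false
console.log(isStale(Date.now() - 90000, 2)); // true
```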
@@ -317,17 +436,26 @@ class NostrClient {
                 }
                 return;
             }
-            // Send a subscription request as a ping (relays respond with EOSE)
-            //
-            //
+            // Send a subscription request as a ping (relays respond with EOSE).
+            // The filter MUST be tightly scoped — an open `{ limit: 1 }` filter
+            // with no kinds/authors/#p will, after EOSE, stream every event the
+            // relay receives (NIP-01 live tail), saturating the connection and
+            // exhausting per-connection subscription slots on busy relays.
+            // Scoping by `authors:[self]` keeps the live tail empty in practice
+            // (the relay would only forward our own future events).
             try {
-                const
+                const selfPubkey = this.keyManager.getPublicKeyHex();
                 // First close any existing ping subscription to ensure we don't accumulate
-                const closeMessage = JSON.stringify(['CLOSE',
+                const closeMessage = JSON.stringify(['CLOSE', PING_SUB_ID]);
                 relay.socket.send(closeMessage);
                 // Then send the new ping request (limit:1 ensures relay sends EOSE)
-                const pingMessage = JSON.stringify([
+                const pingMessage = JSON.stringify([
+                    'REQ',
+                    PING_SUB_ID,
+                    { authors: [selfPubkey], limit: 1 },
+                ]);
                 relay.socket.send(pingMessage);
+                relay.unansweredPings++;
             }
             catch {
                 // Send failed, connection likely dead
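Concretely, each ping tick now sends two frames per relay: a CLOSE for the previous keepalive subscription and a tightly scoped REQ. With a placeholder pubkey, the frames look like this (the relay's EOSE reply serves as the pong):

```js
// The frames produced by the code above, with a shortened placeholder pubkey.
const selfPubkey = 'aabbcc...'; // this.keyManager.getPublicKeyHex()
const closeFrame = JSON.stringify(['CLOSE', '__nostr-sdk-keepalive__']);
const reqFrame = JSON.stringify(['REQ', '__nostr-sdk-keepalive__', { authors: [selfPubkey], limit: 1 }]);
console.log(closeFrame); // ["CLOSE","__nostr-sdk-keepalive__"]
console.log(reqFrame);   // ["REQ","__nostr-sdk-keepalive__",{"authors":["aabbcc..."],"limit":1}]
```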
@@ -360,6 +488,11 @@ class NostrClient {
         if (!relay?.socket || !relay.connected)
             return;
         for (const [subId, info] of this.subscriptions) {
+            // Skip subs this relay has previously CLOSED — re-issuing them
+            // just triggers the same rejection in a loop. Other healthy
+            // relays still resubscribe.
+            if (relay.closedSubIds.has(subId))
+                continue;
             const message = JSON.stringify(['REQ', subId, info.filter.toJSON()]);
             relay.socket.send(message);
         }
@@ -379,7 +512,7 @@ class NostrClient {
     /**
      * Handle a message from a relay.
      */
-    handleRelayMessage(
+    handleRelayMessage(relayUrl, message) {
         try {
             const json = JSON.parse(message);
             if (!Array.isArray(json) || json.length < 2)
@@ -393,16 +526,16 @@ class NostrClient {
                     this.handleOkMessage(json);
                     break;
                 case 'EOSE':
-                    this.handleEOSEMessage(json);
+                    this.handleEOSEMessage(relayUrl, json);
                     break;
                 case 'NOTICE':
                     this.handleNoticeMessage(json);
                     break;
                 case 'CLOSED':
-                    this.handleClosedMessage(json);
+                    this.handleClosedMessage(relayUrl, json);
                     break;
                 case 'AUTH':
-                    this.handleAuthMessage(
+                    this.handleAuthMessage(relayUrl, json);
                     break;
             }
         }
@@ -414,7 +547,7 @@ class NostrClient {
      * Handle EVENT message from relay.
      */
     handleEventMessage(json) {
-        if (json.length < 3)
+        if (json.length < 3 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
         const eventData = json[2];
@@ -452,11 +585,23 @@ class NostrClient {
     }
     /**
      * Handle EOSE (End of Stored Events) message from relay.
+     *
+     * Records the per-relay EOSE marker (mirroring closedSubIds) so
+     * queryWithFirstSeenWins can decide when ALL connected relays have
+     * finished — either streamed EOSE or rejected with CLOSED — instead
+     * of settling off the first fast relay's EOSE while a slower relay
+     * is still about to deliver matching events.
      */
-    handleEOSEMessage(json) {
-        if (json.length < 2)
+    handleEOSEMessage(relayUrl, json) {
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const relay = this.relays.get(relayUrl);
+        if (relay) {
+            relay.eosedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onEndOfStoredEvents) {
             subscription.listener.onEndOfStoredEvents(subscriptionId);
@@ -473,15 +618,64 @@ class NostrClient {
     }
     /**
      * Handle CLOSED message from relay (subscription closed by relay).
+     *
+     * NIP-01 CLOSED frames are terminal for the named subscription **on
+     * the sending relay**. In a multi-relay client the same sub_id may
+     * still be alive on a healthy relay, so we must NOT delete the
+     * global `this.subscriptions` entry here — that would silently drop
+     * EVENT/EOSE frames from the still-healthy relays in
+     * `handleEventMessage` (which consults the global map).
+     *
+     * Instead we record the rejection on the sending relay's
+     * `closedSubIds` set so `resubscribeAll` and post-AUTH resubscribe
+     * skip it on this relay only. The listener is notified via
+     * `onError` so callers (e.g. queryWithFirstSeenWins) can decide to
+     * settle and explicitly `unsubscribe()` if they want to give up
+     * across all relays.
      */
-    handleClosedMessage(json) {
-
+    handleClosedMessage(relayUrl, json) {
+        // NIP-01 makes the message field optional: `["CLOSED", <sub>]` is
+        // valid. Dropping such frames was exactly the leak this PR sets out
+        // to fix — no closedSubIds marker and no onError notification means
+        // queries hang until timeout and resubscribe loops persist.
+        if (json.length < 2 || typeof json[1] !== 'string')
             return;
         const subscriptionId = json[1];
-
+        // Ignore CLOSED for sub_ids we don't know about. A misbehaving or
+        // malicious relay could otherwise spam us with arbitrary sub_ids
+        // and grow `closedSubIds` unbounded over a long-lived connection,
+        // and could pre-emptively block sub_ids we might use later.
+        if (!this.subscriptions.has(subscriptionId))
+            return;
+        const message = typeof json[2] === 'string' ? json[2] : 'no reason provided';
+        // NIP-42 transient case: relays that require AUTH typically reject
+        // pre-auth REQs with `CLOSED("auth-required:...")` and then send
+        // an AUTH challenge. resubscribeAfterAuth re-issues the sub, so
+        // this rejection is NOT terminal. If we marked closedSubIds here,
+        // queryWithFirstSeenWins.onError would settle the future
+        // prematurely (single-relay → allRelaysDoneFor=true), unsubscribe
+        // the sub, and the post-AUTH retry would find nothing to retry.
+        // Listener still gets onError so callers see the reason; we just
+        // don't poison the per-relay state with a transient marker.
+        //
+        // We accept three on-the-wire shapes: `auth-required:...`
+        // (NIP-42 standard with reason), `auth-required ...` (whitespace
+        // separator), and bare `auth-required` (no suffix at all — some
+        // relays / tests).
+        const isAuthRequired = message === 'auth-required'
+            || message.startsWith('auth-required:')
+            || message.startsWith('auth-required ');
+        const relay = this.relays.get(relayUrl);
+        if (relay && !isAuthRequired) {
+            relay.closedSubIds.add(subscriptionId);
+        }
         const subscription = this.subscriptions.get(subscriptionId);
         if (subscription?.listener.onError) {
-
+            // Pass the relay's reason through verbatim so callers can
+            // pattern-match on standard prefixes (`auth-required:`,
+            // `rate-limited:`, `blocked:`, etc.) without parsing through
+            // a wrapper string.
            subscription.listener.onError(subscriptionId, message);
         }
     }
     /**
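The three `auth-required` shapes the handler accepts, run through the same prefix test it uses:

```js
// Mirrors the isAuthRequired check from handleClosedMessage above.
function isAuthRequired(message) {
    return message === 'auth-required'
        || message.startsWith('auth-required:')
        || message.startsWith('auth-required ');
}
console.log(isAuthRequired('auth-required: we only serve authed clients')); // true  (transient, not recorded)
console.log(isAuthRequired('auth-required please AUTH first'));             // true  (transient, not recorded)
console.log(isAuthRequired('auth-required'));                               // true  (transient, not recorded)
console.log(isAuthRequired('rate-limited: slow down'));                     // false (terminal, recorded in closedSubIds)
```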
@@ -506,8 +700,27 @@ class NostrClient {
         // Send AUTH response
         const message = JSON.stringify(['AUTH', authEvent.toJSON()]);
         relay.socket.send(message);
-        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // Re-send subscriptions after auth (relay may have ignored pre-auth
+        // requests). Two separate per-relay markers, two separate decisions:
+        //
+        // - `closedSubIds`: do NOT clear. handleClosedMessage already
+        //   skips the auth-required transient case, so anything in this
+        //   set is a TERMINAL rejection (rate-limited, blocked, etc.)
+        //   that AUTH does not relax. The resubscribeAll guard then
+        //   correctly skips terminal-rejected subs on this relay. They
+        //   will be retried on the next reconnect, when onopen creates a
+        //   fresh RelayConnection with empty markers.
+        //
+        // - `eosedSubIds`: clear. A relay may have EOSE'd a pre-auth sub
+        //   with zero events (filter unsatisfiable without auth context);
+        //   post-auth the same filter might match. We must re-arm the
+        //   local "still waiting" state so any in-flight
+        //   queryWithFirstSeenWins doesn't see this relay as already-done
+        //   from a stale marker.
         setTimeout(() => {
+            const r = this.relays.get(relayUrl);
+            if (r)
+                r.eosedSubIds.clear();
             this.resubscribeAll(relayUrl);
         }, AUTH_RESUBSCRIBE_DELAY_MS);
     }
@@ -527,24 +740,40 @@ class NostrClient {
             item.reject(new Error('Client disconnected'));
         }
         this.eventQueue = [];
-        // Close all relay connections and clean up timers
+        // Close all relay connections and clean up timers. Mark every
+        // relay disconnected synchronously BEFORE we notify subscriptions
+        // below, so any listener that consults `allRelaysDoneFor` sees
+        // zero connected relays and settles immediately.
         for (const [url, relay] of this.relays) {
-
+            relay.connected = false;
             if (relay.pingTimer) {
                 clearInterval(relay.pingTimer);
                 relay.pingTimer = null;
             }
-            // Stop reconnect timer
             if (relay.reconnectTimer) {
                 clearTimeout(relay.reconnectTimer);
                 relay.reconnectTimer = null;
             }
-            // Close socket
             if (relay.socket && relay.socket.readyState !== WebSocketAdapter_js_1.CLOSED) {
                 relay.socket.close(1000, 'Client disconnected');
             }
             this.emitConnectionEvent('disconnect', url, 'Client disconnected');
         }
+        // Notify in-flight subscriptions that we're shutting down.
+        // queryWithFirstSeenWins.onError re-checks allRelaysDoneFor (now
+        // 0 connected → trivially true) and settles immediately, sparing
+        // callers the full queryTimeoutMs wait. Snapshot keys first
+        // because the listener may call unsubscribe(), which mutates
+        // this.subscriptions while we iterate.
+        const inflightSubs = Array.from(this.subscriptions.entries());
+        for (const [subId, sub] of inflightSubs) {
+            try {
+                sub.listener.onError?.(subId, 'Client disconnected');
+            }
+            catch {
+                // Ignore listener errors — we're tearing down anyway.
+            }
+        }
         this.relays.clear();
         this.subscriptions.clear();
     }
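From the caller's side, the new teardown path means a subscription listener now hears about client shutdown instead of waiting out the query timeout. A hedged sketch (the filter value is SDK-specific and assumed; the listener callback names come from this diff):

```js
// `client` is a connected NostrClient and `filter` an SDK filter object (assumed).
function watch(client, filter) {
    return client.subscribe(filter, {
        onEvent: (event) => console.log('event', event),
        onEndOfStoredEvents: (id) => console.log('EOSE for', id),
        // As of this version, disconnect() and relay drops surface here as
        // synthetic errors ('Client disconnected', 'Relay disconnected (...)'),
        // so callers can settle immediately instead of waiting for queryTimeoutMs.
        onError: (id, message) => console.warn('subscription', id, 'error:', message),
    });
}
```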
@@ -746,7 +975,22 @@ class NostrClient {
             filter = filterOrSubId;
             listener = listenerOrFilter;
         }
+        // Reserved prefix for SDK-internal sub_ids (currently just the
+        // keepalive `PING_SUB_ID`). Reject explicit caller use so the
+        // keepalive timer's CLOSE/REQ cycle can't stomp on user state.
+        if (subscriptionId.startsWith('__nostr-sdk-')) {
+            throw new Error(`Subscription ID "${subscriptionId}" uses the reserved "__nostr-sdk-" prefix — pick a different id.`);
+        }
         this.subscriptions.set(subscriptionId, { filter, listener });
+        // Wipe any stale per-relay EOSE/CLOSED markers for this sub_id
+        // before issuing the REQ — otherwise a fresh subscribe with a
+        // sub_id that was previously CLOSED (or was just freshly
+        // EOSE'd) would be skipped or treated as "already done" on
+        // those relays.
+        for (const [, relay] of this.relays) {
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
+        }
         // Send subscription request to all connected relays
         const message = JSON.stringify(['REQ', subscriptionId, filter.toJSON()]);
         for (const [, relay] of this.relays) {
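Caller-facing effect of the guard above: an explicit subscription id under the `__nostr-sdk-` prefix now throws. A sketch, assuming the overload that takes an explicit id first (suggested by the parameter names in this hunk):

```js
// `client`, `filter`, and `listener` stand in for a connected NostrClient,
// an SDK filter, and a subscription listener.
function subscribeWithId(client, filter, listener) {
    try {
        // Rejected: ids under the SDK-internal prefix are reserved for the keepalive REQ.
        return client.subscribe('__nostr-sdk-mine', filter, listener);
    } catch (err) {
        console.warn(err.message); // ...uses the reserved "__nostr-sdk-" prefix...
        return client.subscribe('my-sub', filter, listener);
    }
}
```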
@@ -764,12 +1008,19 @@ class NostrClient {
         if (!this.subscriptions.has(subscriptionId))
             return;
         this.subscriptions.delete(subscriptionId);
-        // Send CLOSE to all connected relays
+        // Send CLOSE to all connected relays — except those that already
+        // CLOSED the sub themselves (no point telling the relay something
+        // it told us).
         const message = JSON.stringify(['CLOSE', subscriptionId]);
         for (const [, relay] of this.relays) {
-            if (relay.connected && relay.socket?.readyState === WebSocketAdapter_js_1.OPEN
+            if (relay.connected && relay.socket?.readyState === WebSocketAdapter_js_1.OPEN
+                && !relay.closedSubIds.has(subscriptionId)) {
                 relay.socket.send(message);
             }
+            // Drop both per-relay markers now that the sub is gone from
+            // the global map.
+            relay.closedSubIds.delete(subscriptionId);
+            relay.eosedSubIds.delete(subscriptionId);
         }
     }
     /**
@@ -795,12 +1046,45 @@ class NostrClient {
     queryWithFirstSeenWins(filter, extractResult) {
         return new Promise((resolve) => {
             let subscriptionId = '';
-
-
-
-
-
+            let settled = false;
+            // Declared as `let` and initialized lazily so `finishWith` can be
+            // invoked before the setTimeout call below without hitting the
+            // TDZ on `clearTimeout(timeoutId)`. (The same comment on the
+            // listener anticipates synchronous-callback hypothetical paths.)
+            let timeoutId;
+            // Accept an explicit `id` so callers from inside the listener can
+            // pass the sub_id the relay echoed back. This guards against any
+            // future change to subscribe() that would invoke listener
+            // callbacks before its return value is bound to `subscriptionId`
+            // — the closure-captured value would still be `''` and we'd skip
+            // the CLOSE frame, leaking the slot on the relay.
+            const finishWith = (result, id) => {
+                if (settled)
+                    return;
+                settled = true;
+                if (timeoutId !== undefined)
+                    clearTimeout(timeoutId);
+                const subId = id || subscriptionId;
+                if (subId)
+                    this.unsubscribe(subId);
+                resolve(result);
+            };
+            timeoutId = setTimeout(() => finishWith(null), this.queryTimeoutMs);
             const authors = new Map();
+            const allRelaysDone = (id) => this.allRelaysDoneFor(id);
+            const pickWinner = () => {
+                let winnerEntry = null;
+                let winnerPubkey = '';
+                for (const [pubkey, entry] of authors) {
+                    if (!winnerEntry
+                        || entry.firstSeen < winnerEntry.firstSeen
+                        || (entry.firstSeen === winnerEntry.firstSeen && pubkey < winnerPubkey)) {
+                        winnerEntry = entry;
+                        winnerPubkey = pubkey;
+                    }
+                }
+                return winnerEntry ? extractResult(winnerEntry.latestEvent) : null;
+            };
             subscriptionId = this.subscribe(filter, {
                 onEvent: (event) => {
                     // Verify signature to prevent relay injection of forged events (#4)
@@ -819,24 +1103,55 @@ class NostrClient {
                     }
                 }
                 },
-
-
-
-
-
-
-
-
-
-
-
-
+                // EOSE means *this relay* has finished delivering stored
+                // events. In a multi-relay client we must not settle yet — a
+                // slower relay may still be about to deliver matching events.
+                // Settle only when every connected relay has either EOSE'd
+                // OR CLOSED'd this sub. (Single-relay clients are unaffected:
+                // allDone is trivially true with one relay.)
+                onEndOfStoredEvents: (id) => {
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
+                    }
+                },
+                // Subscription error from the SDK — fires from three paths
+                // that all need the same "is it time to settle?" check:
+                //   1. Relay sent CLOSED for this sub. In a multi-relay
+                //      client the same sub_id may still be alive on a
+                //      healthy relay; settling on the first CLOSED would
+                //      prematurely abort a query other relays could
+                //      satisfy. handleClosedMessage records the rejection
+                //      on the sending relay's closedSubIds before invoking
+                //      us, so we can decide via allRelaysDoneFor.
+                //   2. Relay disconnected mid-query (socket.onclose →
+                //      synthetic onError). The relay no longer counts as
+                //      connected, so allRelaysDoneFor excludes it.
+                //   3. Client disconnected (disconnect() → synthetic
+                //      onError). All relays are torn down, allRelaysDoneFor
+                //      sees zero connected and settles.
+                onError: (id, message) => {
+                    console.warn(`Subscription error on ${id}: ${message}`);
+                    if (allRelaysDone(id)) {
+                        finishWith(pickWinner(), id);
                     }
-
+                    // else: keep waiting for EOSE / CLOSED from remaining
+                    // relays or the overall query timeout.
                 },
             });
         });
     }
+    /**
+     * True if every currently-connected relay has finished delivering
+     * for the given sub_id (either EOSE'd or CLOSED'd it). Used by
+     * queryWithFirstSeenWins to coordinate multi-relay settlement.
+     */
+    allRelaysDoneFor(subscriptionId) {
+        const connected = Array.from(this.relays.values()).filter((r) => r.connected);
+        // No connected relays at all → nothing to wait for; settle.
+        if (connected.length === 0)
+            return true;
+        return connected.every((r) => r.eosedSubIds.has(subscriptionId) || r.closedSubIds.has(subscriptionId));
+    }
     /**
      * Query for a public key by nametag.
      * Uses first-seen-wins anti-hijacking resolution.