holosphere 2.0.0-alpha4 → 2.0.0-alpha5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/dist/cjs/holosphere.cjs +1 -1
  2. package/dist/esm/holosphere.js +1 -1
  3. package/dist/{index-CBitK71M.cjs → index-BG8FStkt.cjs} +2 -2
  4. package/dist/{index-CBitK71M.cjs.map → index-BG8FStkt.cjs.map} +1 -1
  5. package/dist/{index-CV0eOogK.js → index-Bbey4GkP.js} +502 -56
  6. package/dist/index-Bbey4GkP.js.map +1 -0
  7. package/dist/{index-Cz-PLCUR.js → index-Cp3xctq8.js} +2 -2
  8. package/dist/{index-Cz-PLCUR.js.map → index-Cp3xctq8.js.map} +1 -1
  9. package/dist/index-hfVGRwSr.cjs +5 -0
  10. package/dist/index-hfVGRwSr.cjs.map +1 -0
  11. package/dist/{indexeddb-storage-CRsZyB2f.cjs → indexeddb-storage-BD70pN7q.cjs} +2 -2
  12. package/dist/{indexeddb-storage-CRsZyB2f.cjs.map → indexeddb-storage-BD70pN7q.cjs.map} +1 -1
  13. package/dist/{indexeddb-storage-DZaGlY_a.js → indexeddb-storage-Bjg84U5R.js} +2 -2
  14. package/dist/{indexeddb-storage-DZaGlY_a.js.map → indexeddb-storage-Bjg84U5R.js.map} +1 -1
  15. package/dist/{memory-storage-BkUi6sZG.js → memory-storage-CD0XFayE.js} +2 -2
  16. package/dist/{memory-storage-BkUi6sZG.js.map → memory-storage-CD0XFayE.js.map} +1 -1
  17. package/dist/{memory-storage-C0DuUsdY.cjs → memory-storage-DmMyJtOo.cjs} +2 -2
  18. package/dist/{memory-storage-C0DuUsdY.cjs.map → memory-storage-DmMyJtOo.cjs.map} +1 -1
  19. package/dist/{secp256k1-DN4FVXcv.js → secp256k1-69sS9O-P.js} +2 -2
  20. package/dist/{secp256k1-DN4FVXcv.js.map → secp256k1-69sS9O-P.js.map} +1 -1
  21. package/dist/{secp256k1-0kPdAVkK.cjs → secp256k1-TcN6vWGh.cjs} +2 -2
  22. package/dist/{secp256k1-0kPdAVkK.cjs.map → secp256k1-TcN6vWGh.cjs.map} +1 -1
  23. package/package.json +1 -1
  24. package/src/storage/nostr-async.js +149 -14
  25. package/src/storage/nostr-client.js +569 -46
  26. package/dist/index-BB_vVJgv.cjs +0 -5
  27. package/dist/index-BB_vVJgv.cjs.map +0 -1
  28. package/dist/index-CV0eOogK.js.map +0 -1
@@ -7,6 +7,116 @@ import { SimplePool, finalizeEvent, getPublicKey } from 'nostr-tools';
7
7
  import { OutboxQueue } from './outbox-queue.js';
8
8
  import { SyncService } from './sync-service.js';
9
9
 
10
+ // Global pool singleton - reuse connections across NostrClient instances
11
+ let globalPool = null;
12
+ let globalPoolRelays = new Set();
13
+
14
+ /**
15
+ * Get or create global SimplePool singleton
16
+ * This ensures WebSocket connections are reused across all operations
17
+ */
18
+ function getGlobalPool(config = {}) {
19
+ if (!globalPool) {
20
+ globalPool = new SimplePool({
21
+ enableReconnect: config.enableReconnect !== false,
22
+ enablePing: config.enablePing !== false,
23
+ });
24
+ }
25
+ return globalPool;
26
+ }
27
+
28
+ // Global pending queries map for deduplication
29
+ // Key: JSON-stringified filter, Value: { promise, timestamp }
30
+ const pendingQueries = new Map();
31
+ const PENDING_QUERY_TIMEOUT = 5000; // 5 seconds
32
+
33
+ // Global active subscriptions map for subscription deduplication
34
+ // Key: JSON-stringified filter, Value: { subscription, callbacks: Set, refCount }
35
+ const activeSubscriptions = new Map();
36
+
37
+ // Throttle background refreshes to avoid flooding the relay
38
+ // Key: path, Value: timestamp of last refresh
39
+ const backgroundRefreshThrottle = new Map();
40
+ const BACKGROUND_REFRESH_INTERVAL = 30000; // Only refresh same path every 30 seconds
41
+
42
+ // Write debouncing for rapid updates to the same path
43
+ // Key: d-tag path, Value: { event, timer, resolve, reject }
44
+ const pendingWrites = new Map();
45
+ const WRITE_DEBOUNCE_MS = 500; // Debounce writes within 500ms window
46
+
47
+ // Long-lived subscription manager - keeps ONE subscription per author for real-time updates
48
+ // Key: pubkey, Value: { subscription, lastEventTime, initialized }
49
+ const authorSubscriptions = new Map();
50
+ const AUTHOR_SUB_INIT_TIMEOUT = 5000; // Wait up to 5s for initial data load
51
+
52
+ /**
53
+ * Simple LRU Cache implementation
54
+ * Automatically evicts least recently used entries when max size is reached
55
+ */
56
+ class LRUCache {
57
+ constructor(maxSize = 500) {
58
+ this.maxSize = maxSize;
59
+ this.cache = new Map();
60
+ }
61
+
62
+ get(key) {
63
+ if (!this.cache.has(key)) return undefined;
64
+
65
+ // Move to end (most recently used)
66
+ const value = this.cache.get(key);
67
+ this.cache.delete(key);
68
+ this.cache.set(key, value);
69
+ return value;
70
+ }
71
+
72
+ set(key, value) {
73
+ // If key exists, delete it first to update position
74
+ if (this.cache.has(key)) {
75
+ this.cache.delete(key);
76
+ }
77
+
78
+ this.cache.set(key, value);
79
+
80
+ // Evict oldest entries if over capacity
81
+ while (this.cache.size > this.maxSize) {
82
+ const oldestKey = this.cache.keys().next().value;
83
+ this.cache.delete(oldestKey);
84
+ }
85
+ }
86
+
87
+ has(key) {
88
+ return this.cache.has(key);
89
+ }
90
+
91
+ delete(key) {
92
+ return this.cache.delete(key);
93
+ }
94
+
95
+ clear() {
96
+ this.cache.clear();
97
+ }
98
+
99
+ get size() {
100
+ return this.cache.size;
101
+ }
102
+
103
+ keys() {
104
+ return this.cache.keys();
105
+ }
106
+
107
+ values() {
108
+ return this.cache.values();
109
+ }
110
+
111
+ entries() {
112
+ return this.cache.entries();
113
+ }
114
+
115
+ forEach(callback) {
116
+ this.cache.forEach(callback);
117
+ }
118
+ }
119
+
10
120
  // Lazy-load WebSocket polyfill for Node.js environment
11
121
  let webSocketPolyfillPromise = null;
12
122
  function ensureWebSocket() {
@@ -57,9 +167,15 @@ export class NostrClient {
57
167
  this.config = config;
58
168
 
59
169
  this._subscriptions = new Map();
60
- this._eventCache = new Map(); // In-memory cache for recent events
170
+ this._eventCache = new LRUCache(config.cacheSize || 500); // LRU cache for recent events
171
+ this._cacheIndex = new Map(); // Reverse index: kind -> Set of cache keys affected by that kind
61
172
  this.persistentStorage = null;
62
173
 
174
+ // Batched persistent writes for better I/O performance
175
+ this._persistQueue = new Map(); // path -> event
176
+ this._persistTimer = null;
177
+ this._persistBatchMs = config.persistBatchMs || 100; // Batch writes within 100ms window
178
+
63
179
  // Initialize pool and storage asynchronously
64
180
  this._initReady = this._initialize();
65
181
  }
@@ -74,10 +190,11 @@ export class NostrClient {
74
190
 
75
191
  // Initialize SimplePool with options (only if relays exist)
76
192
  if (this.relays.length > 0) {
77
- this.pool = new SimplePool({
78
- enableReconnect: this.config.enableReconnect !== false,
79
- enablePing: this.config.enablePing !== false,
80
- });
193
+ // Use global pool singleton to reuse WebSocket connections
194
+ this.pool = getGlobalPool(this.config);
195
+
196
+ // Track relays used by this client
197
+ this.relays.forEach(r => globalPoolRelays.add(r));
81
198
  } else {
82
199
  // Mock pool for testing - returns mock promise that resolves immediately
83
200
  this.pool = {
@@ -90,6 +207,90 @@ export class NostrClient {
90
207
 
91
208
  // Initialize persistent storage
92
209
  await this._initPersistentStorage();
210
+
211
+ // Start long-lived subscription for real-time cache updates
212
+ if (this.relays.length > 0) {
213
+ this._initLongLivedSubscription();
214
+ }
215
+ }
216
+
217
+ /**
218
+ * Initialize a long-lived subscription to keep cache fresh
219
+ * This replaces polling with a single persistent subscription
220
+ * @private
221
+ */
222
+ _initLongLivedSubscription() {
223
+ const subKey = this.publicKey;
224
+
225
+ // Check if subscription already exists for this author
226
+ if (authorSubscriptions.has(subKey)) {
227
+ return;
228
+ }
229
+
230
+ const subInfo = {
231
+ subscription: null,
232
+ initialized: false,
233
+ initPromise: null,
234
+ initResolve: null,
235
+ };
236
+
237
+ // Create promise for initial load completion
238
+ subInfo.initPromise = new Promise(resolve => {
239
+ subInfo.initResolve = resolve;
240
+ });
241
+
242
+ authorSubscriptions.set(subKey, subInfo);
243
+
244
+ // Subscribe to ALL events for this author (kind 30000)
245
+ // This single subscription replaces all the polling queries
246
+ const filter = {
247
+ kinds: [30000],
248
+ authors: [this.publicKey],
249
+ };
250
+
251
+ const sub = this.pool.subscribeMany(
252
+ this.relays,
253
+ [filter],
254
+ {
255
+ onevent: (event) => {
256
+ // Verify author (relay may not respect filter)
257
+ if (event.pubkey !== this.publicKey) {
258
+ return;
259
+ }
260
+
261
+ // Cache the event - this keeps our local cache in sync
262
+ this._cacheEvent(event);
263
+ },
264
+ oneose: () => {
265
+ // End of stored events - initial load complete
266
+ if (!subInfo.initialized) {
267
+ subInfo.initialized = true;
268
+ subInfo.initResolve();
269
+ }
270
+ },
271
+ }
272
+ );
273
+
274
+ subInfo.subscription = sub;
275
+
276
+ // Set timeout for initial load in case EOSE never arrives
277
+ setTimeout(() => {
278
+ if (!subInfo.initialized) {
279
+ subInfo.initialized = true;
280
+ subInfo.initResolve();
281
+ }
282
+ }, AUTHOR_SUB_INIT_TIMEOUT);
283
+ }
284
+
285
+ /**
286
+ * Wait for long-lived subscription to complete initial load
287
+ * @private
288
+ */
289
+ async _waitForSubscriptionInit() {
290
+ const subInfo = authorSubscriptions.get(this.publicKey);
291
+ if (subInfo && subInfo.initPromise) {
292
+ await subInfo.initPromise;
293
+ }
93
294
  }
94
295
 
95
296
  /**
@@ -222,9 +423,11 @@ export class NostrClient {
222
423
 
223
424
  /**
224
425
  * Publish event to relays
426
+ * Supports debouncing for replaceable events (kind 30000-39999) to avoid rapid updates
225
427
  * @param {Object} event - Unsigned event object
226
428
  * @param {Object} options - Publish options
227
429
  * @param {boolean} options.waitForRelays - Wait for relay confirmation (default: false for speed)
430
+ * @param {boolean} options.debounce - Debounce rapid writes to same d-tag (default: true for replaceable events)
228
431
  * @returns {Promise<Object>} Signed event with relay publish results
229
432
  */
230
433
  async publish(event, options = {}) {
@@ -233,6 +436,66 @@ export class NostrClient {
233
436
 
234
437
  const waitForRelays = options.waitForRelays || false;
235
438
 
439
+ // For replaceable events, check if we should debounce
440
+ const isReplaceable = event.kind >= 30000 && event.kind < 40000;
441
+ const shouldDebounce = isReplaceable && options.debounce !== false && !waitForRelays;
442
+
443
+ if (shouldDebounce) {
444
+ const dTag = event.tags?.find(t => t[0] === 'd');
445
+ if (dTag && dTag[1]) {
446
+ return this._debouncedPublish(event, dTag[1], options);
447
+ }
448
+ }
449
+
450
+ return this._doPublish(event, options);
451
+ }
452
+
453
+ /**
454
+ * Debounced publish - coalesces rapid writes to the same d-tag
455
+ * @private
456
+ */
457
+ _debouncedPublish(event, dTagPath, options) {
458
+ return new Promise((resolve, reject) => {
459
+ const existing = pendingWrites.get(dTagPath);
460
+
461
+ if (existing) {
462
+ // Cancel previous pending write and use the new one
463
+ clearTimeout(existing.timer);
464
+ // Resolve the previous promise with the new event (it will be superseded)
465
+ existing.resolve({
466
+ event: null,
467
+ results: [],
468
+ debounced: true,
469
+ supersededBy: event,
470
+ });
471
+ }
472
+
473
+ // Set up debounced write
474
+ const timer = setTimeout(async () => {
475
+ pendingWrites.delete(dTagPath);
476
+ try {
477
+ const result = await this._doPublish(event, options);
478
+ resolve(result);
479
+ } catch (err) {
480
+ reject(err);
481
+ }
482
+ }, WRITE_DEBOUNCE_MS);
483
+
484
+ pendingWrites.set(dTagPath, { event, timer, resolve, reject });
485
+
486
+ // Cache immediately for local-first reads (even before relay publish)
487
+ const signedEvent = finalizeEvent(event, this.privateKey);
488
+ this._cacheEvent(signedEvent);
489
+ });
490
+ }
491
+
492
+ /**
493
+ * Internal publish implementation
494
+ * @private
495
+ */
496
+ async _doPublish(event, options = {}) {
497
+ const waitForRelays = options.waitForRelays || false;
498
+
236
499
  // Sign the event
237
500
  const signedEvent = finalizeEvent(event, this.privateKey);
238
501
 
@@ -325,10 +588,12 @@ export class NostrClient {
325
588
 
326
589
  /**
327
590
  * Query events from relays
591
+ * Uses long-lived subscription for cache updates - avoids polling
328
592
  * @param {Object} filter - Nostr filter object
329
593
  * @param {Object} options - Query options
330
594
  * @param {number} options.timeout - Query timeout in ms (default: 30000, set to 0 for no timeout)
331
595
  * @param {boolean} options.localFirst - Return local cache immediately, refresh in background (default: true)
596
+ * @param {boolean} options.forceRelay - Force relay query even if subscription cache is available (default: false)
332
597
  * @returns {Promise<Array>} Array of events
333
598
  */
334
599
  async query(filter, options = {}) {
@@ -337,6 +602,7 @@ export class NostrClient {
337
602
 
338
603
  const timeout = options.timeout !== undefined ? options.timeout : 30000;
339
604
  const localFirst = options.localFirst !== false; // Default to true for speed
605
+ const forceRelay = options.forceRelay === true;
340
606
 
341
607
  // If no relays, query from cache only
342
608
  if (this.relays.length === 0) {
@@ -344,6 +610,35 @@ export class NostrClient {
344
610
  return matchingEvents;
345
611
  }
346
612
 
613
+ // Check if this query can be served from the long-lived subscription cache
614
+ // The subscription keeps ALL events for this author in cache, updated in real-time
615
+ const subInfo = authorSubscriptions.get(this.publicKey);
616
+ const isOwnDataQuery = filter.authors &&
617
+ filter.authors.length === 1 &&
618
+ filter.authors[0] === this.publicKey;
619
+
620
+ // If we have an initialized subscription for our own data, use cache
621
+ if (!forceRelay && isOwnDataQuery && subInfo && subInfo.initialized) {
622
+ // Return matching events from cache - no relay query needed!
623
+ const matchingEvents = this._getMatchingCachedEvents(filter);
624
+ return matchingEvents;
625
+ }
626
+
627
+ // For first query before subscription initializes, wait briefly
628
+ if (isOwnDataQuery && subInfo && !subInfo.initialized) {
629
+ // Wait for subscription to initialize (up to timeout)
630
+ await Promise.race([
631
+ subInfo.initPromise,
632
+ new Promise(resolve => setTimeout(resolve, Math.min(timeout, AUTHOR_SUB_INIT_TIMEOUT)))
633
+ ]);
634
+
635
+ // Now try cache again
636
+ if (subInfo.initialized) {
637
+ const matchingEvents = this._getMatchingCachedEvents(filter);
638
+ return matchingEvents;
639
+ }
640
+ }
641
+
347
642
  // Check d-tag cache first for single-item queries (most common case)
348
643
  // This ensures recently written data is returned immediately
349
644
  if (filter['#d'] && filter['#d'].length === 1 && filter.kinds && filter.kinds.length === 1) {
@@ -376,29 +671,101 @@ export class NostrClient {
376
671
 
377
672
  /**
378
673
  * Query relays and update cache
674
+ * Uses global pending queries map to deduplicate identical concurrent queries
379
675
  * @private
380
676
  */
381
677
  async _queryRelaysAndCache(filter, cacheKey, timeout) {
382
- let events = await this.pool.querySync(this.relays, filter, { timeout });
383
-
384
- // CRITICAL: Filter out events from other authors (relay may not respect filter)
385
- if (filter.authors && filter.authors.length > 0) {
386
- events = events.filter(event => filter.authors.includes(event.pubkey));
678
+ // Check if there's already a pending query for this exact filter
679
+ const pending = pendingQueries.get(cacheKey);
680
+ if (pending && Date.now() - pending.timestamp < PENDING_QUERY_TIMEOUT) {
681
+ // Reuse the pending query promise instead of creating a new one
682
+ return pending.promise;
387
683
  }
388
684
 
389
- // Cache results
390
- this._eventCache.set(cacheKey, {
391
- events,
685
+ // Create the query promise
686
+ const queryPromise = (async () => {
687
+ try {
688
+ let events = await this.pool.querySync(this.relays, filter, { timeout });
689
+
690
+ // CRITICAL: Filter out events from other authors (relay may not respect filter)
691
+ if (filter.authors && filter.authors.length > 0) {
692
+ events = events.filter(event => filter.authors.includes(event.pubkey));
693
+ }
694
+
695
+ // Cache results
696
+ this._eventCache.set(cacheKey, {
697
+ events,
698
+ timestamp: Date.now(),
699
+ });
700
+
701
+ // Update reverse index for fast invalidation
702
+ this._indexCacheEntry(cacheKey, filter);
703
+
704
+ return events;
705
+ } finally {
706
+ // Clean up pending query after completion
707
+ pendingQueries.delete(cacheKey);
708
+ }
709
+ })();
710
+
711
+ // Store the pending query
712
+ pendingQueries.set(cacheKey, {
713
+ promise: queryPromise,
392
714
  timestamp: Date.now(),
393
715
  });
394
716
 
395
- // Limit cache size
396
- if (this._eventCache.size > 100) {
397
- const firstKey = this._eventCache.keys().next().value;
398
- this._eventCache.delete(firstKey);
717
+ return queryPromise;
718
+ }
719
+
720
+ /**
721
+ * Limit cache size (called after cache operations)
722
+ * Note: LRU cache handles this automatically, kept for API compatibility
723
+ * @private
724
+ */
725
+ _limitCacheSize() {
726
+ // LRU cache handles size limiting automatically
727
+ }
728
+
729
+ /**
730
+ * Add cache entry to reverse index for fast invalidation
731
+ * @private
732
+ */
733
+ _indexCacheEntry(cacheKey, filter) {
734
+ // Index by kinds for fast lookup during invalidation
735
+ if (filter.kinds) {
736
+ for (const kind of filter.kinds) {
737
+ if (!this._cacheIndex.has(kind)) {
738
+ this._cacheIndex.set(kind, new Set());
739
+ }
740
+ this._cacheIndex.get(kind).add(cacheKey);
741
+ }
399
742
  }
743
+ }
400
744
 
401
- return events;
745
+ /**
746
+ * Remove cache entry from reverse index
747
+ * @private
748
+ */
749
+ _unindexCacheEntry(cacheKey) {
750
+ // Try to parse the filter from the cache key to remove from index
751
+ if (!cacheKey.startsWith('{')) return;
752
+
753
+ try {
754
+ const filter = JSON.parse(cacheKey);
755
+ if (filter.kinds) {
756
+ for (const kind of filter.kinds) {
757
+ const indexSet = this._cacheIndex.get(kind);
758
+ if (indexSet) {
759
+ indexSet.delete(cacheKey);
760
+ if (indexSet.size === 0) {
761
+ this._cacheIndex.delete(kind);
762
+ }
763
+ }
764
+ }
765
+ }
766
+ } catch {
767
+ // Not a valid filter key, skip
768
+ }
402
769
  }
403
770
 
404
771
  /**
@@ -427,20 +794,42 @@ export class NostrClient {
427
794
 
428
795
  /**
429
796
  * Internal method to refresh a path from relays
797
+ * Throttled to avoid flooding the relay with repeated requests
430
798
  * @private
431
799
  */
432
800
  async _doBackgroundPathRefresh(path, kind, options) {
433
801
  if (this.relays.length === 0) return;
434
802
 
803
+ // Throttle: Skip if we've refreshed this path recently
804
+ const lastRefresh = backgroundRefreshThrottle.get(path);
805
+ if (lastRefresh && Date.now() - lastRefresh < BACKGROUND_REFRESH_INTERVAL) {
806
+ return; // Skip - recently refreshed
807
+ }
808
+
809
+ // Mark as refreshed
810
+ backgroundRefreshThrottle.set(path, Date.now());
811
+
812
+ // Clean up old throttle entries periodically (keep map from growing)
813
+ if (backgroundRefreshThrottle.size > 1000) {
814
+ const cutoff = Date.now() - BACKGROUND_REFRESH_INTERVAL;
815
+ for (const [key, timestamp] of backgroundRefreshThrottle) {
816
+ if (timestamp < cutoff) {
817
+ backgroundRefreshThrottle.delete(key);
818
+ }
819
+ }
820
+ }
821
+
435
822
  const filter = {
436
823
  kinds: [kind],
437
824
  authors: options.authors || [this.publicKey],
438
825
  '#d': [path],
439
826
  limit: 1,
440
827
  };
828
+ const cacheKey = JSON.stringify(filter);
441
829
 
830
+ // Use our query deduplication by calling query() instead of pool.querySync() directly
442
831
  const timeout = options.timeout || 30000;
443
- const events = await this.pool.querySync(this.relays, filter, { timeout });
832
+ const events = await this._queryRelaysAndCache(filter, cacheKey, timeout);
444
833
 
445
834
  // Filter by author (relays may not respect filter)
446
835
  const authorFiltered = events.filter(e =>
@@ -468,22 +857,35 @@ export class NostrClient {
468
857
 
469
858
  /**
470
859
  * Internal method to refresh a prefix from relays
860
+ * Throttled to avoid flooding the relay with repeated requests
471
861
  * @private
472
862
  */
473
863
  async _doBackgroundPrefixRefresh(prefix, kind, options) {
474
864
  if (this.relays.length === 0) return;
475
865
 
866
+ // Throttle: Skip if we've refreshed this prefix recently
867
+ const throttleKey = `prefix:${prefix}`;
868
+ const lastRefresh = backgroundRefreshThrottle.get(throttleKey);
869
+ if (lastRefresh && Date.now() - lastRefresh < BACKGROUND_REFRESH_INTERVAL) {
870
+ return; // Skip - recently refreshed
871
+ }
872
+
873
+ // Mark as refreshed
874
+ backgroundRefreshThrottle.set(throttleKey, Date.now());
875
+
476
876
  // Query with wildcard-ish filter (relays handle d-tag prefix matching)
477
877
  const filter = {
478
878
  kinds: [kind],
479
879
  authors: options.authors || [this.publicKey],
480
880
  limit: options.limit || 1000,
481
881
  };
882
+ const cacheKey = JSON.stringify(filter);
482
883
 
884
+ // Use our query deduplication
483
885
  const timeout = options.timeout || 30000;
484
- let events = await this.pool.querySync(this.relays, filter, { timeout });
886
+ let events = await this._queryRelaysAndCache(filter, cacheKey, timeout);
485
887
 
486
- // Filter by author
888
+ // Filter by author (already done by _queryRelaysAndCache, but double-check)
487
889
  events = events.filter(e =>
488
890
  (options.authors || [this.publicKey]).includes(e.pubkey)
489
891
  );
@@ -590,6 +992,7 @@ export class NostrClient {
590
992
 
591
993
  /**
592
994
  * Subscribe to events
995
+ * Uses subscription deduplication to avoid creating multiple identical subscriptions
593
996
  * @param {Object} filter - Nostr filter object
594
997
  * @param {Function} onEvent - Callback for each event
595
998
  * @param {Object} options - Subscription options
@@ -599,7 +1002,8 @@ export class NostrClient {
599
1002
  // Ensure initialization is complete
600
1003
  await this._initReady;
601
1004
 
602
- const subId = `sub-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
1005
+ const subId = `sub-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
1006
+ const filterKey = JSON.stringify(filter);
603
1007
 
604
1008
  // If no relays, check cache for matching events and trigger callbacks
605
1009
  if (this.relays.length === 0) {
@@ -636,6 +1040,43 @@ export class NostrClient {
636
1040
  };
637
1041
  }
638
1042
 
1043
+ // Check if we already have an active subscription for this filter
1044
+ const existing = activeSubscriptions.get(filterKey);
1045
+ if (existing) {
1046
+ // Add callback to existing subscription
1047
+ existing.callbacks.add(onEvent);
1048
+ existing.refCount++;
1049
+
1050
+ // Return wrapper that removes this specific callback on unsubscribe
1051
+ return {
1052
+ id: subId,
1053
+ unsubscribe: () => {
1054
+ existing.callbacks.delete(onEvent);
1055
+ existing.refCount--;
1056
+
1057
+ // Only close actual subscription when no more callbacks
1058
+ if (existing.refCount === 0) {
1059
+ if (existing.subscription && existing.subscription.close) {
1060
+ existing.subscription.close();
1061
+ }
1062
+ activeSubscriptions.delete(filterKey);
1063
+ }
1064
+ this._subscriptions.delete(subId);
1065
+ },
1066
+ };
1067
+ }
1068
+
1069
+ // Create new subscription with shared callback dispatcher
1070
+ const callbacks = new Set([onEvent]);
1071
+ const subscriptionInfo = {
1072
+ callbacks,
1073
+ refCount: 1,
1074
+ subscription: null,
1075
+ };
1076
+
1077
+ // Store before creating subscription to handle race conditions
1078
+ activeSubscriptions.set(filterKey, subscriptionInfo);
1079
+
639
1080
  const sub = this.pool.subscribeMany(
640
1081
  this.relays,
641
1082
  [filter],
@@ -651,7 +1092,18 @@ export class NostrClient {
651
1092
  }
652
1093
 
653
1094
  this._cacheEvent(event);
654
- onEvent(event);
1095
+
1096
+ // Dispatch to ALL registered callbacks for this subscription
1097
+ const subInfo = activeSubscriptions.get(filterKey);
1098
+ if (subInfo) {
1099
+ for (const cb of subInfo.callbacks) {
1100
+ try {
1101
+ cb(event);
1102
+ } catch (err) {
1103
+ console.warn('[nostr] Subscription callback error:', err.message);
1104
+ }
1105
+ }
1106
+ }
655
1107
  },
656
1108
  oneose: () => {
657
1109
  if (options.onEOSE) options.onEOSE();
@@ -659,12 +1111,21 @@ export class NostrClient {
659
1111
  }
660
1112
  );
661
1113
 
1114
+ // Store the actual subscription object
1115
+ subscriptionInfo.subscription = sub;
662
1116
  this._subscriptions.set(subId, sub);
663
1117
 
664
1118
  return {
665
1119
  id: subId,
666
1120
  unsubscribe: () => {
667
- if (sub.close) sub.close();
1121
+ callbacks.delete(onEvent);
1122
+ subscriptionInfo.refCount--;
1123
+
1124
+ // Only close actual subscription when no more callbacks
1125
+ if (subscriptionInfo.refCount === 0) {
1126
+ if (sub.close) sub.close();
1127
+ activeSubscriptions.delete(filterKey);
1128
+ }
668
1129
  this._subscriptions.delete(subId);
669
1130
  },
670
1131
  };
@@ -714,16 +1175,26 @@ export class NostrClient {
714
1175
 
715
1176
  /**
716
1177
  * Invalidate query caches that might be affected by a new event
1178
+ * Uses reverse index for O(1) lookup instead of O(n) scan
717
1179
  * @private
718
1180
  */
719
1181
  _invalidateQueryCachesForEvent(event) {
720
- // Find and remove query cache entries that could match this event
721
- // Query cache keys are JSON-stringified filters
1182
+ // Use reverse index for fast lookup - only check caches that could match this event's kind
1183
+ const indexedKeys = this._cacheIndex.get(event.kind);
1184
+ if (!indexedKeys || indexedKeys.size === 0) {
1185
+ return; // No cached queries for this kind
1186
+ }
1187
+
722
1188
  const keysToDelete = [];
723
1189
 
724
- for (const [cacheKey, cached] of this._eventCache.entries()) {
725
- // Skip non-query caches (event IDs and d-tag keys)
726
- if (!cacheKey.startsWith('{')) continue;
1190
+ // Only iterate over cache entries that match the event's kind
1191
+ for (const cacheKey of indexedKeys) {
1192
+ const cached = this._eventCache.get(cacheKey);
1193
+ if (!cached) {
1194
+ // Cache entry was evicted, clean up index
1195
+ indexedKeys.delete(cacheKey);
1196
+ continue;
1197
+ }
727
1198
 
728
1199
  try {
729
1200
  const filter = JSON.parse(cacheKey);
@@ -732,38 +1203,42 @@ export class NostrClient {
732
1203
  keysToDelete.push(cacheKey);
733
1204
  }
734
1205
  } catch {
735
- // Not a JSON key, skip
1206
+ // Not a valid JSON key, clean up index
1207
+ indexedKeys.delete(cacheKey);
736
1208
  }
737
1209
  }
738
1210
 
739
1211
  for (const key of keysToDelete) {
740
1212
  this._eventCache.delete(key);
1213
+ this._unindexCacheEntry(key);
741
1214
  }
742
1215
  }
743
1216
 
744
1217
  /**
745
- * Cache event in memory and persist
1218
+ * Cache event in memory and persist (batched)
746
1219
  * @private
747
1220
  */
748
1221
  async _cacheEvent(event) {
749
- // Cache in memory
1222
+ // Cache in memory (synchronous - immediate for local-first reads)
750
1223
  this._cacheEventSync(event);
751
1224
 
752
- // Persist to storage
1225
+ // Queue for batched persistence (async - batches writes for I/O efficiency)
753
1226
  if (this.persistentStorage) {
754
- try {
755
- // For replaceable events, use d-tag as key
756
- let storageKey = event.id;
757
- if (event.kind >= 30000 && event.kind < 40000) {
758
- const dTag = event.tags.find(t => t[0] === 'd');
759
- if (dTag && dTag[1]) {
760
- storageKey = dTag[1]; // Use d-tag as key for replaceable events
761
- }
1227
+ // For replaceable events, use d-tag as key
1228
+ let storageKey = event.id;
1229
+ if (event.kind >= 30000 && event.kind < 40000) {
1230
+ const dTag = event.tags.find(t => t[0] === 'd');
1231
+ if (dTag && dTag[1]) {
1232
+ storageKey = dTag[1]; // Use d-tag as key for replaceable events
762
1233
  }
1234
+ }
763
1235
 
764
- await this.persistentStorage.put(storageKey, event);
765
- } catch (error) {
766
- console.warn('Failed to persist event:', error);
1236
+ // Queue for batched write (overwrites previous if same key)
1237
+ this._persistQueue.set(storageKey, event);
1238
+
1239
+ // Schedule batch flush if not already scheduled
1240
+ if (!this._persistTimer) {
1241
+ this._persistTimer = setTimeout(() => this._flushPersistQueue(), this._persistBatchMs);
767
1242
  }
768
1243
  }
769
1244
 
@@ -782,6 +1257,34 @@ export class NostrClient {
782
1257
  }
783
1258
  }
784
1259
 
1260
+ /**
1261
+ * Flush batched persistent writes
1262
+ * @private
1263
+ */
1264
+ async _flushPersistQueue() {
1265
+ this._persistTimer = null;
1266
+
1267
+ if (!this.persistentStorage || this._persistQueue.size === 0) {
1268
+ return;
1269
+ }
1270
+
1271
+ // Take snapshot of current queue and clear it
1272
+ const toWrite = Array.from(this._persistQueue.entries());
1273
+ this._persistQueue.clear();
1274
+
1275
+ // Write all queued events
1276
+ const writePromises = toWrite.map(async ([key, event]) => {
1277
+ try {
1278
+ await this.persistentStorage.put(key, event);
1279
+ } catch (error) {
1280
+ console.warn(`Failed to persist event ${key}:`, error.message);
1281
+ }
1282
+ });
1283
+
1284
+ // Wait for all writes to complete
1285
+ await Promise.all(writePromises);
1286
+ }
1287
+
785
1288
  /**
786
1289
  * Get cached events matching a filter
787
1290
  * @private
@@ -898,13 +1401,32 @@ export class NostrClient {
898
1401
 
899
1402
  /**
900
1403
  * Close all connections and subscriptions
1404
+ * @param {Object} options - Close options
1405
+ * @param {boolean} options.flush - Flush pending writes before closing (default: true)
901
1406
  */
902
- close() {
1407
+ async close(options = {}) {
1408
+ const shouldFlush = options.flush !== false;
1409
+
1410
+ // Flush pending persistent writes before closing
1411
+ if (shouldFlush && this._persistTimer) {
1412
+ clearTimeout(this._persistTimer);
1413
+ await this._flushPersistQueue();
1414
+ }
1415
+
903
1416
  // Stop background sync service
904
1417
  if (this.syncService) {
905
1418
  this.syncService.stop();
906
1419
  }
907
1420
 
1421
+ // Close long-lived author subscription
1422
+ const authorSub = authorSubscriptions.get(this.publicKey);
1423
+ if (authorSub && authorSub.subscription) {
1424
+ if (authorSub.subscription.close) {
1425
+ authorSub.subscription.close();
1426
+ }
1427
+ authorSubscriptions.delete(this.publicKey);
1428
+ }
1429
+
908
1430
  // Close all subscriptions
909
1431
  for (const sub of this._subscriptions.values()) {
910
1432
  if (sub.close) {
@@ -919,8 +1441,9 @@ export class NostrClient {
919
1441
  // Close pool
920
1442
  this.pool.close(this.relays);
921
1443
 
922
- // Clear cache
1444
+ // Clear cache and index
923
1445
  this._eventCache.clear();
1446
+ this._cacheIndex.clear();
924
1447
  }
925
1448
 
926
1449
  /**