holosphere 2.0.0-alpha4 → 2.0.0-alpha6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/dist/cjs/holosphere.cjs +1 -1
  2. package/dist/esm/holosphere.js +1 -1
  3. package/dist/index-BtKHqqet.cjs +5 -0
  4. package/dist/index-BtKHqqet.cjs.map +1 -0
  5. package/dist/{index-CBitK71M.cjs → index-CmzkI7SI.cjs} +2 -2
  6. package/dist/{index-CBitK71M.cjs.map → index-CmzkI7SI.cjs.map} +1 -1
  7. package/dist/{index-Cz-PLCUR.js → index-JFz-dW43.js} +2 -2
  8. package/dist/{index-Cz-PLCUR.js.map → index-JFz-dW43.js.map} +1 -1
  9. package/dist/{index-CV0eOogK.js → index-NOravBLu.js} +733 -164
  10. package/dist/index-NOravBLu.js.map +1 -0
  11. package/dist/{indexeddb-storage-CRsZyB2f.cjs → indexeddb-storage-C4HsulhA.cjs} +2 -2
  12. package/dist/{indexeddb-storage-CRsZyB2f.cjs.map → indexeddb-storage-C4HsulhA.cjs.map} +1 -1
  13. package/dist/{indexeddb-storage-DZaGlY_a.js → indexeddb-storage-OtSAVDZY.js} +2 -2
  14. package/dist/{indexeddb-storage-DZaGlY_a.js.map → indexeddb-storage-OtSAVDZY.js.map} +1 -1
  15. package/dist/{memory-storage-BkUi6sZG.js → memory-storage-ChpcYvxA.js} +2 -2
  16. package/dist/{memory-storage-BkUi6sZG.js.map → memory-storage-ChpcYvxA.js.map} +1 -1
  17. package/dist/{memory-storage-C0DuUsdY.cjs → memory-storage-MD6ED00P.cjs} +2 -2
  18. package/dist/{memory-storage-C0DuUsdY.cjs.map → memory-storage-MD6ED00P.cjs.map} +1 -1
  19. package/dist/{secp256k1-0kPdAVkK.cjs → secp256k1-DcTYQrqC.cjs} +2 -2
  20. package/dist/{secp256k1-0kPdAVkK.cjs.map → secp256k1-DcTYQrqC.cjs.map} +1 -1
  21. package/dist/{secp256k1-DN4FVXcv.js → secp256k1-PfNOEI7a.js} +2 -2
  22. package/dist/{secp256k1-DN4FVXcv.js.map → secp256k1-PfNOEI7a.js.map} +1 -1
  23. package/package.json +1 -1
  24. package/src/contracts/abis/Bundle.json +1438 -1435
  25. package/src/contracts/deployer.js +32 -3
  26. package/src/federation/handshake.js +13 -5
  27. package/src/index.js +9 -1
  28. package/src/storage/gun-async.js +55 -6
  29. package/src/storage/gun-auth.js +81 -30
  30. package/src/storage/gun-wrapper.js +56 -48
  31. package/src/storage/nostr-async.js +149 -14
  32. package/src/storage/nostr-client.js +574 -48
  33. package/dist/index-BB_vVJgv.cjs +0 -5
  34. package/dist/index-BB_vVJgv.cjs.map +0 -1
  35. package/dist/index-CV0eOogK.js.map +0 -1
package/src/storage/nostr-client.js

@@ -7,6 +7,116 @@ import { SimplePool, finalizeEvent, getPublicKey } from 'nostr-tools';
  import { OutboxQueue } from './outbox-queue.js';
  import { SyncService } from './sync-service.js';
 
+ // Global pool singleton - reuse connections across NostrClient instances
+ let globalPool = null;
+ let globalPoolRelays = new Set();
+
+ /**
+ * Get or create global SimplePool singleton
+ * This ensures WebSocket connections are reused across all operations
+ */
+ function getGlobalPool(config = {}) {
+ if (!globalPool) {
+ globalPool = new SimplePool({
+ enableReconnect: config.enableReconnect !== false,
+ enablePing: config.enablePing !== false,
+ });
+ }
+ return globalPool;
+ }
+
+ // Global pending queries map for deduplication
+ // Key: JSON-stringified filter, Value: { promise, timestamp }
+ const pendingQueries = new Map();
+ const PENDING_QUERY_TIMEOUT = 5000; // 5 seconds
+
+ // Global active subscriptions map for subscription deduplication
+ // Key: JSON-stringified filter, Value: { subscription, callbacks: Set, refCount }
+ const activeSubscriptions = new Map();
+
+ // Throttle background refreshes to avoid flooding the relay
+ // Key: path, Value: timestamp of last refresh
+ const backgroundRefreshThrottle = new Map();
+ const BACKGROUND_REFRESH_INTERVAL = 30000; // Only refresh same path every 30 seconds
+
+ // Write debouncing for rapid updates to the same path
+ // Key: d-tag path, Value: { event, timer, resolve, reject }
+ const pendingWrites = new Map();
+ const WRITE_DEBOUNCE_MS = 500; // Debounce writes within 500ms window
+
+ // Long-lived subscription manager - keeps ONE subscription per author for real-time updates
+ // Key: pubkey, Value: { subscription, lastEventTime, initialized }
+ const authorSubscriptions = new Map();
+ const AUTHOR_SUB_INIT_TIMEOUT = 5000; // Wait up to 5s for initial data load
+
+ /**
+ * Simple LRU Cache implementation
+ * Automatically evicts least recently used entries when max size is reached
+ */
+ class LRUCache {
+ constructor(maxSize = 500) {
+ this.maxSize = maxSize;
+ this.cache = new Map();
+ }
+
+ get(key) {
+ if (!this.cache.has(key)) return undefined;
+
+ // Move to end (most recently used)
+ const value = this.cache.get(key);
+ this.cache.delete(key);
+ this.cache.set(key, value);
+ return value;
+ }
+
+ set(key, value) {
+ // If key exists, delete it first to update position
+ if (this.cache.has(key)) {
+ this.cache.delete(key);
+ }
+
+ this.cache.set(key, value);
+
+ // Evict oldest entries if over capacity
+ while (this.cache.size > this.maxSize) {
+ const oldestKey = this.cache.keys().next().value;
+ this.cache.delete(oldestKey);
+ }
+ }
+
+ has(key) {
+ return this.cache.has(key);
+ }
+
+ delete(key) {
+ return this.cache.delete(key);
+ }
+
+ clear() {
+ this.cache.clear();
+ }
+
+ get size() {
+ return this.cache.size;
+ }
+
+ keys() {
+ return this.cache.keys();
+ }
+
+ values() {
+ return this.cache.values();
+ }
+
+ entries() {
+ return this.cache.entries();
+ }
+
+ forEach(callback) {
+ this.cache.forEach(callback);
+ }
+ }
+
  // Lazy-load WebSocket polyfill for Node.js environment
  let webSocketPolyfillPromise = null;
  function ensureWebSocket() {
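
For orientation, a minimal usage sketch of the LRUCache class added above (the variable names are illustrative, not part of the package):

```js
// Assumes the LRUCache class defined in this diff is in scope.
const cache = new LRUCache(2);   // capacity of 2 entries
cache.set('a', 1);
cache.set('b', 2);
cache.get('a');                  // 'a' becomes most recently used
cache.set('c', 3);               // over capacity: least recently used 'b' is evicted
console.log(cache.has('b'));     // false
console.log([...cache.keys()]);  // ['a', 'c'] - oldest first
```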
@@ -57,9 +167,15 @@ export class NostrClient {
  this.config = config;
 
  this._subscriptions = new Map();
- this._eventCache = new Map(); // In-memory cache for recent events
+ this._eventCache = new LRUCache(config.cacheSize || 500); // LRU cache for recent events
+ this._cacheIndex = new Map(); // Reverse index: kind -> Set of cache keys affected by that kind
  this.persistentStorage = null;
 
+ // Batched persistent writes for better I/O performance
+ this._persistQueue = new Map(); // path -> event
+ this._persistTimer = null;
+ this._persistBatchMs = config.persistBatchMs || 100; // Batch writes within 100ms window
+
  // Initialize pool and storage asynchronously
  this._initReady = this._initialize();
  }
@@ -74,10 +190,11 @@ export class NostrClient {
 
  // Initialize SimplePool with options (only if relays exist)
  if (this.relays.length > 0) {
- this.pool = new SimplePool({
- enableReconnect: this.config.enableReconnect !== false,
- enablePing: this.config.enablePing !== false,
- });
+ // Use global pool singleton to reuse WebSocket connections
+ this.pool = getGlobalPool(this.config);
+
+ // Track relays used by this client
+ this.relays.forEach(r => globalPoolRelays.add(r));
  } else {
  // Mock pool for testing - returns mock promise that resolves immediately
  this.pool = {
@@ -90,6 +207,90 @@ export class NostrClient {
 
  // Initialize persistent storage
  await this._initPersistentStorage();
+
+ // Start long-lived subscription for real-time cache updates
+ if (this.relays.length > 0) {
+ this._initLongLivedSubscription();
+ }
+ }
+
+ /**
+ * Initialize a long-lived subscription to keep cache fresh
+ * This replaces polling with a single persistent subscription
+ * @private
+ */
+ _initLongLivedSubscription() {
+ const subKey = this.publicKey;
+
+ // Check if subscription already exists for this author
+ if (authorSubscriptions.has(subKey)) {
+ return;
+ }
+
+ const subInfo = {
+ subscription: null,
+ initialized: false,
+ initPromise: null,
+ initResolve: null,
+ };
+
+ // Create promise for initial load completion
+ subInfo.initPromise = new Promise(resolve => {
+ subInfo.initResolve = resolve;
+ });
+
+ authorSubscriptions.set(subKey, subInfo);
+
+ // Subscribe to ALL events for this author (kind 30000)
+ // This single subscription replaces all the polling queries
+ const filter = {
+ kinds: [30000],
+ authors: [this.publicKey],
+ };
+
+ const sub = this.pool.subscribeMany(
+ this.relays,
+ [filter],
+ {
+ onevent: (event) => {
+ // Verify author (relay may not respect filter)
+ if (event.pubkey !== this.publicKey) {
+ return;
+ }
+
+ // Cache the event - this keeps our local cache in sync
+ this._cacheEvent(event);
+ },
+ oneose: () => {
+ // End of stored events - initial load complete
+ if (!subInfo.initialized) {
+ subInfo.initialized = true;
+ subInfo.initResolve();
+ }
+ },
+ }
+ );
+
+ subInfo.subscription = sub;
+
+ // Set timeout for initial load in case EOSE never arrives
+ setTimeout(() => {
+ if (!subInfo.initialized) {
+ subInfo.initialized = true;
+ subInfo.initResolve();
+ }
+ }, AUTHOR_SUB_INIT_TIMEOUT);
+ }
+
+ /**
+ * Wait for long-lived subscription to complete initial load
+ * @private
+ */
+ async _waitForSubscriptionInit() {
+ const subInfo = authorSubscriptions.get(this.publicKey);
+ if (subInfo && subInfo.initPromise) {
+ await subInfo.initPromise;
+ }
  }
 
  /**
@@ -222,9 +423,11 @@ export class NostrClient {
 
  /**
  * Publish event to relays
+ * Supports debouncing for replaceable events (kind 30000-39999) to avoid rapid updates
  * @param {Object} event - Unsigned event object
  * @param {Object} options - Publish options
  * @param {boolean} options.waitForRelays - Wait for relay confirmation (default: false for speed)
+ * @param {boolean} options.debounce - Debounce rapid writes to same d-tag (default: true for replaceable events)
  * @returns {Promise<Object>} Signed event with relay publish results
  */
  async publish(event, options = {}) {
@@ -233,8 +436,71 @@ export class NostrClient {
 
  const waitForRelays = options.waitForRelays || false;
 
- // Sign the event
- const signedEvent = finalizeEvent(event, this.privateKey);
+ // For replaceable events, check if we should debounce
+ const isReplaceable = event.kind >= 30000 && event.kind < 40000;
+ const shouldDebounce = isReplaceable && options.debounce !== false && !waitForRelays;
+
+ if (shouldDebounce) {
+ const dTag = event.tags?.find(t => t[0] === 'd');
+ if (dTag && dTag[1]) {
+ return this._debouncedPublish(event, dTag[1], options);
+ }
+ }
+
+ return this._doPublish(event, options);
+ }
+
+ /**
+ * Debounced publish - coalesces rapid writes to the same d-tag
+ * @private
+ */
+ _debouncedPublish(event, dTagPath, options) {
+ return new Promise((resolve, reject) => {
+ const existing = pendingWrites.get(dTagPath);
+
+ if (existing) {
+ // Cancel previous pending write and use the new one
+ clearTimeout(existing.timer);
+ // Resolve the previous promise with the new event (it will be superseded)
+ existing.resolve({
+ event: null,
+ results: [],
+ debounced: true,
+ supersededBy: event,
+ });
+ }
+
+ // Set up debounced write
+ const timer = setTimeout(async () => {
+ pendingWrites.delete(dTagPath);
+ try {
+ const result = await this._doPublish(event, options);
+ resolve(result);
+ } catch (err) {
+ reject(err);
+ }
+ }, WRITE_DEBOUNCE_MS);
+
+ pendingWrites.set(dTagPath, { event, timer, resolve, reject });
+
+ // Cache immediately for local-first reads (even before relay publish)
+ const signedEvent = finalizeEvent(event, this.privateKey);
+ this._cacheEvent(signedEvent);
+ });
+ }
+
+ /**
+ * Internal publish implementation
+ * @private
+ */
+ async _doPublish(event, options = {}) {
+ const waitForRelays = options.waitForRelays || false;
+
+ // Check if event is already signed (has id and sig)
+ // If so, use it as-is; otherwise sign it
+ const signedEvent = (event.id && event.sig)
+ ? event
+ : finalizeEvent(event, this.privateKey);
 
  // 1. Cache the event locally first (this makes reads instant)
  await this._cacheEvent(signedEvent);
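
A hedged sketch of the new debounce path for replaceable events: two publishes of the same d-tag inside the 500 ms window coalesce, so only the later event reaches the relays. The `client` instance and the event shape are assumptions for illustration, not part of this diff:

```js
// Sketch only: assumes `client` is an initialized NostrClient instance.
const makeEvent = (content) => ({
  kind: 30000,
  created_at: Math.floor(Date.now() / 1000),
  tags: [['d', 'holons/example/profile']], // same d-tag => same debounce key
  content: JSON.stringify(content),
});

const first = client.publish(makeEvent({ name: 'draft' }));
const second = client.publish(makeEvent({ name: 'final' }));

const a = await first;    // resolves early: { event: null, debounced: true, supersededBy: ... }
await second;             // published after the WRITE_DEBOUNCE_MS (500 ms) window
console.log(a.debounced); // true

// Passing { debounce: false } or { waitForRelays: true } bypasses the debounce path entirely.
```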
@@ -325,10 +591,12 @@ export class NostrClient {
 
  /**
  * Query events from relays
+ * Uses long-lived subscription for cache updates - avoids polling
  * @param {Object} filter - Nostr filter object
  * @param {Object} options - Query options
  * @param {number} options.timeout - Query timeout in ms (default: 30000, set to 0 for no timeout)
  * @param {boolean} options.localFirst - Return local cache immediately, refresh in background (default: true)
+ * @param {boolean} options.forceRelay - Force relay query even if subscription cache is available (default: false)
  * @returns {Promise<Array>} Array of events
  */
  async query(filter, options = {}) {
@@ -337,6 +605,7 @@ export class NostrClient {
 
  const timeout = options.timeout !== undefined ? options.timeout : 30000;
  const localFirst = options.localFirst !== false; // Default to true for speed
+ const forceRelay = options.forceRelay === true;
 
  // If no relays, query from cache only
  if (this.relays.length === 0) {
@@ -344,6 +613,35 @@ export class NostrClient {
  return matchingEvents;
  }
 
+ // Check if this query can be served from the long-lived subscription cache
+ // The subscription keeps ALL events for this author in cache, updated in real-time
+ const subInfo = authorSubscriptions.get(this.publicKey);
+ const isOwnDataQuery = filter.authors &&
+ filter.authors.length === 1 &&
+ filter.authors[0] === this.publicKey;
+
+ // If we have an initialized subscription for our own data, use cache
+ if (!forceRelay && isOwnDataQuery && subInfo && subInfo.initialized) {
+ // Return matching events from cache - no relay query needed!
+ const matchingEvents = this._getMatchingCachedEvents(filter);
+ return matchingEvents;
+ }
+
+ // For first query before subscription initializes, wait briefly
+ if (isOwnDataQuery && subInfo && !subInfo.initialized) {
+ // Wait for subscription to initialize (up to timeout)
+ await Promise.race([
+ subInfo.initPromise,
+ new Promise(resolve => setTimeout(resolve, Math.min(timeout, AUTHOR_SUB_INIT_TIMEOUT)))
+ ]);
+
+ // Now try cache again
+ if (subInfo.initialized) {
+ const matchingEvents = this._getMatchingCachedEvents(filter);
+ return matchingEvents;
+ }
+ }
+
  // Check d-tag cache first for single-item queries (most common case)
  // This ensures recently written data is returned immediately
  if (filter['#d'] && filter['#d'].length === 1 && filter.kinds && filter.kinds.length === 1) {
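
The practical effect of the long-lived author subscription on query(), sketched with an assumed `client`: once the initial EOSE (or the 5 s init timeout) has passed, single-author queries are answered from the in-memory cache, while `forceRelay: true` still goes to the relays:

```js
// Sketch only: `client` is an initialized NostrClient with at least one relay.
const filter = { kinds: [30000], authors: [client.publicKey] };

// Served from the cache kept fresh by the long-lived subscription;
// the very first call waits briefly for the initial load to finish.
const cached = await client.query(filter);

// Explicitly bypass the subscription cache and query the relays.
const fresh = await client.query(filter, { forceRelay: true, timeout: 10000 });

console.log(cached.length, fresh.length);
```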
@@ -376,29 +674,101 @@ export class NostrClient {
 
  /**
  * Query relays and update cache
+ * Uses global pending queries map to deduplicate identical concurrent queries
  * @private
  */
  async _queryRelaysAndCache(filter, cacheKey, timeout) {
- let events = await this.pool.querySync(this.relays, filter, { timeout });
-
- // CRITICAL: Filter out events from other authors (relay may not respect filter)
- if (filter.authors && filter.authors.length > 0) {
- events = events.filter(event => filter.authors.includes(event.pubkey));
+ // Check if there's already a pending query for this exact filter
+ const pending = pendingQueries.get(cacheKey);
+ if (pending && Date.now() - pending.timestamp < PENDING_QUERY_TIMEOUT) {
+ // Reuse the pending query promise instead of creating a new one
+ return pending.promise;
  }
 
- // Cache results
- this._eventCache.set(cacheKey, {
- events,
+ // Create the query promise
+ const queryPromise = (async () => {
+ try {
+ let events = await this.pool.querySync(this.relays, filter, { timeout });
+
+ // CRITICAL: Filter out events from other authors (relay may not respect filter)
+ if (filter.authors && filter.authors.length > 0) {
+ events = events.filter(event => filter.authors.includes(event.pubkey));
+ }
+
+ // Cache results
+ this._eventCache.set(cacheKey, {
+ events,
+ timestamp: Date.now(),
+ });
+
+ // Update reverse index for fast invalidation
+ this._indexCacheEntry(cacheKey, filter);
+
+ return events;
+ } finally {
+ // Clean up pending query after completion
+ pendingQueries.delete(cacheKey);
+ }
+ })();
+
+ // Store the pending query
+ pendingQueries.set(cacheKey, {
+ promise: queryPromise,
  timestamp: Date.now(),
  });
 
- // Limit cache size
- if (this._eventCache.size > 100) {
- const firstKey = this._eventCache.keys().next().value;
- this._eventCache.delete(firstKey);
+ return queryPromise;
+ }
+
+ /**
+ * Limit cache size (called after cache operations)
+ * Note: LRU cache handles this automatically, kept for API compatibility
+ * @private
+ */
+ _limitCacheSize() {
+ // LRU cache handles size limiting automatically
+ }
+
+ /**
+ * Add cache entry to reverse index for fast invalidation
+ * @private
+ */
+ _indexCacheEntry(cacheKey, filter) {
+ // Index by kinds for fast lookup during invalidation
+ if (filter.kinds) {
+ for (const kind of filter.kinds) {
+ if (!this._cacheIndex.has(kind)) {
+ this._cacheIndex.set(kind, new Set());
+ }
+ this._cacheIndex.get(kind).add(cacheKey);
+ }
  }
+ }
 
- return events;
+ /**
+ * Remove cache entry from reverse index
+ * @private
+ */
+ _unindexCacheEntry(cacheKey) {
+ // Try to parse the filter from the cache key to remove from index
+ if (!cacheKey.startsWith('{')) return;
+
+ try {
+ const filter = JSON.parse(cacheKey);
+ if (filter.kinds) {
+ for (const kind of filter.kinds) {
+ const indexSet = this._cacheIndex.get(kind);
+ if (indexSet) {
+ indexSet.delete(cacheKey);
+ if (indexSet.size === 0) {
+ this._cacheIndex.delete(kind);
+ }
+ }
+ }
+ }
+ } catch {
+ // Not a valid filter key, skip
+ }
  }
 
  /**
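
The pendingQueries map means identical filters that reach _queryRelaysAndCache while a request is in flight share one relay round trip (for up to PENDING_QUERY_TIMEOUT = 5000 ms). A hedged sketch, assuming both calls route through that helper with the same stringified filter:

```js
// Sketch only: two identical queries issued back to back share one querySync call.
const filter = {
  kinds: [30000],
  authors: [client.publicKey],
  '#d': ['holons/example'],
  limit: 1,
};

const [a, b] = await Promise.all([
  client.query(filter, { forceRelay: true }),
  client.query(filter, { forceRelay: true }),
]);
console.log(a.length === b.length); // both resolved from the same pending promise
```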
@@ -427,20 +797,42 @@ export class NostrClient {
 
  /**
  * Internal method to refresh a path from relays
+ * Throttled to avoid flooding the relay with repeated requests
  * @private
  */
  async _doBackgroundPathRefresh(path, kind, options) {
  if (this.relays.length === 0) return;
 
+ // Throttle: Skip if we've refreshed this path recently
+ const lastRefresh = backgroundRefreshThrottle.get(path);
+ if (lastRefresh && Date.now() - lastRefresh < BACKGROUND_REFRESH_INTERVAL) {
+ return; // Skip - recently refreshed
+ }
+
+ // Mark as refreshed
+ backgroundRefreshThrottle.set(path, Date.now());
+
+ // Clean up old throttle entries periodically (keep map from growing)
+ if (backgroundRefreshThrottle.size > 1000) {
+ const cutoff = Date.now() - BACKGROUND_REFRESH_INTERVAL;
+ for (const [key, timestamp] of backgroundRefreshThrottle) {
+ if (timestamp < cutoff) {
+ backgroundRefreshThrottle.delete(key);
+ }
+ }
+ }
+
  const filter = {
  kinds: [kind],
  authors: options.authors || [this.publicKey],
  '#d': [path],
  limit: 1,
  };
+ const cacheKey = JSON.stringify(filter);
 
+ // Use our query deduplication by calling query() instead of pool.querySync() directly
  const timeout = options.timeout || 30000;
- const events = await this.pool.querySync(this.relays, filter, { timeout });
+ const events = await this._queryRelaysAndCache(filter, cacheKey, timeout);
 
  // Filter by author (relays may not respect filter)
  const authorFiltered = events.filter(e =>
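
The throttle shared by both background-refresh helpers is a plain timestamp map; the same pattern in isolation (standalone sketch, nothing here is exported by the package):

```js
// Standalone sketch of the 30-second refresh throttle used above.
const lastRun = new Map();
const INTERVAL_MS = 30000;

function shouldRefresh(key) {
  const prev = lastRun.get(key);
  if (prev && Date.now() - prev < INTERVAL_MS) return false; // refreshed recently, skip
  lastRun.set(key, Date.now());
  return true;
}

console.log(shouldRefresh('holons/example')); // true  (first request)
console.log(shouldRefresh('holons/example')); // false (inside the 30 s window)
```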
@@ -468,22 +860,35 @@ export class NostrClient {
 
  /**
  * Internal method to refresh a prefix from relays
+ * Throttled to avoid flooding the relay with repeated requests
  * @private
  */
  async _doBackgroundPrefixRefresh(prefix, kind, options) {
  if (this.relays.length === 0) return;
 
+ // Throttle: Skip if we've refreshed this prefix recently
+ const throttleKey = `prefix:${prefix}`;
+ const lastRefresh = backgroundRefreshThrottle.get(throttleKey);
+ if (lastRefresh && Date.now() - lastRefresh < BACKGROUND_REFRESH_INTERVAL) {
+ return; // Skip - recently refreshed
+ }
+
+ // Mark as refreshed
+ backgroundRefreshThrottle.set(throttleKey, Date.now());
+
  // Query with wildcard-ish filter (relays handle d-tag prefix matching)
  const filter = {
  kinds: [kind],
  authors: options.authors || [this.publicKey],
  limit: options.limit || 1000,
  };
+ const cacheKey = JSON.stringify(filter);
 
+ // Use our query deduplication
  const timeout = options.timeout || 30000;
- let events = await this.pool.querySync(this.relays, filter, { timeout });
+ let events = await this._queryRelaysAndCache(filter, cacheKey, timeout);
 
- // Filter by author
+ // Filter by author (already done by _queryRelaysAndCache, but double-check)
  events = events.filter(e =>
  (options.authors || [this.publicKey]).includes(e.pubkey)
  );
@@ -590,6 +995,7 @@ export class NostrClient {
 
  /**
  * Subscribe to events
+ * Uses subscription deduplication to avoid creating multiple identical subscriptions
  * @param {Object} filter - Nostr filter object
  * @param {Function} onEvent - Callback for each event
  * @param {Object} options - Subscription options
@@ -599,7 +1005,8 @@ export class NostrClient {
  // Ensure initialization is complete
  await this._initReady;
 
- const subId = `sub-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
+ const subId = `sub-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
+ const filterKey = JSON.stringify(filter);
 
  // If no relays, check cache for matching events and trigger callbacks
  if (this.relays.length === 0) {
@@ -636,6 +1043,43 @@ export class NostrClient {
  };
  }
 
+ // Check if we already have an active subscription for this filter
+ const existing = activeSubscriptions.get(filterKey);
+ if (existing) {
+ // Add callback to existing subscription
+ existing.callbacks.add(onEvent);
+ existing.refCount++;
+
+ // Return wrapper that removes this specific callback on unsubscribe
+ return {
+ id: subId,
+ unsubscribe: () => {
+ existing.callbacks.delete(onEvent);
+ existing.refCount--;
+
+ // Only close actual subscription when no more callbacks
+ if (existing.refCount === 0) {
+ if (existing.subscription && existing.subscription.close) {
+ existing.subscription.close();
+ }
+ activeSubscriptions.delete(filterKey);
+ }
+ this._subscriptions.delete(subId);
+ },
+ };
+ }
+
+ // Create new subscription with shared callback dispatcher
+ const callbacks = new Set([onEvent]);
+ const subscriptionInfo = {
+ callbacks,
+ refCount: 1,
+ subscription: null,
+ };
+
+ // Store before creating subscription to handle race conditions
+ activeSubscriptions.set(filterKey, subscriptionInfo);
+
  const sub = this.pool.subscribeMany(
  this.relays,
  [filter],
@@ -651,7 +1095,18 @@ export class NostrClient {
  }
 
  this._cacheEvent(event);
- onEvent(event);
+
+ // Dispatch to ALL registered callbacks for this subscription
+ const subInfo = activeSubscriptions.get(filterKey);
+ if (subInfo) {
+ for (const cb of subInfo.callbacks) {
+ try {
+ cb(event);
+ } catch (err) {
+ console.warn('[nostr] Subscription callback error:', err.message);
+ }
+ }
+ }
  },
  oneose: () => {
  if (options.onEOSE) options.onEOSE();
659
1114
  }
660
1115
  );
661
1116
 
1117
+ // Store the actual subscription object
1118
+ subscriptionInfo.subscription = sub;
662
1119
  this._subscriptions.set(subId, sub);
663
1120
 
664
1121
  return {
665
1122
  id: subId,
666
1123
  unsubscribe: () => {
667
- if (sub.close) sub.close();
1124
+ callbacks.delete(onEvent);
1125
+ subscriptionInfo.refCount--;
1126
+
1127
+ // Only close actual subscription when no more callbacks
1128
+ if (subscriptionInfo.refCount === 0) {
1129
+ if (sub.close) sub.close();
1130
+ activeSubscriptions.delete(filterKey);
1131
+ }
668
1132
  this._subscriptions.delete(subId);
669
1133
  },
670
1134
  };
@@ -714,16 +1178,26 @@ export class NostrClient {
 
  /**
  * Invalidate query caches that might be affected by a new event
+ * Uses reverse index for O(1) lookup instead of O(n) scan
  * @private
  */
  _invalidateQueryCachesForEvent(event) {
- // Find and remove query cache entries that could match this event
- // Query cache keys are JSON-stringified filters
+ // Use reverse index for fast lookup - only check caches that could match this event's kind
+ const indexedKeys = this._cacheIndex.get(event.kind);
+ if (!indexedKeys || indexedKeys.size === 0) {
+ return; // No cached queries for this kind
+ }
+
  const keysToDelete = [];
 
- for (const [cacheKey, cached] of this._eventCache.entries()) {
- // Skip non-query caches (event IDs and d-tag keys)
- if (!cacheKey.startsWith('{')) continue;
+ // Only iterate over cache entries that match the event's kind
+ for (const cacheKey of indexedKeys) {
+ const cached = this._eventCache.get(cacheKey);
+ if (!cached) {
+ // Cache entry was evicted, clean up index
+ indexedKeys.delete(cacheKey);
+ continue;
+ }
 
  try {
  const filter = JSON.parse(cacheKey);
732
1206
  keysToDelete.push(cacheKey);
733
1207
  }
734
1208
  } catch {
735
- // Not a JSON key, skip
1209
+ // Not a valid JSON key, clean up index
1210
+ indexedKeys.delete(cacheKey);
736
1211
  }
737
1212
  }
738
1213
 
739
1214
  for (const key of keysToDelete) {
740
1215
  this._eventCache.delete(key);
1216
+ this._unindexCacheEntry(key);
741
1217
  }
742
1218
  }
743
1219
 
744
1220
  /**
745
- * Cache event in memory and persist
1221
+ * Cache event in memory and persist (batched)
746
1222
  * @private
747
1223
  */
748
1224
  async _cacheEvent(event) {
749
- // Cache in memory
1225
+ // Cache in memory (synchronous - immediate for local-first reads)
750
1226
  this._cacheEventSync(event);
751
1227
 
752
- // Persist to storage
1228
+ // Queue for batched persistence (async - batches writes for I/O efficiency)
753
1229
  if (this.persistentStorage) {
754
- try {
755
- // For replaceable events, use d-tag as key
756
- let storageKey = event.id;
757
- if (event.kind >= 30000 && event.kind < 40000) {
758
- const dTag = event.tags.find(t => t[0] === 'd');
759
- if (dTag && dTag[1]) {
760
- storageKey = dTag[1]; // Use d-tag as key for replaceable events
761
- }
1230
+ // For replaceable events, use d-tag as key
1231
+ let storageKey = event.id;
1232
+ if (event.kind >= 30000 && event.kind < 40000) {
1233
+ const dTag = event.tags.find(t => t[0] === 'd');
1234
+ if (dTag && dTag[1]) {
1235
+ storageKey = dTag[1]; // Use d-tag as key for replaceable events
762
1236
  }
1237
+ }
763
1238
 
764
- await this.persistentStorage.put(storageKey, event);
765
- } catch (error) {
766
- console.warn('Failed to persist event:', error);
1239
+ // Queue for batched write (overwrites previous if same key)
1240
+ this._persistQueue.set(storageKey, event);
1241
+
1242
+ // Schedule batch flush if not already scheduled
1243
+ if (!this._persistTimer) {
1244
+ this._persistTimer = setTimeout(() => this._flushPersistQueue(), this._persistBatchMs);
767
1245
  }
768
1246
  }
769
1247
 
@@ -782,6 +1260,34 @@ export class NostrClient {
  }
  }
 
+ /**
+ * Flush batched persistent writes
+ * @private
+ */
+ async _flushPersistQueue() {
+ this._persistTimer = null;
+
+ if (!this.persistentStorage || this._persistQueue.size === 0) {
+ return;
+ }
+
+ // Take snapshot of current queue and clear it
+ const toWrite = Array.from(this._persistQueue.entries());
+ this._persistQueue.clear();
+
+ // Write all queued events
+ const writePromises = toWrite.map(async ([key, event]) => {
+ try {
+ await this.persistentStorage.put(key, event);
+ } catch (error) {
+ console.warn(`Failed to persist event ${key}:`, error.message);
+ }
+ });
+
+ // Wait for all writes to complete
+ await Promise.all(writePromises);
+ }
+
  /**
  * Get cached events matching a filter
  * @private
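
Batched persistence in practice, sketched with an assumed `client`: _cacheEvent now queues events by storage key and one timer flushes the whole batch; persistBatchMs (default 100 ms) is the only new knob, and close() flushes whatever is still queued:

```js
// Sketch only: two writes landing inside the batch window persist in one flush.
const ts = () => Math.floor(Date.now() / 1000);

await Promise.all([
  client.publish({ kind: 30000, created_at: ts(), tags: [['d', 'holons/a']], content: '1' },
                 { debounce: false }),
  client.publish({ kind: 30000, created_at: ts(), tags: [['d', 'holons/b']], content: '2' },
                 { debounce: false }),
]);
// Both events sit in _persistQueue until the persistBatchMs timer fires and
// _flushPersistQueue() writes them to persistent storage together.
await client.close(); // close() flushes any still-pending batched writes first
```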
@@ -898,13 +1404,32 @@ export class NostrClient {
 
  /**
  * Close all connections and subscriptions
+ * @param {Object} options - Close options
+ * @param {boolean} options.flush - Flush pending writes before closing (default: true)
  */
- close() {
+ async close(options = {}) {
+ const shouldFlush = options.flush !== false;
+
+ // Flush pending persistent writes before closing
+ if (shouldFlush && this._persistTimer) {
+ clearTimeout(this._persistTimer);
+ await this._flushPersistQueue();
+ }
+
  // Stop background sync service
  if (this.syncService) {
  this.syncService.stop();
  }
 
+ // Close long-lived author subscription
+ const authorSub = authorSubscriptions.get(this.publicKey);
+ if (authorSub && authorSub.subscription) {
+ if (authorSub.subscription.close) {
+ authorSub.subscription.close();
+ }
+ authorSubscriptions.delete(this.publicKey);
+ }
+
  // Close all subscriptions
  for (const sub of this._subscriptions.values()) {
  if (sub.close) {
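
Because close() is now async and flushes the persistence queue by default, callers that used to invoke it synchronously should await it (or opt out of the flush). A brief sketch:

```js
// Sketch only: close() returns a promise as of this version.
await client.close(); // flush batched writes, then tear everything down

// To skip flushing queued persistent writes:
// await client.close({ flush: false });
```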
@@ -919,8 +1444,9 @@ export class NostrClient {
  // Close pool
  this.pool.close(this.relays);
 
- // Clear cache
+ // Clear cache and index
  this._eventCache.clear();
+ this._cacheIndex.clear();
  }
 
  /**