holosphere 2.0.0-alpha1 → 2.0.0-alpha4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (154) hide show
  1. package/dist/2019-D2OG2idw.js +6680 -0
  2. package/dist/2019-D2OG2idw.js.map +1 -0
  3. package/dist/2019-EION3wKo.cjs +8 -0
  4. package/dist/2019-EION3wKo.cjs.map +1 -0
  5. package/dist/_commonjsHelpers-C37NGDzP.cjs +2 -0
  6. package/dist/_commonjsHelpers-C37NGDzP.cjs.map +1 -0
  7. package/dist/_commonjsHelpers-CUmg6egw.js +7 -0
  8. package/dist/_commonjsHelpers-CUmg6egw.js.map +1 -0
  9. package/dist/browser-BSniCNqO.js +3058 -0
  10. package/dist/browser-BSniCNqO.js.map +1 -0
  11. package/dist/browser-Cq59Ij19.cjs +2 -0
  12. package/dist/browser-Cq59Ij19.cjs.map +1 -0
  13. package/dist/cjs/holosphere.cjs +2 -0
  14. package/dist/cjs/holosphere.cjs.map +1 -0
  15. package/dist/esm/holosphere.js +53 -0
  16. package/dist/esm/holosphere.js.map +1 -0
  17. package/dist/index-BB_vVJgv.cjs +5 -0
  18. package/dist/index-BB_vVJgv.cjs.map +1 -0
  19. package/dist/index-CBitK71M.cjs +12 -0
  20. package/dist/index-CBitK71M.cjs.map +1 -0
  21. package/dist/index-CV0eOogK.js +37423 -0
  22. package/dist/index-CV0eOogK.js.map +1 -0
  23. package/dist/index-Cz-PLCUR.js +15104 -0
  24. package/dist/index-Cz-PLCUR.js.map +1 -0
  25. package/dist/indexeddb-storage-CRsZyB2f.cjs +2 -0
  26. package/dist/indexeddb-storage-CRsZyB2f.cjs.map +1 -0
  27. package/dist/indexeddb-storage-DZaGlY_a.js +132 -0
  28. package/dist/indexeddb-storage-DZaGlY_a.js.map +1 -0
  29. package/dist/memory-storage-BkUi6sZG.js +51 -0
  30. package/dist/memory-storage-BkUi6sZG.js.map +1 -0
  31. package/dist/memory-storage-C0DuUsdY.cjs +2 -0
  32. package/dist/memory-storage-C0DuUsdY.cjs.map +1 -0
  33. package/dist/secp256k1-0kPdAVkK.cjs +12 -0
  34. package/dist/secp256k1-0kPdAVkK.cjs.map +1 -0
  35. package/dist/secp256k1-DN4FVXcv.js +1890 -0
  36. package/dist/secp256k1-DN4FVXcv.js.map +1 -0
  37. package/docs/CONTRACTS.md +797 -0
  38. package/docs/FOSDEM_PROPOSAL.md +388 -0
  39. package/docs/LOCALFIRST.md +266 -0
  40. package/docs/contracts/api-interface.md +793 -0
  41. package/docs/data-model.md +476 -0
  42. package/docs/gun-async-usage.md +338 -0
  43. package/docs/plan.md +349 -0
  44. package/docs/quickstart.md +674 -0
  45. package/docs/research.md +362 -0
  46. package/docs/spec.md +244 -0
  47. package/docs/storage-backends.md +326 -0
  48. package/docs/tasks.md +947 -0
  49. package/examples/demo.html +47 -0
  50. package/package.json +10 -5
  51. package/src/contracts/abis/Appreciative.json +1280 -0
  52. package/src/contracts/abis/AppreciativeFactory.json +101 -0
  53. package/src/contracts/abis/Bundle.json +1435 -0
  54. package/src/contracts/abis/BundleFactory.json +106 -0
  55. package/src/contracts/abis/Holon.json +881 -0
  56. package/src/contracts/abis/Holons.json +330 -0
  57. package/src/contracts/abis/Managed.json +1262 -0
  58. package/src/contracts/abis/ManagedFactory.json +149 -0
  59. package/src/contracts/abis/Membrane.json +261 -0
  60. package/src/contracts/abis/Splitter.json +1624 -0
  61. package/src/contracts/abis/SplitterFactory.json +220 -0
  62. package/src/contracts/abis/TestToken.json +321 -0
  63. package/src/contracts/abis/Zoned.json +1461 -0
  64. package/src/contracts/abis/ZonedFactory.json +154 -0
  65. package/src/contracts/chain-manager.js +375 -0
  66. package/src/contracts/deployer.js +443 -0
  67. package/src/contracts/event-listener.js +507 -0
  68. package/src/contracts/holon-contracts.js +344 -0
  69. package/src/contracts/index.js +83 -0
  70. package/src/contracts/networks.js +224 -0
  71. package/src/contracts/operations.js +670 -0
  72. package/src/contracts/queries.js +589 -0
  73. package/src/core/holosphere.js +453 -1
  74. package/src/crypto/nostr-utils.js +263 -0
  75. package/src/federation/handshake.js +455 -0
  76. package/src/federation/hologram.js +1 -1
  77. package/src/hierarchical/upcast.js +6 -5
  78. package/src/index.js +463 -1939
  79. package/src/lib/ai-methods.js +308 -0
  80. package/src/lib/contract-methods.js +293 -0
  81. package/src/lib/errors.js +23 -0
  82. package/src/lib/federation-methods.js +238 -0
  83. package/src/lib/index.js +26 -0
  84. package/src/spatial/h3-operations.js +2 -2
  85. package/src/storage/backends/gundb-backend.js +377 -46
  86. package/src/storage/global-tables.js +28 -1
  87. package/src/storage/gun-auth.js +303 -0
  88. package/src/storage/gun-federation.js +776 -0
  89. package/src/storage/gun-references.js +198 -0
  90. package/src/storage/gun-schema.js +291 -0
  91. package/src/storage/gun-wrapper.js +347 -31
  92. package/src/storage/indexeddb-storage.js +49 -11
  93. package/src/storage/memory-storage.js +5 -0
  94. package/src/storage/nostr-async.js +45 -23
  95. package/src/storage/nostr-client.js +11 -5
  96. package/src/storage/persistent-storage.js +6 -1
  97. package/src/storage/unified-storage.js +119 -0
  98. package/src/subscriptions/manager.js +1 -1
  99. package/types/index.d.ts +133 -0
  100. package/tests/unit/ai/aggregation.test.js +0 -295
  101. package/tests/unit/ai/breakdown.test.js +0 -446
  102. package/tests/unit/ai/classifier.test.js +0 -294
  103. package/tests/unit/ai/council.test.js +0 -262
  104. package/tests/unit/ai/embeddings.test.js +0 -384
  105. package/tests/unit/ai/federation-ai.test.js +0 -344
  106. package/tests/unit/ai/h3-ai.test.js +0 -458
  107. package/tests/unit/ai/index.test.js +0 -304
  108. package/tests/unit/ai/json-ops.test.js +0 -307
  109. package/tests/unit/ai/llm-service.test.js +0 -390
  110. package/tests/unit/ai/nl-query.test.js +0 -383
  111. package/tests/unit/ai/relationships.test.js +0 -311
  112. package/tests/unit/ai/schema-extractor.test.js +0 -384
  113. package/tests/unit/ai/spatial.test.js +0 -279
  114. package/tests/unit/ai/tts.test.js +0 -279
  115. package/tests/unit/content.test.js +0 -332
  116. package/tests/unit/contract/core.test.js +0 -88
  117. package/tests/unit/contract/crypto.test.js +0 -198
  118. package/tests/unit/contract/data.test.js +0 -223
  119. package/tests/unit/contract/federation.test.js +0 -181
  120. package/tests/unit/contract/hierarchical.test.js +0 -113
  121. package/tests/unit/contract/schema.test.js +0 -114
  122. package/tests/unit/contract/social.test.js +0 -217
  123. package/tests/unit/contract/spatial.test.js +0 -110
  124. package/tests/unit/contract/subscriptions.test.js +0 -128
  125. package/tests/unit/contract/utils.test.js +0 -159
  126. package/tests/unit/core.test.js +0 -152
  127. package/tests/unit/crypto.test.js +0 -328
  128. package/tests/unit/federation.test.js +0 -234
  129. package/tests/unit/gun-async.test.js +0 -252
  130. package/tests/unit/hierarchical.test.js +0 -399
  131. package/tests/unit/integration/scenario-01-geographic-storage.test.js +0 -74
  132. package/tests/unit/integration/scenario-02-federation.test.js +0 -76
  133. package/tests/unit/integration/scenario-03-subscriptions.test.js +0 -102
  134. package/tests/unit/integration/scenario-04-validation.test.js +0 -129
  135. package/tests/unit/integration/scenario-05-hierarchy.test.js +0 -125
  136. package/tests/unit/integration/scenario-06-social.test.js +0 -135
  137. package/tests/unit/integration/scenario-07-persistence.test.js +0 -130
  138. package/tests/unit/integration/scenario-08-authorization.test.js +0 -161
  139. package/tests/unit/integration/scenario-09-cross-dimensional.test.js +0 -139
  140. package/tests/unit/integration/scenario-10-cross-holosphere-capabilities.test.js +0 -357
  141. package/tests/unit/integration/scenario-11-cross-holosphere-federation.test.js +0 -410
  142. package/tests/unit/integration/scenario-12-capability-federated-read.test.js +0 -719
  143. package/tests/unit/performance/benchmark.test.js +0 -85
  144. package/tests/unit/schema.test.js +0 -213
  145. package/tests/unit/spatial.test.js +0 -158
  146. package/tests/unit/storage.test.js +0 -195
  147. package/tests/unit/subscriptions.test.js +0 -328
  148. package/tests/unit/test-data-permanence-debug.js +0 -197
  149. package/tests/unit/test-data-permanence.js +0 -340
  150. package/tests/unit/test-key-persistence-fixed.js +0 -148
  151. package/tests/unit/test-key-persistence.js +0 -172
  152. package/tests/unit/test-relay-permanence.js +0 -376
  153. package/tests/unit/test-second-node.js +0 -95
  154. package/tests/unit/test-simple-write.js +0 -89
@@ -0,0 +1,388 @@
1
+ # FOSDEM 2025 - Local First Devroom
2
+ ## Talk Proposal
3
+
4
+ **Title:** "HoloSphere: Federated Local-First Spatial Data with Nostr and Capability Tokens"
5
+
6
+ **Category:** FOSS Frameworks that enable Local First app development
7
+
8
+ **Speaker:** [Your name/organization]
9
+
10
+ **Duration:** 20-25 minutes + Q&A
11
+
12
+ ---
13
+
14
+ ## Abstract
15
+
16
+ What if your local-first app knew *where* it was — and could federate data across geographic boundaries without a central server?
17
+
18
+ **HoloSphere** is an open-source JavaScript library that combines three powerful ideas: Uber's H3 hexagonal spatial indexing, the Nostr relay protocol for P2P synchronization, and a novel "hologram" reference system for zero-duplication federation.
19
+
20
+ Every write is signed with your secp256k1 key, cached in memory, persisted to IndexedDB/filesystem, then published to relays in the background. Reads return instantly from local cache. The result: offline writes, instant reads, eventual sync - with no backend infrastructure.
21
+
22
+ The key innovation is **holograms** - lightweight references (~150 bytes) that point to data in other locations without copying it. When biodiversity data in a local hexagon federates to regional and national views, only references propagate - the source remains authoritative. Cross-project federation uses **capability tokens** with scoped permissions, enabling controlled data sharing between independent HoloSphere instances.
23
+
24
+ This talk demonstrates a citizen science use case: field researchers collecting biodiversity observations offline, with data federating up through spatial hierarchies and syncing across the Nostr network when connectivity returns.
25
+
26
+ ---
27
+
28
+ ## How Nostr Enables Local-First
29
+
30
+ ### The Storage Model
31
+
32
+ HoloSphere uses Nostr's **parameterized replaceable events (kind 30000, per NIP-33)** as its storage primitive. Every data item becomes a signed event with a d-tag encoding the path:
33
+
34
+ ```
35
+ d-tag: "appname/holonId/lensName/dataId"
36
+ "biomap/8928308280fffff/biodiversity/obs-001"
37
+ ```
38
+
39
+ ### The Write Path (Local-First)
40
+
41
+ ```
42
+ write(holon, lens, data)
43
+ |
44
+ Sign with secp256k1 private key
45
+ |
46
+ Cache in memory (~1ms)
47
+ |
48
+ Persist to IndexedDB/filesystem (~10ms)
49
+ |
50
+ Publish to relays (background, fire-and-forget)
51
+ |
52
+ Return success (before relay confirmation)
53
+ ```
54
+
55
+ **Key insight:** The write returns after local persistence, not after relay confirmation. This makes writes instant and offline-capable.
56
+
57
+ ### The Read Path (Cache-First)
58
+
59
+ 1. Check memory cache (5-second TTL) - instant return
60
+ 2. Check persistent storage - still fast
61
+ 3. Query relays only on cache miss
62
+ 4. Background refresh for stale data
63
+
64
+ **Result:** Hot reads are ~1ms. Cold reads hit local storage before network. Network is truly optional for reads.
65
+
66
+ ### What This Gives You
67
+
68
+ - **No accounts:** Your keypair *is* your identity
69
+ - **No servers:** Connect to any public relay, or run your own
70
+ - **No lock-in:** Standard Nostr protocol, portable keys
71
+ - **Data sovereignty:** You sign everything, you control access
72
+
73
+ ---
74
+
75
+ ## Federation Without Duplication: Holograms
76
+
77
+ ### The Problem
78
+
79
+ A biodiversity observation recorded in hexagon A should appear in:
80
+ - The local forest view (resolution 9)
81
+ - The regional view (resolution 6)
82
+ - The national aggregation (resolution 3)
83
+
84
+ Copying data to each level = storage explosion + stale data + no single source of truth.
85
+
86
+ ### The Solution: Holograms
87
+
88
+ A **hologram** is a lightweight reference (~150 bytes) that points to the original:
89
+
90
+ ```javascript
91
+ // Original observation in local hexagon
92
+ {
93
+ id: "obs-001",
94
+ species: "Parus major",
95
+ timestamp: 1701234567000,
96
+ confidence: 0.95
97
+ }
98
+
99
+ // Hologram in regional hexagon (~150 bytes)
100
+ {
101
+ id: "obs-001",
102
+ hologram: true,
103
+ soul: "biomap/8928308280fffff/biodiversity/obs-001",
104
+ target: { holonId, lensName, dataId },
105
+ // Local overrides (optional):
106
+ aggregationWeight: 1.0,
107
+ verifiedBy: "regional_coordinator"
108
+ }
109
+ ```
110
+
111
+ ### Automatic Resolution
112
+
113
+ When you read from the regional view, holograms resolve automatically:
114
+ 1. Detect `hologram: true`
115
+ 2. Follow `soul` to source
116
+ 3. Verify capability (if cross-project)
117
+ 4. Merge source data with local overrides
118
+ 5. Return complete object with `_hologram` metadata
119
+
120
+ **Circular detection:** Tracks visited souls, max depth 10.
121
+
122
+ **Active hologram tracking:** Sources know all their holograms via `_meta.activeHolograms` - enables cascade updates and cleanup.
123
+
124
+ ---
125
+
126
+ ## Cross-Project Federation: Capabilities
127
+
128
+ ### The Challenge
129
+
130
+ Different research projects want to share data:
131
+ - Project A: Local birdsong observations
132
+ - Project B: Regional biodiversity aggregation
133
+ - Project C: National conservation database
134
+
135
+ How do you control who reads what without a central auth server?
136
+
137
+ ### Capability Tokens
138
+
139
+ ```javascript
140
+ {
141
+ type: "capability",
142
+ permissions: ["read"],
143
+ scope: {
144
+ holonId: "*", // Any hexagon
145
+ lensName: "biodiversity" // Only biodiversity lens
146
+ },
147
+ recipient: "project_b_pubkey",
148
+   expires: 1735689600000, // Expiry as Unix ms timestamp (here ~30 days from issuance)
149
+ // Signed by issuer's private key
150
+ }
151
+ ```
152
+
153
+ ### Federation Handshake (Over Nostr)
154
+
155
+ 1. Project A sends federation request (kind 30078) with offered capability
156
+ 2. Project B accepts (kind 30079) with reciprocal capability
157
+ 3. Both store received capabilities in their federation registry
158
+ 4. Holograms can now resolve across project boundaries
159
+
160
+ ### Registry Tracks
161
+
162
+ - **Federated partners:** Public keys of trusted projects
163
+ - **Inbound capabilities:** What we can read from them
164
+ - **Outbound capabilities:** What they can read from us
165
+
166
+ ---
167
+
168
+ ## Use Case: Citizen Science Biodiversity Monitoring
169
+
170
+ ### The Scenario
171
+
172
+ Field researchers and citizen scientists mapping biodiversity in remote areas:
173
+
174
+ 1. **Select Region:** Draw polygon on map (forest, watershed, reserve)
175
+ 2. **Convert to Hexagons:** Polygon becomes H3 hexagons at chosen resolution
176
+ 3. **Collect Offline:** Record observations in the field
177
+ - Species identified
178
+ - GPS coordinates (auto-assigned to hexagon)
179
+ - Audio recordings
180
+ - Environmental conditions
181
+ 4. **Local Storage:** Data persists to the device and is immediately queryable
182
+ 5. **Background Sync:** When connectivity returns, publishes to relays
183
+ 6. **Federation:** Regional coordinators see aggregated data via holograms
184
+ 7. **Cross-Project:** National database receives capability-gated access
185
+
186
+ ### Why This Architecture Works
187
+
188
+ - **Poor connectivity:** Field sites often have no network - offline-first is essential
189
+ - **Spatial data:** Observations are inherently geographic - hexagons are natural
190
+ - **Multi-scale:** Local → regional → national aggregation via hologram hierarchy
191
+ - **Data sovereignty:** Researchers keep keys, control their data
192
+ - **No infrastructure:** Public Nostr relays, no servers to maintain
193
+
194
+ ### Technical Example
195
+
196
+ ```javascript
197
+ // Initialize HoloSphere with keypair
198
+ const hs = new HoloSphere({
199
+ appName: 'biomap',
200
+ privateKey: researcherPrivateKey,
201
+ relays: ['wss://relay.damus.io', 'wss://relay.nostr.band']
202
+ });
203
+
204
+ // Convert GPS to hexagon
205
+ const hexagon = await hs.toHolon(45.5231, -122.6765, 9);
206
+
207
+ // Record observation (works offline)
208
+ await hs.write(hexagon, 'biodiversity', {
209
+ species: 'Parus major',
210
+ timestamp: Date.now(),
211
+ audio: recordingBlob,
212
+ confidence: 0.95,
213
+ observer: hs.publicKey
214
+ });
215
+
216
+ // Federate to regional hexagon (creates hologram)
217
+ const regionalHex = (await hs.getParents(hexagon))[2]; // Resolution 6
218
+ await hs.federate(hexagon, regionalHex, 'biodiversity', {
219
+ direction: 'outbound',
220
+ mode: 'reference' // Hologram, not copy
221
+ });
222
+
223
+ // Regional coordinator queries aggregated data
224
+ const regionalData = await hs.getAll(regionalHex, 'biodiversity');
225
+ // Returns: original observations + resolved holograms from child hexagons
226
+ ```
227
+
228
+ ---
229
+
230
+ ## Current Capabilities vs Limitations
231
+
232
+ ### What Works Today
233
+
234
+ | Feature | Status | Notes |
235
+ |---------|--------|-------|
236
+ | Local persistence | Working | IndexedDB (browser), filesystem (Node) |
237
+ | Nostr relay sync | Working | Fire-and-forget publish, background sync |
238
+ | H3 spatial indexing | Working | 16 resolution levels, parent/child navigation |
239
+ | Hologram references | Working | Same-project federation |
240
+ | Hologram resolution | Working | Automatic, circular detection |
241
+ | Local overrides | Working | Position, metadata on holograms |
242
+ | Active hologram tracking | Working | Source tracks all references |
243
+ | Real-time subscriptions | Working | Nostr-based, with throttling |
244
+ | Capability tokens | Working | Scope + permissions + expiration |
245
+ | Cross-project holograms | Working | With capability verification |
246
+ | Federation discovery | Working | Nostr events for request/accept |
247
+
248
+ ### Known Limitations (Honest Assessment)
249
+
250
+ | Feature | Status | Notes |
251
+ |---------|--------|-------|
252
+ | Capability signature verification | Stubbed | Tokens generated but verification TODO |
253
+ | CRDT/automatic merge | Not implemented | Last-write-wins only |
254
+ | Offline write queue | Partial | Writes persist locally, no retry queue |
255
+ | Conflict resolution | Not implemented | No vector clocks, no causal ordering |
256
+ | Guaranteed delivery | Not implemented | Relay publish is best-effort |
257
+ | Bidirectional sync | Not implemented | Holograms are read-only references |
258
+
259
+ ### Architecture Classification
260
+
261
+ HoloSphere is currently **"local-caching with remote sync"** rather than pure **"local-first with remote replication"**:
262
+
263
+ - Writes: Local-first (persist before relay)
264
+ - Reads: Cache-first (local before network)
265
+ - Sync: Eventually consistent via relays
266
+ - Conflicts: Last-write-wins (no merge)
267
+
268
+ This is sufficient for many use cases but not full local-first as defined by Ink & Switch.
269
+
270
+ ---
271
+
272
+ ## Demo Plan
273
+
274
+ Live demonstration (5-7 minutes):
275
+
276
+ 1. **Draw polygon** on interactive map (Leaflet.draw)
277
+ 2. **Convert to hexagons** at multiple resolutions
278
+ 3. **Record biodiversity observations** in hexagons
279
+ 4. **Show local persistence** (IndexedDB inspector)
280
+ 5. **Disconnect network** - continue adding data
281
+ 6. **Query locally** - data remains available
282
+ 7. **Federate to regional hexagon** - create holograms
283
+ 8. **Show hologram resolution** - ~150 bytes vs full data
284
+ 9. **Reconnect** - watch background sync to relays
285
+ 10. **Cross-project federation** - exchange capabilities, resolve foreign holograms
286
+
287
+ ---
288
+
289
+ ## Why This Matters for Local-First
290
+
291
+ - **Spatial local-first:** First framework combining H3 geospatial with offline-capable P2P
292
+ - **Nostr beyond social:** Demonstrates Nostr as general-purpose sync layer
293
+ - **Federation without servers:** Capability tokens enable controlled sharing
294
+ - **Real use case:** Biodiversity monitoring has genuine connectivity constraints
295
+ - **Honest about gaps:** We show what works and what's still TODO
296
+
297
+ ---
298
+
299
+ ## Technical Stack
300
+
301
+ - **h3-js** - Uber's hexagonal geospatial indexing
302
+ - **nostr-tools** - Nostr protocol implementation
303
+ - **@noble/curves** - secp256k1 cryptography
304
+ - **IndexedDB/Filesystem** - Persistent local storage
305
+ - **OpenAI** (optional) - AI-assisted spatial queries
306
+
307
+ ---
308
+
309
+ ## Repository & Materials
310
+
311
+ - **Code:** https://github.com/liminalvillage/holosphere2
312
+ - **Documentation:** LOCALFIRST.md (architecture), VIBE.md (vision)
313
+ - **License:** MIT
314
+ - **Live Demo:** [URL TBD]
315
+
316
+ ---
317
+
318
+ ## Target Audience
319
+
320
+ - Developers building local-first applications
321
+ - Environmental tech / citizen science builders
322
+ - Decentralized app developers (especially Nostr ecosystem)
323
+ - Anyone interested in geospatial + P2P
324
+
325
+ ## Takeaway
326
+
327
+ Attendees will understand how to:
328
+ 1. Use Nostr as a local-first sync layer (not just social media)
329
+ 2. Organize data spatially with H3 hexagonal indexing
330
+ 3. Federate across boundaries using holograms (zero duplication)
331
+ 4. Control access with capability tokens (no central auth)
332
+
333
+ And honestly assess which local-first properties the current implementation achieves vs. what remains to be built.
334
+
335
+ ---
336
+
337
+ ## Appendix: Data Structures
338
+
339
+ ### Nostr Event (Data Write)
340
+
341
+ ```javascript
342
+ {
343
+ kind: 30000,
344
+ pubkey: "author_hex_pubkey",
345
+ created_at: 1701234567,
346
+ tags: [["d", "biomap/8928308280fffff/biodiversity/obs-001"]],
347
+ content: "{\"species\":\"Parus major\",\"confidence\":0.95,...}",
348
+ sig: "hex_signature"
349
+ }
350
+ ```
351
+
352
+ ### Hologram Object
353
+
354
+ ```javascript
355
+ {
356
+ id: "obs-001",
357
+ hologram: true,
358
+ soul: "biomap/8928308280fffff/biodiversity/obs-001",
359
+ target: {
360
+ appname: "biomap",
361
+ holonId: "8928308280fffff",
362
+ lensName: "biodiversity",
363
+ dataId: "obs-001",
364
+ authorPubKey: "hex_pubkey" // For cross-project
365
+ },
366
+ capability: "base64_token", // Optional
367
+ crossHolosphere: true, // If cross-project
368
+ // Local overrides:
369
+ verifiedBy: "coordinator_key",
370
+ aggregationWeight: 1.0
371
+ }
372
+ ```
373
+
374
+ ### Capability Token
375
+
376
+ ```javascript
377
+ {
378
+ type: "capability",
379
+ permissions: ["read"],
380
+ scope: { holonId: "*", lensName: "biodiversity" },
381
+ recipient: "partner_pubkey",
382
+ issuer: "holosphere",
383
+ nonce: "unique_random",
384
+ issued: 1701234567000,
385
+ expires: 1703826567000
386
+ }
387
+ // Encoded: base64(JSON).signature
388
+ ```
@@ -0,0 +1,266 @@
1
+ # Local-First Architecture
2
+
3
+ ## Overview
4
+
5
+ HoloSphere implements **true local-first architecture** where persistent storage is the source of truth, and Nostr relays serve as the sync/replication layer. This document describes the architecture and how it achieves local-first principles.
6
+
7
+ ---
8
+
9
+ ## Architecture
10
+
11
+ ### Storage Layer Stack
12
+
13
+ ```
14
+ +----------------------------------------------------------+
15
+ | HoloSphere API |
16
+ +----------------------------------------------------------+
17
+ | nostr-wrapper.js |
18
+ | (CRUD abstraction: write, read, update) |
19
+ +----------------------------------------------------------+
20
+ | nostr-async.js |
21
+ | (LOCAL-FIRST: persistentGet → background relay sync) |
22
+ +----------------------------------------------------------+
23
+ | nostr-client.js |
24
+ | (NostrClient: OutboxQueue, SyncService, relay pool) |
25
+ +----------------------------------------------------------+
26
+ | |
27
+ | +------------------+ +-------------------------+ |
28
+ | | Persistent Store | | OutboxQueue | |
29
+ | | (PRIMARY) | | (guaranteed delivery) | |
30
+ | | filesystem/IDB | | exponential backoff | |
31
+ | +------------------+ +-------------------------+ |
32
+ | | | |
33
+ | v v |
34
+ | +------------------+ +-------------------------+ |
35
+ | | Memory Cache | | SyncService | |
36
+ | | (optimization) | | (background retry) | |
37
+ | +------------------+ +-------------------------+ |
38
+ | |
39
+ | +--------------------------------------------------+ |
40
+ | | Nostr Relays (Replication) | |
41
+ | | wss://relay.damus.io, wss://relay.nostr.band | |
42
+ | +--------------------------------------------------+ |
43
+ +----------------------------------------------------------+
44
+ ```
45
+
46
+ ### Data Flow
47
+
48
+ #### Write Path (Guaranteed Delivery)
49
+
50
+ ```
51
+ write(holon, lens, data)
52
+
53
+
54
+ Sign with secp256k1 private key
55
+
56
+
57
+ Cache in memory (~1ms)
58
+
59
+
60
+ Persist to storage (~10ms) ──► Return success immediately
61
+
62
+
63
+ Enqueue in OutboxQueue (persistent)
64
+
65
+
66
+ Attempt immediate delivery to relays
67
+
68
+ ├─► Success: Remove from queue
69
+
70
+ └─► Failure: SyncService retries with exponential backoff
71
+ (1s → 2s → 4s → 8s → 16s, max 60s)
72
+ Failed events purged after 24 hours
73
+ ```
74
+
75
+ **Key property:** Writes return after local persistence, not after relay confirmation.
76
+
77
+ #### Read Path (Persistent-First)
78
+
79
+ ```
80
+ read(holon, lens, dataId)
81
+
82
+
83
+ Check persistent storage FIRST
84
+
85
+ ├─► Found: Return immediately
86
+ │ │
87
+ │ └─► Trigger background refresh from relays
88
+
89
+ └─► Not found: Query relays (fallback)
90
+
91
+ └─► Cache result locally
92
+ ```
93
+
94
+ **Key property:** Reads never block on network if data exists locally.
95
+
96
+ ---
97
+
98
+ ## Local-First Principles Assessment
99
+
100
+ Based on [Ink & Switch's Local-First Software](https://www.inkandswitch.com/local-first/) principles:
101
+
102
+ | Principle | Score | Implementation |
103
+ |-----------|-------|----------------|
104
+ | **No spinners** | ✅ Good | Reads return from persistent storage instantly; writes return after local persist |
105
+ | **Works offline** | ✅ Good | Full read/write functionality; OutboxQueue ensures writes sync when online |
106
+ | **Data ownership** | ✅ Good | Keypair-based identity; user signs all data with secp256k1 |
107
+ | **Longevity** | ✅ Good | Data persists locally (IndexedDB/filesystem); relays are backup |
108
+ | **Privacy** | ✅ Good | User controls keys; data can be encrypted |
109
+ | **Collaboration** | ⚠️ Partial | Last-write-wins; no CRDTs (future enhancement) |
110
+ | **No lock-in** | ✅ Good | Standard Nostr protocol; portable keypairs |
111
+
112
+ ---
113
+
114
+ ## Implementation Details
115
+
116
+ ### OutboxQueue (`src/storage/outbox-queue.js`)
117
+
118
+ Persistent queue for guaranteed relay delivery:
119
+
120
+ ```javascript
121
+ // Events are enqueued with full retry metadata
122
+ {
123
+ id: 'event-id',
124
+ event: { /* signed Nostr event */ },
125
+ relays: ['wss://relay1', 'wss://relay2'],
126
+ status: 'pending', // 'pending' | 'failed'
127
+ retries: 0,
128
+ createdAt: 1701234567000,
129
+ nextRetryAt: 1701234567000,
130
+ }
131
+ ```
132
+
133
+ **Features:**
134
+ - Exponential backoff: 1s → 2s → 4s → 8s → 16s (max 60s)
135
+ - Max 5 retry attempts before marking as 'failed'
136
+ - Failed events auto-purge after 24 hours
137
+ - Manual retry available via `retryFailed(eventId)`
138
+
139
+ ### SyncService (`src/storage/sync-service.js`)
140
+
141
+ Background service for reliable sync:
142
+
143
+ ```javascript
144
+ const syncService = new SyncService(client, {
145
+ interval: 10000, // Process queue every 10 seconds
146
+ autoStart: true,
147
+ });
148
+ ```
149
+
150
+ **Operations:**
151
+ - Processes pending events from OutboxQueue
152
+ - Retries failed deliveries with backoff
153
+ - Purges old failed events (24h TTL)
154
+ - Graceful shutdown on `client.close()`
155
+
156
+ ### Persistent-First Reads (`src/storage/nostr-async.js`)
157
+
158
+ ```javascript
159
+ // nostrGet() - single item
160
+ export async function nostrGet(client, path, kind, options) {
161
+ // LOCAL-FIRST: Check persistent storage FIRST
162
+ const persistedEvent = await client.persistentGet(path);
163
+ if (persistedEvent) {
164
+ // Trigger background refresh (non-blocking)
165
+ client.refreshPathInBackground(path, kind, options);
166
+ return JSON.parse(persistedEvent.content);
167
+ }
168
+
169
+ // Fallback to relay query only if not in local storage
170
+ return queryRelays(client, path, kind, options);
171
+ }
172
+
173
+ // nostrGetAll() - collection
174
+ export async function nostrGetAll(client, pathPrefix, kind, options) {
175
+ // LOCAL-FIRST: Check persistent storage FIRST
176
+ const persistedEvents = await client.persistentGetAll(pathPrefix);
177
+ if (persistedEvents.length > 0) {
178
+ // Trigger background refresh (non-blocking)
179
+ client.refreshPrefixInBackground(pathPrefix, kind, options);
180
+ return parseEvents(persistedEvents);
181
+ }
182
+
183
+ // Fallback to relay query
184
+ return queryRelays(client, pathPrefix, kind, options);
185
+ }
186
+ ```
187
+
188
+ ---
189
+
190
+ ## Configuration Options
191
+
192
+ ```javascript
193
+ const hs = new HoloSphere({
194
+ appName: 'myapp',
195
+ relays: ['wss://relay.damus.io'],
196
+ privateKey: 'hex...',
197
+
198
+ // Local-first options
199
+ persistence: true, // Enable persistent storage (default: true)
200
+ backgroundSync: true, // Enable SyncService (default: true)
201
+ syncInterval: 10000, // Sync interval in ms (default: 10000)
202
+ maxRetries: 5, // Max retry attempts (default: 5)
203
+ retryBaseDelay: 1000, // Base delay for backoff (default: 1000ms)
204
+ retryMaxDelay: 60000, // Max delay cap (default: 60000ms)
205
+ failedTTL: 86400000, // TTL for failed events (default: 24 hours)
206
+ });
207
+ ```
208
+
209
+ ---
210
+
211
+ ## Comparison: Before vs After
212
+
213
+ ### Before (Relay-First with Local Caching)
214
+
215
+ ```
216
+ Read: Memory cache → NETWORK (blocking up to 30s)
217
+ Write: Cache → Fire-and-forget to relays (no retry)
218
+ ```
219
+
220
+ **Problems:**
221
+ - Cold reads block on network
222
+ - Writes can be lost if relay fails
223
+ - No guaranteed delivery
224
+
225
+ ### After (Local-First with Relay Sync)
226
+
227
+ ```
228
+ Read: Persistent storage → Return immediately → Background refresh
229
+ Write: Cache → Persist → OutboxQueue → Retry until delivered
230
+ ```
231
+
232
+ **Benefits:**
233
+ - Reads never block on network
234
+ - Writes guaranteed to eventually deliver
235
+ - Works fully offline
236
+ - Data survives app restarts
237
+
238
+ ---
239
+
240
+ ## Remaining Limitations
241
+
242
+ | Feature | Status | Notes |
243
+ |---------|--------|-------|
244
+ | Conflict resolution | Last-write-wins | No CRDTs; timestamps determine winner |
245
+ | Multi-device sync | Via relays | No direct peer-to-peer sync |
246
+ | Real-time collaboration | Limited | No operational transforms |
247
+
248
+ ### Future Enhancements
249
+
250
+ 1. **CRDTs** - Automerge/Yjs for collaborative editing
251
+ 2. **Direct sync** - mDNS/local network device discovery
252
+ 3. **Conflict UI** - Surface conflicts for user resolution
253
+
254
+ ---
255
+
256
+ ## References
257
+
258
+ - [Local-First Software (Ink & Switch)](https://www.inkandswitch.com/local-first/)
259
+ - [Nostr Protocol Specification](https://github.com/nostr-protocol/nips)
260
+ - [CRDTs and Local-First](https://crdt.tech/)
261
+
262
+ ---
263
+
264
+ *Document Version: 2.0*
265
+ *Last Updated: December 2025*
266
+ *Status: Implemented*