nappup 1.5.9 → 1.6.0

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
package/package.json CHANGED
@@ -6,7 +6,7 @@
6
6
  "url": "git+https://github.com/44billion/nappup.git"
7
7
  },
8
8
  "license": "MIT",
9
- "version": "1.5.9",
9
+ "version": "1.6.0",
10
10
  "description": "Nostr App Uploader",
11
11
  "type": "module",
12
12
  "scripts": {
package/src/index.js CHANGED
@@ -1,13 +1,13 @@
1
1
  import NMMR from 'nmmr'
2
2
  import { appEncode } from '#helpers/nip19.js'
3
- import Base93Encoder from '#services/base93-encoder.js'
4
3
  import nostrRelays, { nappRelays } from '#services/nostr-relays.js'
5
4
  import NostrSigner from '#services/nostr-signer.js'
6
5
  import { streamToChunks, streamToText } from '#helpers/stream.js'
7
6
  import { isNostrAppDTagSafe, deriveNostrAppDTag } from '#helpers/app.js'
8
7
  import { extractHtmlMetadata, findFavicon, findIndexFile } from '#helpers/app-metadata.js'
9
- import { stringifyEvent } from '#helpers/event.js'
10
8
  import { NAPP_CATEGORIES } from '#config/napp-categories.js'
9
+ import { getBlossomServers, healthCheckServers, uploadFilesToBlossom } from '#services/blossom-upload.js'
10
+ import { uploadBinaryDataChunks, throttledSendEvent } from '#services/irfs-upload.js'
11
11
 
12
12
  export default async function (...args) {
13
13
  try {
@@ -68,6 +68,20 @@ export async function toApp (fileList, nostrSigner, { log = () => {}, dTag, dTag
68
68
 
69
69
  let pause = 1000
70
70
 
71
+ // Check for blossom servers
72
+ log('Checking for blossom servers...')
73
+ const blossomServerUrls = await getBlossomServers(nostrSigner, writeRelays)
74
+ let healthyBlossomServers = []
75
+ if (blossomServerUrls.length > 0) {
76
+ log(`Found ${blossomServerUrls.length} blossom servers: ${blossomServerUrls.join(', ')}`)
77
+ healthyBlossomServers = await healthCheckServers(blossomServerUrls, nostrSigner, { log })
78
+ log(`${healthyBlossomServers.length} of ${blossomServerUrls.length} blossom servers are healthy`)
79
+ } else {
80
+ log('No blossom servers configured, will use relay-based file upload (irfs)')
81
+ }
82
+
83
+ const useBlossom = healthyBlossomServers.length > 0
84
+
71
85
  // Upload icon from napp.json if present
72
86
  if (nappJson.stallIcon?.[0]?.[0]) {
73
87
  try {
@@ -80,19 +94,41 @@ export async function toApp (fileList, nostrSigner, { log = () => {}, dTag, dTag
80
94
 
81
95
  log('Uploading icon from napp.json')
82
96
 
83
- nmmr = new NMMR()
84
- const stream = blob.stream()
85
- let chunkLength = 0
86
- for await (const chunk of streamToChunks(stream, 51000)) {
87
- chunkLength++
88
- await nmmr.append(chunk)
97
+ if (useBlossom) {
98
+ const { uploadedFiles, failedFiles } = await uploadFilesToBlossom({
99
+ fileList: [Object.assign(blob, { webkitRelativePath: `_/${filename}` })],
100
+ servers: healthyBlossomServers,
101
+ signer: nostrSigner,
102
+ shouldReupload,
103
+ log
104
+ })
105
+ if (uploadedFiles.length > 0) {
106
+ iconMetadata = {
107
+ rootHash: uploadedFiles[0].sha256,
108
+ mimeType,
109
+ service: 'blossom'
110
+ }
111
+ } else if (failedFiles.length > 0) {
112
+ log('Blossom icon upload failed, falling back to relay upload')
113
+ }
89
114
  }
90
115
 
91
- if (chunkLength) {
92
- ;({ pause } = (await uploadBinaryDataChunks({ nmmr, signer: nostrSigner, filename, chunkLength, log, pause, mimeType, shouldReupload })))
93
- iconMetadata = {
94
- rootHash: nmmr.getRoot(),
95
- mimeType
116
+ if (!iconMetadata) {
117
+ nmmr = new NMMR()
118
+ const stream = blob.stream()
119
+ let chunkLength = 0
120
+ for await (const chunk of streamToChunks(stream, 51000)) {
121
+ chunkLength++
122
+ await nmmr.append(chunk)
123
+ }
124
+
125
+ if (chunkLength) {
126
+ ;({ pause } = (await uploadBinaryDataChunks({ nmmr, signer: nostrSigner, filename, chunkLength, log, pause, mimeType, shouldReupload })))
127
+ iconMetadata = {
128
+ rootHash: nmmr.getRoot(),
129
+ mimeType,
130
+ service: 'irfs'
131
+ }
96
132
  }
97
133
  }
98
134
  } catch (e) {
@@ -101,7 +137,45 @@ export async function toApp (fileList, nostrSigner, { log = () => {}, dTag, dTag
101
137
  }
102
138
 
103
139
  log(`Processing ${fileList.length} files`)
104
- for (const file of fileList) {
140
+
141
+ // Files to upload via relay (irfs) — either all files or blossom failures
142
+ let irfsFileList = fileList
143
+
144
+ if (useBlossom) {
145
+ const { uploadedFiles, failedFiles } = await uploadFilesToBlossom({
146
+ fileList,
147
+ servers: healthyBlossomServers,
148
+ signer: nostrSigner,
149
+ shouldReupload,
150
+ log
151
+ })
152
+
153
+ for (const uploaded of uploadedFiles) {
154
+ fileMetadata.push({
155
+ rootHash: uploaded.sha256,
156
+ filename: uploaded.filename,
157
+ mimeType: uploaded.mimeType,
158
+ service: 'blossom'
159
+ })
160
+
161
+ if (faviconFile && uploaded.file === faviconFile) {
162
+ iconMetadata = {
163
+ rootHash: uploaded.sha256,
164
+ mimeType: uploaded.mimeType,
165
+ service: 'blossom'
166
+ }
167
+ }
168
+ }
169
+
170
+ if (failedFiles.length > 0) {
171
+ log(`${failedFiles.length} files failed blossom upload, falling back to relay upload (irfs)`)
172
+ irfsFileList = failedFiles.map(f => f.file)
173
+ } else {
174
+ irfsFileList = []
175
+ }
176
+ }
177
+
178
+ for (const file of irfsFileList) {
105
179
  nmmr = new NMMR()
106
180
  const stream = file.stream()
107
181
 
@@ -118,13 +192,15 @@ export async function toApp (fileList, nostrSigner, { log = () => {}, dTag, dTag
118
192
  fileMetadata.push({
119
193
  rootHash: nmmr.getRoot(),
120
194
  filename,
121
- mimeType: file.type || 'application/octet-stream'
195
+ mimeType: file.type || 'application/octet-stream',
196
+ service: 'irfs'
122
197
  })
123
198
 
124
199
  if (faviconFile && file === faviconFile) {
125
200
  iconMetadata = {
126
201
  rootHash: nmmr.getRoot(),
127
- mimeType: file.type || 'application/octet-stream'
202
+ mimeType: file.type || 'application/octet-stream',
203
+ service: 'irfs'
128
204
  }
129
205
  }
130
206
  }
@@ -165,193 +241,6 @@ export async function toApp (fileList, nostrSigner, { log = () => {}, dTag, dTag
165
241
  log(`Visit at https://44billion.net/${appEntity}`)
166
242
  }
167
243
 
168
- async function uploadBinaryDataChunks ({ nmmr, signer, filename, chunkLength, log, pause = 0, mimeType, shouldReupload = false }) {
169
- const pubkey = await signer.getPublicKey()
170
- const writeRelays = (await signer.getRelays()).write
171
- const relays = [...new Set([...writeRelays, ...nappRelays].map(r => r.trim().replace(/\/$/, '')))]
172
-
173
- // Find max stored created_at for this file's chunks
174
- const rootHash = nmmr.getRoot()
175
- const allCTags = Array.from({ length: chunkLength }, (_, i) => `${rootHash}:${i}`)
176
- let maxStoredCreatedAt = 0
177
-
178
- for (let i = 0; i < allCTags.length; i += 100) {
179
- const batch = allCTags.slice(i, i + 100)
180
- const storedEvents = (await nostrRelays.getEvents({
181
- kinds: [34600],
182
- authors: [pubkey],
183
- '#c': batch,
184
- limit: 1
185
- }, relays)).result
186
-
187
- if (storedEvents.length > 0) {
188
- // Since results are desc sorted by created_at, the first one is the max
189
- maxStoredCreatedAt = Math.max(maxStoredCreatedAt, storedEvents[0].created_at)
190
- }
191
- }
192
-
193
- // Set initial created_at based on what's higher, maxStoredCreatedAt or current time
194
- let createdAtCursor = (Math.max(maxStoredCreatedAt, Math.floor(Date.now() / 1000)) + chunkLength)
195
-
196
- let chunkIndex = 0
197
- for await (const chunk of nmmr.getChunks()) {
198
- const dTag = chunk.x
199
- const currentCtag = `${chunk.rootX}:${chunk.index}`
200
- const { otherCtags, hasCurrentCtag, foundEvent, missingRelays } = await getPreviousCtags(dTag, currentCtag, relays, signer)
201
- if (!shouldReupload && hasCurrentCtag) {
202
- // Handling of partial uploads/resumes:
203
- // If we are observing an existing chunk, we use its created_at to re-align our cursor
204
- // for the next chunks (so next chunk will be this_chunk_time - 1)
205
- if (foundEvent) {
206
- createdAtCursor = foundEvent.created_at - 1
207
- }
208
-
209
- if (missingRelays.length === 0) {
210
- log(`${filename}: Skipping chunk ${++chunkIndex} of ${chunkLength} (already uploaded)`)
211
- continue
212
- }
213
- log(`${filename}: Re-uploading chunk ${++chunkIndex} of ${chunkLength} to ${missingRelays.length} missing relays (out of ${relays.length})`)
214
- ;({ pause } = (await throttledSendEvent(foundEvent, missingRelays, { pause, log, trailingPause: true, minSuccessfulRelays: 0 })))
215
- continue
216
- }
217
-
218
- const effectiveCreatedAt = createdAtCursor
219
- // The lower chunk index, the higher created_at must be
220
- // for relays to serve chunks in the most efficient order
221
- createdAtCursor--
222
-
223
- const binaryDataChunk = {
224
- kind: 34600,
225
- tags: [
226
- ['d', dTag],
227
- ...otherCtags,
228
- ['c', currentCtag, chunk.length, ...chunk.proof],
229
- ...(mimeType ? [['m', mimeType]] : [])
230
- ],
231
- // These chunks already have the expected size of 51000 bytes
232
- content: new Base93Encoder().update(chunk.contentBytes).getEncoded(),
233
- created_at: effectiveCreatedAt
234
- }
235
-
236
- const event = await signer.signEvent(binaryDataChunk)
237
- const fallbackRelayCount = relays.length - writeRelays.length
238
- log(`${filename}: Uploading file part ${++chunkIndex} of ${chunkLength} to ${writeRelays.length} relays${fallbackRelayCount > 0 ? ` (+${fallbackRelayCount} fallback)` : ''}`)
239
- ;({ pause } = (await throttledSendEvent(event, relays, { pause, log, trailingPause: true })))
240
- }
241
- return { pause }
242
- }
243
-
244
- async function throttledSendEvent (event, relays, {
245
- pause, log,
246
- retries = 0, maxRetries = 10,
247
- minSuccessfulRelays = 1,
248
- leadingPause = false, trailingPause = false
249
- }) {
250
- if (pause && leadingPause) await new Promise(resolve => setTimeout(resolve, pause))
251
- if (retries > 0) log(`Retrying upload to ${relays.length} relays: ${relays.join(', ')}`)
252
-
253
- const { errors } = (await nostrRelays.sendEvent(event, relays, 15000))
254
- if (errors.length === 0) {
255
- if (pause && trailingPause) await new Promise(resolve => setTimeout(resolve, pause))
256
- return { pause }
257
- }
258
-
259
- const [rateLimitErrors, maybeUnretryableErrors, unretryableErrors] =
260
- errors.reduce((r, v) => {
261
- const message = v.reason?.message ?? ''
262
- if (message.startsWith('rate-limited:')) r[0].push(v)
263
- // https://github.com/nbd-wtf/nostr-tools/blob/28f7553187d201088c8a1009365db4ecbe03e568/abstract-relay.ts#L311
264
- else if (message === 'publish timed out') r[1].push(v)
265
- else r[2].push(v)
266
- return r
267
- }, [[], [], []])
268
-
269
- // One-time special retry
270
- if (maybeUnretryableErrors.length > 0) {
271
- const timedOutRelays = maybeUnretryableErrors.map(v => v.relay)
272
- log(`${maybeUnretryableErrors.length} timeout errors, retrying once after ${pause}ms:\n${maybeUnretryableErrors.map(v => `${v.relay}: ${v.reason.message}`).join('; ')}`)
273
- if (pause) await new Promise(resolve => setTimeout(resolve, pause))
274
- const { errors: timeoutRetryErrors } = await nostrRelays.sendEvent(event, timedOutRelays, 15000)
275
- unretryableErrors.push(...timeoutRetryErrors)
276
- }
277
-
278
- if (unretryableErrors.length > 0) {
279
- log(`${unretryableErrors.length} unretryable errors:\n${unretryableErrors.map(v => `${v.relay}: ${v.reason.message}`).join('; ')}`)
280
- console.log('Erroed event:', stringifyEvent(event))
281
- }
282
- const maybeSuccessfulRelays = relays.length - unretryableErrors.length
283
- const hasReachedMaxRetries = retries > maxRetries
284
- if (
285
- hasReachedMaxRetries ||
286
- maybeSuccessfulRelays < minSuccessfulRelays
287
- ) {
288
- const finalErrors = [...rateLimitErrors, ...unretryableErrors]
289
- throw new Error(finalErrors.map(v => `\n${v.relay}: ${v.reason}`).join('\n'))
290
- }
291
-
292
- if (rateLimitErrors.length === 0) {
293
- if (pause && trailingPause) await new Promise(resolve => setTimeout(resolve, pause))
294
- return { pause }
295
- }
296
-
297
- const erroedRelays = rateLimitErrors.map(v => v.relay)
298
- log(`Rate limited by ${erroedRelays.length} relays, pausing for ${pause + 2000} ms`)
299
- await new Promise(resolve => setTimeout(resolve, (pause += 2000)))
300
-
301
- // Subtracts the successful publishes from the original minSuccessfulRelays goal
302
- minSuccessfulRelays = Math.max(0, minSuccessfulRelays - (relays.length - erroedRelays.length - unretryableErrors.length))
303
- return await throttledSendEvent(event, erroedRelays, {
304
- pause, log, retries: ++retries, maxRetries, minSuccessfulRelays, leadingPause: false, trailingPause
305
- })
306
- }
307
-
308
- async function getPreviousCtags (dTagValue, currentCtagValue, relays, signer) {
309
- const targetRelays = [...new Set([...relays, ...nappRelays].map(r => r.trim().replace(/\/$/, '')))]
310
- const storedEvents = (await nostrRelays.getEvents({
311
- kinds: [34600],
312
- authors: [await signer.getPublicKey()],
313
- '#d': [dTagValue],
314
- limit: 1
315
- }, targetRelays)).result
316
-
317
- let hasCurrentCtag = false
318
- const hasEvent = storedEvents.length > 0
319
- if (!hasEvent) return { otherCtags: [], hasEvent, hasCurrentCtag }
320
-
321
- const cTagValues = { [currentCtagValue]: true }
322
- storedEvents.sort((a, b) => b.created_at - a.created_at)
323
- const bestEvent = storedEvents[0]
324
- const prevTags = bestEvent.tags
325
-
326
- if (!Array.isArray(prevTags)) return { otherCtags: [], hasEvent, hasCurrentCtag }
327
-
328
- hasCurrentCtag = prevTags.some(tag =>
329
- Array.isArray(tag) &&
330
- tag[0] === 'c' &&
331
- tag[1] === currentCtagValue
332
- )
333
-
334
- const otherCtags = prevTags
335
- .filter(v => {
336
- const isCTag =
337
- Array.isArray(v) &&
338
- v[0] === 'c' &&
339
- typeof v[1] === 'string' &&
340
- /^[0-9a-f]{64}:\d+$/.test(v[1])
341
- if (!isCTag) return false
342
-
343
- const isntDuplicate = !cTagValues[v[1]]
344
- cTagValues[v[1]] = true
345
- return isCTag && isntDuplicate
346
- })
347
-
348
- const matchingEvents = storedEvents.filter(e => e.id === bestEvent.id)
349
- const coveredRelays = new Set(matchingEvents.map(e => e.meta?.relay).filter(Boolean))
350
- const missingRelays = targetRelays.filter(r => !coveredRelays.has(r))
351
-
352
- return { otherCtags, hasEvent, hasCurrentCtag, foundEvent: bestEvent, missingRelays }
353
- }
354
-
355
244
  async function uploadBundle ({ dTag, channel, fileMetadata, signer, pause = 0, shouldReupload = false, log = () => {} }) {
356
245
  const kind = {
357
246
  main: 37448, // stable
@@ -359,7 +248,7 @@ async function uploadBundle ({ dTag, channel, fileMetadata, signer, pause = 0, s
359
248
  draft: 37450 // vibe coded preview
360
249
  }[channel] ?? 37448
361
250
 
362
- const fileTags = fileMetadata.map(v => ['file', v.rootHash, v.filename, v.mimeType])
251
+ const fileTags = fileMetadata.map(v => ['file', v.rootHash, v.filename, v.mimeType, v.service || 'irfs'])
363
252
  const tags = [
364
253
  ['d', dTag],
365
254
  ...fileTags
@@ -396,7 +285,7 @@ async function uploadBundle ({ dTag, channel, fileMetadata, signer, pause = 0, s
396
285
 
397
286
  const isSame = currentFileTags.length === recentFileTags.length && currentFileTags.every((t, i) => {
398
287
  const rt = recentFileTags[i]
399
- return rt.length >= 4 && rt[1] === t[1] && rt[2] === t[2] && rt[3] === t[3]
288
+ return rt.length >= 4 && rt[1] === t[1] && rt[2] === t[2] && rt[3] === t[3] && (rt[4] || 'irfs') === (t[4] || 'irfs')
400
289
  })
401
290
 
402
291
  if (isSame) {
@@ -457,6 +346,7 @@ async function maybeUploadStall ({
457
346
  const trimmedSummary = typeof summary === 'string' ? summary.trim() : ''
458
347
  const iconRootHash = icon?.rootHash
459
348
  const iconMimeType = icon?.mimeType
349
+ const iconService = icon?.service || 'irfs'
460
350
  const hasMetadata = Boolean(trimmedName) || Boolean(trimmedSummary) || Boolean(iconRootHash) ||
461
351
  Boolean(self) || (countries && countries.length > 0) || (categories && categories.length > 0) || (hashtags && hashtags.length > 0)
462
352
 
@@ -523,7 +413,7 @@ async function maybeUploadStall ({
523
413
  let hasName = false
524
414
  if (iconRootHash && iconMimeType) {
525
415
  hasIcon = true
526
- tags.push(['icon', iconRootHash, iconMimeType])
416
+ tags.push(['icon', iconRootHash, iconMimeType, iconService])
527
417
  if (isIconAuto) tags.push(['auto', 'icon'])
528
418
  }
529
419
 
@@ -693,7 +583,7 @@ async function maybeUploadStall ({
693
583
  if (iconRootHash && iconMimeType) {
694
584
  if (!isIconAuto || hasAuto('icon')) {
695
585
  ensureTagValue('icon', (_) => {
696
- return ['icon', iconRootHash, iconMimeType]
586
+ return ['icon', iconRootHash, iconMimeType, iconService]
697
587
  })
698
588
  if (!isIconAuto) removeAuto('icon')
699
589
  }
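
On the published events, the practical effect of these changes is an extra trailing element on 'file' and 'icon' tags naming the storage service ('blossom' or 'irfs', with 'irfs' assumed when the element is absent). Illustrative tags only, with placeholder hashes:

  ['file', '<sha256 or nmmr root hash>', 'index.html', 'text/html', 'blossom']
  ['icon', '<sha256 or nmmr root hash>', 'image/png', 'irfs']
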
package/src/services/blossom-upload.js ADDED
@@ -0,0 +1,194 @@
1
+ import { BlossomClient } from 'nostr-tools/nipb7'
2
+ import nostrRelays from '#services/nostr-relays.js'
3
+ import { bytesToBase16 } from '#helpers/base16.js'
4
+
5
+ /**
6
+ * Fetches the user's blossom server list from their kind 10063 event.
7
+ * Returns an array of server URLs, or empty array if none configured.
8
+ */
9
+ export async function getBlossomServers (signer, writeRelays) {
10
+ const pubkey = await signer.getPublicKey()
11
+ const events = (await nostrRelays.getEvents({
12
+ kinds: [10063],
13
+ authors: [pubkey],
14
+ limit: 1
15
+ }, writeRelays)).result
16
+
17
+ if (events.length === 0) return []
18
+
19
+ events.sort((a, b) => b.created_at - a.created_at)
20
+ const best = events[0]
21
+
22
+ return (best.tags ?? [])
23
+ .filter(t => Array.isArray(t) && t[0] === 'server' && /^https?:\/\//.test(t[1]))
24
+ .map(t => t[1].trim().replace(/\/$/, ''))
25
+ .filter(Boolean)
26
+ }
27
+
28
+ /**
29
+ * Health-checks blossom servers using the `check` method with a random sha256 hash.
30
+ * A server is considered healthy if the check call completes without a network-level error.
31
+ * The check is expected to fail with a 404 (blob not found), which is fine — it means the server is up.
32
+ * Returns the subset of servers that are reachable.
33
+ */
34
+ export async function healthCheckServers (servers, signer, { log = () => {} } = {}) {
35
+ const randomBytes = crypto.getRandomValues(new Uint8Array(32))
36
+ const hashBuffer = await crypto.subtle.digest('SHA-256', randomBytes)
37
+ const randomHash = bytesToBase16(new Uint8Array(hashBuffer))
38
+
39
+ const results = await Promise.allSettled(
40
+ servers.map(async (serverUrl) => {
41
+ const client = new BlossomClient(serverUrl, signer)
42
+ try {
43
+ await client.check(randomHash)
44
+ } catch (err) {
45
+ // check() throws on non-2xx. A 404 means the server is up but blob doesn't exist — that's fine.
46
+ // We only want to filter out servers that are truly unreachable (network errors).
47
+ const message = err?.message ?? ''
48
+ if (message.includes('returned an error')) {
49
+ // Server responded with an HTTP error — it's reachable
50
+ return serverUrl
51
+ }
52
+ throw err
53
+ }
54
+ return serverUrl
55
+ })
56
+ )
57
+
58
+ const healthy = []
59
+ for (let i = 0; i < results.length; i++) {
60
+ if (results[i].status === 'fulfilled') {
61
+ healthy.push(results[i].value)
62
+ } else {
63
+ log(`Blossom server ${servers[i]} is unreachable: ${results[i].reason?.message ?? results[i].reason}`)
64
+ }
65
+ }
66
+ return healthy
67
+ }
68
+
69
+ /**
70
+ * Computes the sha256 hex hash of a File/Blob.
71
+ */
72
+ export async function computeFileHash (file) {
73
+ const bytes = new Uint8Array(await file.arrayBuffer())
74
+ const hashBuffer = await crypto.subtle.digest('SHA-256', bytes)
75
+ return bytesToBase16(new Uint8Array(hashBuffer))
76
+ }
77
+
78
+ /**
79
+ * Uploads a single file to a single blossom server with retry+backoff.
80
+ * Returns { success: true, descriptor } or { success: false, error }.
81
+ */
82
+ async function uploadFileToServer (client, file, fileHash, mimeType, { shouldReupload, log, maxRetries = 5 }) {
83
+ // Check if already uploaded
84
+ if (!shouldReupload) {
85
+ try {
86
+ await client.check(fileHash)
87
+ // File already exists on this server
88
+ return { success: true, alreadyExists: true }
89
+ } catch {
90
+ // Not found — proceed to upload
91
+ }
92
+ }
93
+
94
+ let pause = 1000
95
+ for (let attempt = 0; attempt <= maxRetries; attempt++) {
96
+ try {
97
+ if (attempt > 0) {
98
+ log(`Retrying upload to ${client.mediaserver} (attempt ${attempt + 1}/${maxRetries + 1})`)
99
+ await new Promise(resolve => setTimeout(resolve, pause))
100
+ pause += 2000
101
+ }
102
+ const descriptor = await client.uploadBlob(file, mimeType)
103
+ return { success: true, descriptor }
104
+ } catch (err) {
105
+ if (attempt === maxRetries) {
106
+ return { success: false, error: err }
107
+ }
108
+ }
109
+ }
110
+ return { success: false, error: new Error('Max retries exceeded') }
111
+ }
112
+
113
+ /**
114
+ * Uploads all files to blossom servers.
115
+ *
116
+ * For each server, files are uploaded one at a time (sequentially).
117
+ * Different servers run in parallel.
118
+ *
119
+ * Returns { uploadedFiles: [...], failedFiles: [...] }
120
+ * where each uploadedFile has { file, filename, sha256, mimeType }
121
+ * and each failedFile has { file, filename, mimeType, errors }.
122
+ */
123
+ export async function uploadFilesToBlossom ({
124
+ fileList,
125
+ servers,
126
+ signer,
127
+ shouldReupload = false,
128
+ maxRetries = 5,
129
+ log = () => {}
130
+ }) {
131
+ if (servers.length === 0) return { uploadedFiles: [], failedFiles: [...fileList.map(f => ({ file: f }))] }
132
+
133
+ // Pre-compute file info
134
+ const fileInfos = await Promise.all(
135
+ fileList.map(async (file) => {
136
+ const filename = file.webkitRelativePath.split('/').slice(1).join('/')
137
+ const mimeType = file.type || 'application/octet-stream'
138
+ const fileHash = await computeFileHash(file)
139
+ return { file, filename, mimeType, sha256: fileHash }
140
+ })
141
+ )
142
+
143
+ // For each file, track which servers accepted it
144
+ const fileServerResults = fileInfos.map(() => ({ successCount: 0, errors: [] }))
145
+
146
+ // Upload to each server in parallel, but within a server, upload files sequentially
147
+ const serverTasks = servers.map(async (serverUrl) => {
148
+ const client = new BlossomClient(serverUrl, signer)
149
+
150
+ for (let i = 0; i < fileInfos.length; i++) {
151
+ const info = fileInfos[i]
152
+ log(`Uploading ${info.filename} to ${serverUrl}`)
153
+ const result = await uploadFileToServer(client, info.file, info.sha256, info.mimeType, { shouldReupload, log, maxRetries })
154
+
155
+ if (result.success) {
156
+ fileServerResults[i].successCount++
157
+ if (result.alreadyExists) {
158
+ log(`${info.filename}: Already exists on ${serverUrl}`)
159
+ } else {
160
+ log(`${info.filename}: Uploaded to ${serverUrl}`)
161
+ }
162
+ } else {
163
+ fileServerResults[i].errors.push({ server: serverUrl, error: result.error })
164
+ log(`${info.filename}: Failed to upload to ${serverUrl}: ${result.error?.message ?? result.error}`)
165
+ }
166
+ }
167
+ })
168
+
169
+ await Promise.allSettled(serverTasks)
170
+
171
+ const uploadedFiles = []
172
+ const failedFiles = []
173
+
174
+ for (let i = 0; i < fileInfos.length; i++) {
175
+ const info = fileInfos[i]
176
+ if (fileServerResults[i].successCount > 0) {
177
+ uploadedFiles.push({
178
+ file: info.file,
179
+ filename: info.filename,
180
+ sha256: info.sha256,
181
+ mimeType: info.mimeType
182
+ })
183
+ } else {
184
+ failedFiles.push({
185
+ file: info.file,
186
+ filename: info.filename,
187
+ mimeType: info.mimeType,
188
+ errors: fileServerResults[i].errors
189
+ })
190
+ }
191
+ }
192
+
193
+ return { uploadedFiles, failedFiles }
194
+ }
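
The exports above are consumed by src/index.js in this order: fetch the kind 10063 server list, health-check it, then upload. A minimal sketch of that sequence, for orientation only; the wrapper name tryBlossomUpload is hypothetical, signer is any Nostr signer the package already uses (e.g. its NostrSigner), and the File entries are assumed to carry webkitRelativePath as uploadFilesToBlossom expects:

  import { getBlossomServers, healthCheckServers, uploadFilesToBlossom } from '#services/blossom-upload.js'

  async function tryBlossomUpload (fileList, signer, writeRelays, log = () => {}) {
    // Kind 10063 server list published by the user (empty array if none configured)
    const serverUrls = await getBlossomServers(signer, writeRelays)
    if (serverUrls.length === 0) return null // caller falls back to relay (irfs) upload

    // A server counts as healthy if it answers at all; a 404 for the random hash is fine
    const healthy = await healthCheckServers(serverUrls, signer, { log })
    if (healthy.length === 0) return null

    // Servers run in parallel; within one server, files upload sequentially
    return uploadFilesToBlossom({ fileList, servers: healthy, signer, log })
  }

  // Usage: const result = await tryBlossomUpload(fileList, nostrSigner, writeRelays, log)
  // When non-null, result is { uploadedFiles, failedFiles }; failed files are retried over relays.
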
package/src/services/irfs-upload.js ADDED
@@ -0,0 +1,241 @@
1
+ import nostrRelays, { nappRelays } from '#services/nostr-relays.js'
2
+ import Base93Encoder from '#services/base93-encoder.js'
3
+ import { stringifyEvent } from '#helpers/event.js'
4
+
5
+ /**
6
+ * Uploads binary data chunks for a file to Nostr relays using the InterRelay File System (IRFS).
7
+ *
8
+ * Splits file content via NMMR (Nostr Merkle Mountain Range) into chunks,
9
+ * encodes each chunk with Base93, and publishes them as kind 34600 events.
10
+ * Supports resume via created_at cursor alignment with previously stored chunks.
11
+ *
12
+ * @param {object} params
13
+ * @param {object} params.nmmr - NMMR instance with chunks already appended
14
+ * @param {object} params.signer - Nostr signer with getPublicKey(), getRelays(), signEvent()
15
+ * @param {string} params.filename - Display name of the file being uploaded
16
+ * @param {number} params.chunkLength - Total number of chunks
17
+ * @param {Function} params.log - Logging function
18
+ * @param {number} [params.pause=0] - Current pause duration in ms (for rate-limit backoff)
19
+ * @param {string} params.mimeType - MIME type of the file
20
+ * @param {boolean} [params.shouldReupload=false] - Whether to force re-upload existing chunks
21
+ * @returns {Promise<{pause: number}>} Updated pause duration
22
+ */
23
+ export async function uploadBinaryDataChunks ({ nmmr, signer, filename, chunkLength, log, pause = 0, mimeType, shouldReupload = false }) {
24
+ const pubkey = await signer.getPublicKey()
25
+ const writeRelays = (await signer.getRelays()).write
26
+ const relays = [...new Set([...writeRelays, ...nappRelays].map(r => r.trim().replace(/\/$/, '')))]
27
+
28
+ // Find max stored created_at for this file's chunks
29
+ const rootHash = nmmr.getRoot()
30
+ const allCTags = Array.from({ length: chunkLength }, (_, i) => `${rootHash}:${i}`)
31
+ let maxStoredCreatedAt = 0
32
+
33
+ for (let i = 0; i < allCTags.length; i += 100) {
34
+ const batch = allCTags.slice(i, i + 100)
35
+ const storedEvents = (await nostrRelays.getEvents({
36
+ kinds: [34600],
37
+ authors: [pubkey],
38
+ '#c': batch,
39
+ limit: 1
40
+ }, relays)).result
41
+
42
+ if (storedEvents.length > 0) {
43
+ const batchMaxCreatedAt = storedEvents.reduce((m, e) => Math.max(m, (e && typeof e.created_at === 'number') ? e.created_at : 0), 0)
44
+ if (batchMaxCreatedAt > maxStoredCreatedAt) maxStoredCreatedAt = batchMaxCreatedAt
45
+ }
46
+ }
47
+
48
+ // Set initial created_at based on what's higher, maxStoredCreatedAt or current time
49
+ let createdAtCursor = (Math.max(maxStoredCreatedAt, Math.floor(Date.now() / 1000)) + chunkLength)
50
+
51
+ let chunkIndex = 0
52
+ for await (const chunk of nmmr.getChunks()) {
53
+ const dTag = chunk.x
54
+ const currentCtag = `${chunk.rootX}:${chunk.index}`
55
+ const { otherCtags, hasCurrentCtag, foundEvent, missingRelays } = await getPreviousCtags(dTag, currentCtag, relays, signer)
56
+ if (!shouldReupload && hasCurrentCtag) {
57
+ // Handling of partial uploads/resumes:
58
+ // If we are observing an existing chunk, we use its created_at to re-align our cursor
59
+ // for the next chunks (so next chunk will be this_chunk_time - 1)
60
+ if (foundEvent) {
61
+ createdAtCursor = foundEvent.created_at - 1
62
+ }
63
+
64
+ if (missingRelays.length === 0) {
65
+ log(`${filename}: Skipping chunk ${++chunkIndex} of ${chunkLength} (already uploaded)`)
66
+ continue
67
+ }
68
+ log(`${filename}: Re-uploading chunk ${++chunkIndex} of ${chunkLength} to ${missingRelays.length} missing relays (out of ${relays.length})`)
69
+ ;({ pause } = (await throttledSendEvent(foundEvent, missingRelays, { pause, log, trailingPause: true, minSuccessfulRelays: 0 })))
70
+ continue
71
+ }
72
+
73
+ const effectiveCreatedAt = createdAtCursor
74
+ // The lower chunk index, the higher created_at must be
75
+ // for relays to serve chunks in the most efficient order
76
+ createdAtCursor--
77
+
78
+ const binaryDataChunk = {
79
+ kind: 34600,
80
+ tags: [
81
+ ['d', dTag],
82
+ ...otherCtags,
83
+ ['c', currentCtag, chunk.length, ...chunk.proof],
84
+ ...(mimeType ? [['m', mimeType]] : [])
85
+ ],
86
+ // These chunks already have the expected size of 51000 bytes
87
+ content: new Base93Encoder().update(chunk.contentBytes).getEncoded(),
88
+ created_at: effectiveCreatedAt
89
+ }
90
+
91
+ const event = await signer.signEvent(binaryDataChunk)
92
+ const fallbackRelayCount = relays.length - writeRelays.length
93
+ log(`${filename}: Uploading file part ${++chunkIndex} of ${chunkLength} to ${writeRelays.length} relays${fallbackRelayCount > 0 ? ` (+${fallbackRelayCount} fallback)` : ''}`)
94
+ ;({ pause } = (await throttledSendEvent(event, relays, { pause, log, trailingPause: true })))
95
+ }
96
+ return { pause }
97
+ }
98
+
99
+ /**
100
+ * Sends a signed Nostr event to relays with retry logic and rate-limit backoff.
101
+ *
102
+ * Handles three error categories:
103
+ * - Rate-limit errors: retries with increasing pause (+2000ms per retry)
104
+ * - Timeout errors: one-time immediate retry
105
+ * - Unretryable errors: logged and counted against success threshold
106
+ *
107
+ * @param {object} event - Signed Nostr event to send
108
+ * @param {string[]} relays - Array of relay URLs
109
+ * @param {object} opts
110
+ * @param {number} opts.pause - Current pause duration in ms
111
+ * @param {Function} opts.log - Logging function
112
+ * @param {number} [opts.retries=0] - Current retry count (used internally)
113
+ * @param {number} [opts.maxRetries=10] - Maximum number of retries
114
+ * @param {number} [opts.minSuccessfulRelays=1] - Minimum relays that must accept the event
115
+ * @param {boolean} [opts.leadingPause=false] - Whether to pause before sending
116
+ * @param {boolean} [opts.trailingPause=false] - Whether to pause after successful send
117
+ * @returns {Promise<{pause: number}>} Updated pause duration
118
+ */
119
+ export async function throttledSendEvent (event, relays, {
120
+ pause, log,
121
+ retries = 0, maxRetries = 10,
122
+ minSuccessfulRelays = 1,
123
+ leadingPause = false, trailingPause = false
124
+ }) {
125
+ if (pause && leadingPause) await new Promise(resolve => setTimeout(resolve, pause))
126
+ if (retries > 0) log(`Retrying upload to ${relays.length} relays: ${relays.join(', ')}`)
127
+
128
+ const { errors } = (await nostrRelays.sendEvent(event, relays, 15000))
129
+ if (errors.length === 0) {
130
+ if (pause && trailingPause) await new Promise(resolve => setTimeout(resolve, pause))
131
+ return { pause }
132
+ }
133
+
134
+ const [rateLimitErrors, maybeUnretryableErrors, unretryableErrors] =
135
+ errors.reduce((r, v) => {
136
+ const message = v.reason?.message ?? ''
137
+ if (message.startsWith('rate-limited:')) r[0].push(v)
138
+ // https://github.com/nbd-wtf/nostr-tools/blob/28f7553187d201088c8a1009365db4ecbe03e568/abstract-relay.ts#L311
139
+ else if (message === 'publish timed out') r[1].push(v)
140
+ else r[2].push(v)
141
+ return r
142
+ }, [[], [], []])
143
+
144
+ // One-time special retry
145
+ if (maybeUnretryableErrors.length > 0) {
146
+ const timedOutRelays = maybeUnretryableErrors.map(v => v.relay)
147
+ log(`${maybeUnretryableErrors.length} timeout errors, retrying once after ${pause}ms:\n${maybeUnretryableErrors.map(v => `${v.relay}: ${v.reason.message}`).join('; ')}`)
148
+ if (pause) await new Promise(resolve => setTimeout(resolve, pause))
149
+ const { errors: timeoutRetryErrors } = await nostrRelays.sendEvent(event, timedOutRelays, 15000)
150
+ unretryableErrors.push(...timeoutRetryErrors)
151
+ }
152
+
153
+ if (unretryableErrors.length > 0) {
154
+ log(`${unretryableErrors.length} unretryable errors:\n${unretryableErrors.map(v => `${v.relay}: ${v.reason.message}`).join('; ')}`)
155
+ console.log('Erroed event:', stringifyEvent(event))
156
+ }
157
+ const maybeSuccessfulRelays = relays.length - unretryableErrors.length
158
+ const hasReachedMaxRetries = retries > maxRetries
159
+ if (
160
+ hasReachedMaxRetries ||
161
+ maybeSuccessfulRelays < minSuccessfulRelays
162
+ ) {
163
+ const finalErrors = [...rateLimitErrors, ...unretryableErrors]
164
+ throw new Error(finalErrors.map(v => `\n${v.relay}: ${v.reason}`).join('\n'))
165
+ }
166
+
167
+ if (rateLimitErrors.length === 0) {
168
+ if (pause && trailingPause) await new Promise(resolve => setTimeout(resolve, pause))
169
+ return { pause }
170
+ }
171
+
172
+ const erroedRelays = rateLimitErrors.map(v => v.relay)
173
+ log(`Rate limited by ${erroedRelays.length} relays, pausing for ${pause + 2000} ms`)
174
+ await new Promise(resolve => setTimeout(resolve, (pause += 2000)))
175
+
176
+ // Subtracts the successful publishes from the original minSuccessfulRelays goal
177
+ minSuccessfulRelays = Math.max(0, minSuccessfulRelays - (relays.length - erroedRelays.length - unretryableErrors.length))
178
+ return await throttledSendEvent(event, erroedRelays, {
179
+ pause, log, retries: ++retries, maxRetries, minSuccessfulRelays, leadingPause: false, trailingPause
180
+ })
181
+ }
182
+
183
+ /**
184
+ * Checks if a chunk (identified by its d-tag) already exists on relays.
185
+ *
186
+ * Returns info about existing c-tags on the stored event (for deduplication),
187
+ * whether the current c-tag is already present, the found event itself,
188
+ * and which relays are missing the event.
189
+ *
190
+ * @param {string} dTagValue - The d-tag value of the chunk event
191
+ * @param {string} currentCtagValue - The current c-tag value (rootHash:index)
192
+ * @param {string[]} relays - Array of relay URLs to check
193
+ * @param {object} signer - Nostr signer with getPublicKey()
194
+ * @returns {Promise<{otherCtags: Array, hasEvent: boolean, hasCurrentCtag: boolean, foundEvent?: object, missingRelays?: string[]}>}
195
+ */
196
+ export async function getPreviousCtags (dTagValue, currentCtagValue, relays, signer) {
197
+ const targetRelays = [...new Set([...relays, ...nappRelays].map(r => r.trim().replace(/\/$/, '')))]
198
+ const storedEvents = (await nostrRelays.getEvents({
199
+ kinds: [34600],
200
+ authors: [await signer.getPublicKey()],
201
+ '#d': [dTagValue],
202
+ limit: 1
203
+ }, targetRelays)).result
204
+
205
+ let hasCurrentCtag = false
206
+ const hasEvent = storedEvents.length > 0
207
+ if (!hasEvent) return { otherCtags: [], hasEvent, hasCurrentCtag }
208
+
209
+ const cTagValues = { [currentCtagValue]: true }
210
+ storedEvents.sort((a, b) => b.created_at - a.created_at)
211
+ const bestEvent = storedEvents[0]
212
+ const prevTags = bestEvent.tags
213
+
214
+ if (!Array.isArray(prevTags)) return { otherCtags: [], hasEvent, hasCurrentCtag }
215
+
216
+ hasCurrentCtag = prevTags.some(tag =>
217
+ Array.isArray(tag) &&
218
+ tag[0] === 'c' &&
219
+ tag[1] === currentCtagValue
220
+ )
221
+
222
+ const otherCtags = prevTags
223
+ .filter(v => {
224
+ const isCTag =
225
+ Array.isArray(v) &&
226
+ v[0] === 'c' &&
227
+ typeof v[1] === 'string' &&
228
+ /^[0-9a-f]{64}:\d+$/.test(v[1])
229
+ if (!isCTag) return false
230
+
231
+ const isntDuplicate = !cTagValues[v[1]]
232
+ cTagValues[v[1]] = true
233
+ return isCTag && isntDuplicate
234
+ })
235
+
236
+ const matchingEvents = storedEvents.filter(e => e.id === bestEvent.id)
237
+ const coveredRelays = new Set(matchingEvents.map(e => e.meta?.relay).filter(Boolean))
238
+ const missingRelays = targetRelays.filter(r => !coveredRelays.has(r))
239
+
240
+ return { otherCtags, hasEvent, hasCurrentCtag, foundEvent: bestEvent, missingRelays }
241
+ }
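
For context, the relay (irfs) fallback in src/index.js drives uploadBinaryDataChunks roughly as sketched below: split the file into 51000-byte chunks with streamToChunks, append them to an NMMR, then publish each chunk as a kind 34600 event. The wrapper name uploadFileViaRelays and its return value are illustrative, not part of the package API:

  import NMMR from 'nmmr'
  import { streamToChunks } from '#helpers/stream.js'
  import { uploadBinaryDataChunks } from '#services/irfs-upload.js'

  async function uploadFileViaRelays (file, filename, signer, { log = () => {}, pause = 1000, shouldReupload = false } = {}) {
    const mimeType = file.type || 'application/octet-stream'

    // Build the Merkle structure over 51000-byte chunks of the file
    const nmmr = new NMMR()
    let chunkLength = 0
    for await (const chunk of streamToChunks(file.stream(), 51000)) {
      chunkLength++
      await nmmr.append(chunk)
    }
    if (!chunkLength) return null

    // Publish every chunk as a kind 34600 event; pause carries rate-limit backoff between files
    ;({ pause } = await uploadBinaryDataChunks({ nmmr, signer, filename, chunkLength, log, pause, mimeType, shouldReupload }))

    return { rootHash: nmmr.getRoot(), filename, mimeType, service: 'irfs', pause }
  }
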