braidfs 0.0.132 → 0.0.134

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/index.js +152 -232
  2. package/package.json +2 -2
package/index.js CHANGED
@@ -2,6 +2,7 @@
 
 var { diff_main } = require(`${__dirname}/diff.js`),
 braid_text = require("braid-text"),
+ braid_blob = require("braid-blob"),
 braid_fetch = require('braid-http').fetch
 
 // Helper function to check if a file is binary based on its extension
@@ -27,6 +28,8 @@ var braidfs_config_dir = `${sync_base}/.braidfs`,
 braidfs_config_file = `${braidfs_config_dir}/config`,
 sync_base_meta = `${braidfs_config_dir}/proxy_base_meta`
 braid_text.db_folder = `${braidfs_config_dir}/braid-text-db`
+ braid_blob.db_folder = { read: () => {}, write: () => {}, delete: () => {} }
+ braid_blob.meta_folder = `${braidfs_config_dir}/braid-blob-meta`
 var trash = `${braidfs_config_dir}/trash`
 var temp_folder = `${braidfs_config_dir}/temp`
 
@@ -138,6 +141,10 @@ You can run it with:
 async function main() {
 process.on("unhandledRejection", (x) => console.log(`unhandledRejection: ${x.stack}`))
 process.on("uncaughtException", (x) => console.log(`uncaughtException: ${x.stack}`))
+
+ await braid_text.db_folder_init()
+ await braid_blob.init()
+
 require('http').createServer(async (req, res) => {
 try {
 // console.log(`${req.method} ${req.url}`)
@@ -213,45 +220,44 @@ async function main() {
 console.log(`daemon started on port ${config.port}`)
 console.log('!! only accessible from localhost !!')
 
- sync_url('.braidfs/config').then(() => {
- braid_text.get('.braidfs/config', {
- subscribe: async update => {
- let prev = config
+ sync_url('.braidfs/config')
+ braid_text.get('.braidfs/config', {
+ subscribe: async update => {
+ let prev = config
 
- let x = await braid_text.get('.braidfs/config')
- try {
- config = JSON.parse(x)
-
- // did anything get deleted?
- var old_syncs = Object.entries(prev.sync).filter(x => x[1]).map(x => normalize_url(x[0]).replace(/^https?:\/\//, ''))
- var new_syncs = new Set(Object.entries(config.sync).filter(x => x[1]).map(x => normalize_url(x[0]).replace(/^https?:\/\//, '')))
- for (let url of old_syncs.filter(x => !new_syncs.has(x)))
- unsync_url(url)
-
- // sync all the new stuff
- for (let x of Object.entries(config.sync)) if (x[1]) sync_url(x[0])
-
- // if any auth stuff has changed,
- // have the appropriate connections reconnect
- let changed = new Set()
- // any old domains no longer exist?
- for (let domain of Object.keys(prev.cookies ?? {}))
- if (!config.cookies?.[domain]) changed.add(domain)
- // any new domains not like the old?
- for (let [domain, v] of Object.entries(config.cookies ?? {}))
- if (!prev.cookies?.[domain]
- || JSON.stringify(prev.cookies[domain]) !== JSON.stringify(v))
- changed.add(domain)
- // ok, have every domain which has changed reconnect
- for (let [path, x] of Object.entries(sync_url.cache))
- if (changed.has(path.split(/\//)[0].split(/:/)[0]))
- (await x).reconnect?.()
- } catch (e) {
- if (x !== '') console.log(`warning: config file is currently invalid.`)
- return
- }
+ let x = await braid_text.get('.braidfs/config')
+ try {
+ config = JSON.parse(x)
+
+ // did anything get deleted?
+ var old_syncs = Object.entries(prev.sync).filter(x => x[1]).map(x => normalize_url(x[0]).replace(/^https?:\/\//, ''))
+ var new_syncs = new Set(Object.entries(config.sync).filter(x => x[1]).map(x => normalize_url(x[0]).replace(/^https?:\/\//, '')))
+ for (let url of old_syncs.filter(x => !new_syncs.has(x)))
+ unsync_url(url)
+
+ // sync all the new stuff
+ for (let x of Object.entries(config.sync)) if (x[1]) sync_url(x[0])
+
+ // if any auth stuff has changed,
+ // have the appropriate connections reconnect
+ let changed = new Set()
+ // any old domains no longer exist?
+ for (let domain of Object.keys(prev.cookies ?? {}))
+ if (!config.cookies?.[domain]) changed.add(domain)
+ // any new domains not like the old?
+ for (let [domain, v] of Object.entries(config.cookies ?? {}))
+ if (!prev.cookies?.[domain]
+ || JSON.stringify(prev.cookies[domain]) !== JSON.stringify(v))
+ changed.add(domain)
+ // ok, have every domain which has changed reconnect
+ for (let [path, x] of Object.entries(sync_url.cache))
+ if (changed.has(path.split(/\//)[0].split(/:/)[0]))
+ (await x).reconnect?.()
+ } catch (e) {
+ if (x !== '') console.log(`warning: config file is currently invalid.`)
+ return
 }
- })
+ }
 })
 sync_url('.braidfs/errors')
 
@@ -410,14 +416,12 @@ function unsync_url(url) {
 delete unsync_url.cache[url]
 }
 
- async function sync_url(url) {
+ function sync_url(url) {
 // normalize url by removing any trailing /index/index/
 var normalized_url = normalize_url(url),
 wasnt_normal = normalized_url != url
 url = normalized_url
 
- await braid_text.db_folder_init()
-
 var is_external_link = url.match(/^https?:\/\//),
 path = is_external_link ? url.replace(/^https?:\/\//, '') : url,
 fullpath = `${sync_base}/${path}`,
@@ -451,7 +455,10 @@ async function sync_url(url) {
 await self.disconnect?.()
 await wait_promise
 
- await braid_text.delete(url)
+ if (self.merge_type === 'dt')
+ await braid_text.delete(url)
+ else if (self.merge_type === 'aww')
+ await braid_blob.delete(url)
 
 try {
 console.log(`trying to delete: ${meta_path}`)
@@ -473,7 +480,6 @@ async function sync_url(url) {
 } else throw new Error(`unknown merge-type: ${self.merge_type}`)
 })()
 }
- return
 
 async function detect_merge_type() {
 // special case for .braidfs/config and .braidfs/error
@@ -520,12 +526,14 @@ async function sync_url(url) {
 await require('fs').promises.writeFile(meta_path, JSON.stringify({
 merge_type: self.merge_type,
 peer: self.peer,
- version: self.version,
 file_mtimeNs_str: self.file_mtimeNs_str
 }))
 }
 
- await within_fiber(url, async () => {
+ self.file_mtimeNs_str = null
+ self.file_read_only = null
+
+ await within_fiber(fullpath, async () => {
 try {
 Object.assign(self, JSON.parse(
 await require('fs').promises.readFile(meta_path, 'utf8')))
@@ -533,219 +541,131 @@ async function sync_url(url) {
 if (freed) return
 
 if (!self.peer) self.peer = Math.random().toString(36).slice(2)
-
- // create file if it doesn't exist
- var fullpath = await get_fullpath()
- if (freed) return
- if (!(await file_exists(fullpath))) {
- if (freed) return
- await wait_on(require('fs').promises.writeFile(fullpath, ''))
- if (freed) return
- var stat = await require('fs').promises.stat(fullpath, { bigint: true })
- if (freed) return
- self.file_mtimeNs_str = '' + stat.mtimeNs
- self.last_touch = Date.now()
- }
- if (freed) return
-
- await save_meta()
 })
 if (freed) return
 
- var waitTime = 1
- var last_connect_timer = null
+ self.signal_file_needs_reading = async () => {
+ await within_fiber(fullpath, async () => {
+ try {
+ if (freed) return
 
- self.signal_file_needs_reading = () => {}
+ var fullpath = await get_fullpath()
+ if (freed) return
 
- connect()
- async function connect() {
- if (freed) return
- if (last_connect_timer) return
+ var stat = await require('fs').promises.stat(fullpath, { bigint: true })
+ if (freed) return
 
- var closed = false
- var prev_disconnect = self.disconnect
- self.disconnect = async () => {
- if (closed) return
- closed = true
- reconnect_rate_limiter.on_diss(url)
- for (var a of aborts) a.abort()
- aborts.clear()
- }
- self.reconnect = connect
-
- await prev_disconnect?.()
- if (freed || closed) return
-
- await reconnect_rate_limiter.get_turn(url)
- if (freed || closed) return
-
- function retry(e) {
- if (freed || closed) return
- var p = self.disconnect()
-
- var delay = waitTime * (config.retry_delay_ms ?? 1000)
- console.log(`reconnecting in ${(delay / 1000).toFixed(2)}s: ${url} after error: ${e}`)
- last_connect_timer = setTimeout(async () => {
- await p
- last_connect_timer = null
- connect()
- }, delay)
- waitTime = Math.min(waitTime + 1, 3)
- }
+ if (self.file_mtimeNs_str !== '' + stat.mtimeNs) {
+ var data = await require('fs').promises.readFile(fullpath, { encoding: 'utf8' })
+ if (freed) return
 
- try {
- var a = new AbortController()
- aborts.add(a)
-
- var fork_point
- if (self.version) {
- // Check if server has our version
- var r = await braid_fetch(url, {
- signal: a.signal,
- method: "HEAD",
- version: ['' + self.version]
- })
- if (r.ok) fork_point = ['' + self.version]
+ await braid_blob.put(url, data, { skip_write: true })
+ if (freed) return
+
+ self.file_mtimeNs_str = '' + stat.mtimeNs
+ await save_meta()
+ }
+ } catch (e) {
+ if (e.code !== 'ENOENT') throw e
 }
+ })
+ }
+ await self.signal_file_needs_reading()
 
- var res = await braid_fetch(url, {
- signal: a.signal,
- headers: {
- ...(x => x && {Cookie: x})(config.cookies?.[new URL(url).hostname]),
- },
- subscribe: true,
- heartbeats: 120,
- peer: self.peer,
- parents: fork_point || []
- })
- if (freed || closed) return
-
- if (res.status < 200 || res.status >= 300) return retry(new Error(`unexpected status: ${res.status}`))
-
- if (res.status !== 209)
- return log_error(`Can't sync ${url} -- got bad response ${res.status} from server (expected 209)`)
-
- self.file_read_only = res.headers.get('editable') === 'false'
+ var db = {
+ read: async (_key) => {
+ return await within_fiber(fullpath, async () => {
+ var fullpath = await get_fullpath()
+ if (freed) return
 
- await wait_on(within_fiber(url, async () => {
+ try {
+ return await require('fs').promises.readFile(fullpath)
+ } catch (e) {
+ if (e.code === 'ENOENT') return null
+ throw e
+ }
+ })
+ },
+ write: async (_key, data) => {
+ return await within_fiber(fullpath, async () => {
 var fullpath = await get_fullpath()
- if (freed || closed) return
+ if (freed) return
 
- await set_read_only(fullpath, self.file_read_only)
- }))
+ try {
+ var temp = `${temp_folder}/${Math.random().toString(36).slice(2)}`
+ await require('fs').promises.writeFile(temp, data)
+ if (freed) return
 
- console.log(`connected to ${url}${self.file_read_only ? ' (readonly)' : ''}`)
+ var stat = await require('fs').promises.stat(temp, { bigint: true })
+ if (freed) return
 
- reconnect_rate_limiter.on_conn(url)
-
- res.subscribe(async update => {
- if (freed || closed) return
- if (update.version.length === 0) return
- if (update.version.length !== 1) throw 'unexpected'
- var version = 1*update.version[0]
- if (!update.body) return
-
- if (self.version != null &&
- version <= self.version) return
- self.version = version
-
- await within_fiber(url, async () => {
- var fullpath = await get_fullpath()
- if (freed || closed) return
-
- await wait_on(set_read_only(fullpath, false))
- if (freed || closed) return
- await wait_on(require('fs').promises.writeFile(fullpath, update.body))
- if (freed || closed) return
- await wait_on(set_read_only(fullpath, self.file_read_only))
- if (freed || closed) return
-
- var stat = await require('fs').promises.stat(fullpath, { bigint: true })
- if (freed || closed) return
- self.file_mtimeNs_str = '' + stat.mtimeNs
- self.last_touch = Date.now()
+ await require('fs').promises.rename(temp, fullpath)
+ if (freed) return
 
+ self.file_mtimeNs_str = '' + stat.mtimeNs
 await save_meta()
- })
- }, retry)
-
- async function send_file(fullpath) {
- var body = await require('fs').promises.readFile(fullpath)
- if (freed || closed) return
-
- try {
- var a = new AbortController()
- aborts.add(a)
- var r = await braid_fetch(url, {
- method: 'PUT',
- signal: a.signal,
- version: ['' + self.version],
- body,
- headers: {
- ...(x => x && {Cookie: x})(config.cookies?.[new URL(url).hostname])
- },
- })
- if (freed || closed) return
-
- // if we're not authorized,
- if (r.status == 401 || r.status == 403) {
- // then revert it
- console.log(`access denied: reverting local edits`)
- unsync_url(url)
- sync_url(url)
- } else if (!r.ok) {
- retry(new Error(`unexpected PUT status: ${r.status}`))
- }
- } catch (e) { retry(e) }
- }
-
- // if what we have now is newer than what the server has,
- // go ahead and send it
- await within_fiber(url, async () => {
- if (freed || closed) return
+ } catch (e) {
+ if (e.code === 'ENOENT') return null
+ throw e
+ }
+ })
+ },
+ delete: async (_key) => {
+ return await within_fiber(fullpath, async () => {
 var fullpath = await get_fullpath()
- if (freed || closed) return
+ if (freed) return
+
 try {
- var stat = await require('fs').promises.stat(fullpath, { bigint: true })
- } catch (e) { return }
- if (freed || closed) return
-
- var server_v = JSON.parse(`[${res.headers.get('current-version')}]`)
- if (self.version != null &&
- '' + stat.mtimeNs === self.file_mtimeNs_str && (
- !server_v.length ||
- 1*server_v[0] < self.version
- )) await send_file(fullpath)
+ await require('fs').promises.unlink(fullpath)
+ } catch (e) {
+ if (e.code !== 'ENOENT') throw e
+ }
 })
+ }
+ }
 
- self.signal_file_needs_reading = () => {
- within_fiber(url, async () => {
- if (freed || closed) return
-
- var fullpath = await get_fullpath()
- if (freed || closed) return
-
- try {
- var stat = await require('fs').promises.stat(fullpath, { bigint: true })
- } catch (e) { return }
- if (freed || closed) return
+ var ac
+ function start_sync() {
+ if (ac) ac.abort()
+ if (freed) return
 
- if ('' + stat.mtimeNs !== self.file_mtimeNs_str) {
- self.version = Math.max((self.version || 0) + 1,
- Math.round(Number(stat.mtimeNs) / 1000000))
+ var closed = false
+ ac = new AbortController()
 
- self.file_mtimeNs_str = '' + stat.mtimeNs
- self.last_touch = Date.now()
+ self.disconnect = async () => {
+ if (closed) return
+ closed = true
+ reconnect_rate_limiter.on_diss(url)
+ ac.abort()
+ }
 
- await save_meta()
- await send_file(fullpath)
- }
- })
- }
- self.signal_file_needs_reading()
- } catch (e) { return retry(e) }
+ braid_blob.sync(url, new URL(url), {
+ db,
+ signal: ac.signal,
+ peer: self.peer,
+ headers: {
+ 'Content-Type': 'text/plain',
+ ...(x => x && { Cookie: x })(config.cookies?.[new URL(url).hostname])
+ },
+ on_pre_connect: () => reconnect_rate_limiter.get_turn(url),
+ on_res: res => {
+ if (freed) return
+ reconnect_rate_limiter.on_conn(url)
+ self.file_read_only = res.headers.get('editable') === 'false'
+ console.log(`connected to ${url}${self.file_read_only ? ' (readonly)' : ''}`)
+ },
+ on_unauthorized: async () => {
+ console.log(`access denied: reverting local edits`)
+ unsync_url(url)
+ sync_url(url)
+ },
+ on_disconnect: () => reconnect_rate_limiter.on_diss(url)
+ })
 }
 
+ self.reconnect = () => start_sync()
+
+ start_sync()
 return self
 }
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "braidfs",
- "version": "0.0.132",
+ "version": "0.0.134",
 "description": "braid technology synchronizing files and webpages",
 "author": "Braid Working Group",
 "repository": "braid-org/braidfs",
@@ -8,7 +8,7 @@
 "dependencies": {
 "braid-http": "~1.3.85",
 "braid-text": "~0.2.97",
- "braid-blob": "~0.0.30",
+ "braid-blob": "~0.0.42",
 "chokidar": "^4.0.3"
 },
  "bin": {