braidfs 0.0.96 → 0.0.97
This diff shows the changes between publicly available package versions as they appear in their public registry, and is provided for informational purposes only.
- package/index.js +251 -168
- package/package.json +1 -1
package/index.js
CHANGED
@@ -13,7 +13,8 @@ var trash = `${braidfs_config_dir}/trash`
 var temp_folder = `${braidfs_config_dir}/temp`

 var config = null,
-    watcher_misses = 0
+    watcher_misses = 0,
+    reconnect_rate_limiter = new ReconnectRateLimiter()

 if (require('fs').existsSync(sync_base)) {
     try {
@@ -732,12 +733,14 @@ async function sync_url(url)
     async function connect() {
         if (freed) return
         if (last_connect_timer) return
+        console.log(`connecting to ${url}`)

         var closed = false
         var prev_disconnect = self.disconnect
         self.disconnect = async () => {
             if (closed) return
             closed = true
+            reconnect_rate_limiter.on_diss(url)
             for (var a of aborts) a.abort()
             aborts.clear()
             if (braid_text_get_options) await braid_text.forget(url, braid_text_get_options)
@@ -755,112 +758,110 @@ async function sync_url(url)
             console.log(`reconnecting in ${waitTime}s: ${url} after error: ${e}`)
             last_connect_timer = setTimeout(async () => {
                 await p
+                await reconnect_rate_limiter.get_turn(url)
                 last_connect_timer = null
                 connect()
             }, waitTime * 1000)
             waitTime = Math.min(waitTime + 1, 3)
         }

-
-
+        try {
+            var initial_connect_done
+            var initial_connect_promise = new Promise(done => initial_connect_done = done)

-
-            if (freed || closed) return
-            try {
-                var a = new AbortController()
-                aborts.add(a)
-                return await braid_fetch(url, {
-                    signal: a.signal,
-                    headers: {
-                        "Merge-Type": "dt",
-                        "Content-Type": 'text/plain',
-                        ...(x => x && {Cookie: x})(config.cookies?.[new URL(url).hostname])
-                    },
-                    retry: { retryRes: r => r.status !== 401 && r.status !== 403 },
-                    ...params
-                })
-            } catch (e) {
-                if (freed || closed) return
-                if (e?.name !== "AbortError") console.log(e)
-            } finally {
+            async function my_fetch(params) {
                 if (freed || closed) return
-
+                try {
+                    var a = new AbortController()
+                    aborts.add(a)
+                    return await braid_fetch(url, {
+                        signal: a.signal,
+                        headers: {
+                            "Merge-Type": "dt",
+                            "Content-Type": 'text/plain',
+                            ...(x => x && {Cookie: x})(config.cookies?.[new URL(url).hostname])
+                        },
+                        ...params
+                    })
+                } catch (e) {
+                    if (freed || closed) return
+                    aborts.delete(a)
+                    throw e
+                }
             }
-
-
-
-            if (freed || closed) return
+
+            async function send_out(stuff) {
+                if (freed || closed) return

-
+                console.log(`send_out ${url} ${JSON.stringify(stuff, null, 4).slice(0, 1000)}`)

-
-
+                var r = await my_fetch({ method: "PUT", ...stuff,
+                    retry: { retryRes: r => r.status !== 401 && r.status !== 403 }})
+                if (freed || closed) return
+
+                // the server has acknowledged this version,
+                // so add it to the fork point
+                if (r.ok) self.update_fork_point(stuff.version[0], stuff.parents)

-
-
-
-
-
-
-
-
-
-                console.log(`access denied: reverting local edits`)
-                unsync_url(url)
-                sync_url(url)
+                // if we're not authorized,
+                if (r.status == 401 || r.status == 403) {
+                    // and it's one of our versions (a local edit),
+                    if (self.peer === braid_text.decode_version(stuff.version[0])[0]) {
+                        // then revert it
+                        console.log(`access denied: reverting local edits`)
+                        unsync_url(url)
+                        sync_url(url)
+                    }
                 }
             }
-            }

-
-            if (freed || closed) return
-            console.log(`[find_fork_point] url: ${url}`)
-
-            // see if remote has the fork point
-            if (self.fork_point) {
-                var r = await my_fetch({ method: "HEAD", version: self.fork_point })
+            async function find_fork_point() {
                 if (freed || closed) return
-
-            }
+                console.log(`[find_fork_point] url: ${url}`)

-
-
-
-
+                // see if remote has the fork point
+                if (self.fork_point) {
+                    var r = await my_fetch({ method: "HEAD", version: self.fork_point })
+                    if (freed || closed) return
+                    if (r.ok) return console.log(`[find_fork_point] it has our latest fork point, hooray!`)
+                }

-
-
-
-
-                var i = Math.floor((min + max)/2)
-                var version = [events[i]]
+                // otherwise let's binary search for new fork point..
+                var bytes = resource.doc.toBytes()
+                var [_, events, __] = braid_text.dt_parse([...bytes])
+                events = events.map(x => x.join('-'))

-
+                var min = -1
+                var max = events.length
+                self.fork_point = []
+                while (min + 1 < max) {
+                    var i = Math.floor((min + max)/2)
+                    var version = [events[i]]

-
-
-
-
+                    console.log(`min=${min}, max=${max}, i=${i}, version=${version}`)
+
+                    var st = Date.now()
+                    var r = await my_fetch({ method: "HEAD", version })
+                    if (freed || closed) return
+                    console.log(`fetched in ${Date.now() - st}`)

-
-
-
-
+                    if (r.ok) {
+                        min = i
+                        self.fork_point = version
+                    } else max = i
+                }
+                console.log(`[find_fork_point] settled on: ${JSON.stringify(self.fork_point)}`)
+                self.signal_file_needs_writing(true)
             }
-            console.log(`[find_fork_point] settled on: ${JSON.stringify(self.fork_point)}`)
-            self.signal_file_needs_writing(true)
-            }

-
-
+            await find_fork_point()
+            if (freed || closed) return

-
-
+            await send_new_stuff()
+            if (freed || closed) return

-
-
-            aborts.add(a)
-            try {
+            let a = new AbortController()
+            aborts.add(a)
             var res = await braid_fetch(url, {
                 signal: a.signal,
                 headers: {
@@ -873,106 +874,108 @@ async function sync_url(url)
                 parents: self.fork_point,
                 peer: self.peer
             })
-
-        if (freed || closed) return
+            if (freed || closed) return

-
+            if (res.status < 200 || res.status >= 300) return retry(new Error(`unexpected status: ${res.status}`))

-
-
+            if (res.status !== 209)
+                return log_error(`Can't sync ${url} -- got bad response ${res.status} from server (expected 209)`)

-
-
+            console.log(`connected to ${url}`)
+            console.log(`  editable = ${res.headers.get('editable')}`)

-
-
-
-
-
-
-
+            reconnect_rate_limiter.on_conn(url)
+
+            self.file_read_only = res.headers.get('editable') === 'false'
+            self.signal_file_needs_writing()
+
+            initial_connect_done()
+            res.subscribe(async update => {
+                if (freed || closed) return
+                console.log(`got external update about ${url}`)

-
-
+                if (update.body) update.body = update.body_text
+                if (update.patches) for (let p of update.patches) p.content = p.content_text

-
-
-
+                // console.log(`update: ${JSON.stringify(update, null, 4)}`)
+                if (update.version.length === 0) return
+                if (update.version.length !== 1) throw 'unexpected'

-
-
+                await wait_on(braid_text.put(url, { ...update, peer: self.peer, merge_type: 'dt' }))
+                if (freed || closed) return

-
-
-
-
+                // the server is giving us this version,
+                // so it must have it,
+                // so let's add it to our fork point
+                self.update_fork_point(update.version[0], update.parents)

-
-
+                self.signal_file_needs_writing()
+            }, retry)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            // send it stuff we have but it doesn't't
+            async function send_new_stuff() {
+                if (freed || closed) return
+                var q = []
+                var in_flight = new Map()
+                var max_in_flight = 10
+                var send_pump_lock = 0
+
+                async function send_pump() {
+                    send_pump_lock++
+                    if (send_pump_lock > 1) return
+                    try {
+                        if (freed || closed) return
+                        if (in_flight.size >= max_in_flight) return
+                        if (!q.length) {
+                            var frontier = self.fork_point
+                            for (var u of in_flight.values())
+                                frontier = extend_frontier(frontier, u.version[0], u.parents)
+
+                            var options = {
+                                parents: frontier,
+                                merge_type: 'dt',
+                                peer: self.peer,
+                                subscribe: u => u.version.length && q.push(u)
+                            }
+                            await braid_text.get(url, options)
+                            await braid_text.forget(url, options)
                         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                        while (q.length && in_flight.size < max_in_flight) {
+                            let u = q.shift()
+                            in_flight.set(u.version[0], u);
+                            (async () => {
+                                await initial_connect_promise
+                                if (freed || closed) return
+                                await send_out({...u, peer: self.peer})
+                                if (freed || closed) return
+                                in_flight.delete(u.version[0])
+                                setTimeout(send_pump, 0)
+                            })()
+                        }
+                    } finally {
+                        var retry = send_pump_lock > 1
+                        send_pump_lock = 0
+                        if (retry) setTimeout(send_pump, 0)
                     }
-                } finally {
-                    var retry = send_pump_lock > 1
-                    send_pump_lock = 0
-                    if (retry) setTimeout(send_pump, 0)
                 }
-            }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                var initial_stuff = true
+                await wait_on(braid_text.get(url, braid_text_get_options = {
+                    parents: self.fork_point,
+                    merge_type: 'dt',
+                    peer: self.peer,
+                    subscribe: async (u) => {
+                        if (freed || closed) return
+                        if (u.version.length) {
+                            self.signal_file_needs_writing()
+                            if (initial_stuff || in_flight.size < max_in_flight) q.push(u)
+                            send_pump()
+                        }
+                    },
+                }))
+                initial_stuff = false
+            }
+        } catch (e) { return retry(e) }
     }

     // for config and errors file, listen for web changes
@@ -1007,6 +1010,86 @@ async function ensure_path(path)
     }
 }

+function ReconnectRateLimiter(wait_time = 1000) {
+    var self = {}
+
+    self.conns = new Map() // Map<host, Set<url>>
+    self.host_to_q = new Map() // Map<host, Array<resolve>>
+    self.qs = [] // Array<{host, turns: Array<resolve>, last_turn}>
+    self.last_turn = 0
+    self.timer = null
+
+    function process() {
+        if (self.timer) clearTimeout(self.timer)
+        self.timer = null
+
+        if (!self.qs.length) return
+        var now = Date.now()
+        var my_last_turn = () => self.conns.size === 0 ? self.last_turn : self.qs[0].last_turn
+
+        while (self.qs.length && now >= my_last_turn() + wait_time) {
+            var x = self.qs.shift()
+            if (!x.turns.length) {
+                self.host_to_q.delete(x.host)
+                continue
+            }
+
+            x.turns.shift()()
+            x.last_turn = self.last_turn = now
+            self.qs.push(x)
+        }
+
+        if (self.qs.length)
+            self.timer = setTimeout(process, Math.max(0,
+                my_last_turn() + wait_time - now))
+    }
+
+    self.get_turn = async (url) => {
+        var host = new URL(url).host
+
+        // If host has connections, give turn immediately
+        if (self.conns.has(host)) return
+
+        console.log(`throttling reconn to ${url} (no conns yet to ${self.conns.size ? host : 'anything'})`)
+
+        if (!self.host_to_q.has(host)) {
+            var turns = []
+            self.host_to_q.set(host, turns)
+            self.qs.unshift({host, turns, last_turn: 0})
+        }
+        var p = new Promise(resolve => self.host_to_q.get(host).push(resolve))
+        process()
+        await p
+    }
+
+    self.on_conn = url => {
+        var host = new URL(url).host
+        if (!self.conns.has(host))
+            self.conns.set(host, new Set())
+        self.conns.get(host).add(url)
+
+        // If there are turns waiting for this host, resolve them all immediately
+        var turns = self.host_to_q.get(host)
+        if (turns) {
+            for (var turn of turns) turn()
+            turns.splice(0, turns.length)
+        }
+
+        process()
+    }
+
+    self.on_diss = url => {
+        var host = new URL(url).host
+        var urls = self.conns.get(host)
+        if (urls) {
+            urls.delete(url)
+            if (urls.size === 0) self.conns.delete(host)
+        }
+    }
+
+    return self
+}
+
 ////////////////////////////////

 function normalize_url(url) {