querysub 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. package/.dependency-cruiser.js +304 -0
  2. package/.eslintrc.js +51 -0
  3. package/.github/copilot-instructions.md +1 -0
  4. package/.vscode/settings.json +25 -0
  5. package/bin/deploy.js +4 -0
  6. package/bin/function.js +4 -0
  7. package/bin/server.js +4 -0
  8. package/costsBenefits.txt +112 -0
  9. package/deploy.ts +3 -0
  10. package/inject.ts +1 -0
  11. package/package.json +60 -0
  12. package/prompts.txt +54 -0
  13. package/spec.txt +820 -0
  14. package/src/-a-archives/archiveCache.ts +913 -0
  15. package/src/-a-archives/archives.ts +148 -0
  16. package/src/-a-archives/archivesBackBlaze.ts +792 -0
  17. package/src/-a-archives/archivesDisk.ts +418 -0
  18. package/src/-a-archives/copyLocalToBackblaze.ts +24 -0
  19. package/src/-a-auth/certs.ts +517 -0
  20. package/src/-a-auth/der.ts +122 -0
  21. package/src/-a-auth/ed25519.ts +1015 -0
  22. package/src/-a-auth/node-forge-ed25519.d.ts +17 -0
  23. package/src/-b-authorities/dnsAuthority.ts +203 -0
  24. package/src/-b-authorities/emailAuthority.ts +57 -0
  25. package/src/-c-identity/IdentityController.ts +200 -0
  26. package/src/-d-trust/NetworkTrust2.ts +150 -0
  27. package/src/-e-certs/EdgeCertController.ts +288 -0
  28. package/src/-e-certs/certAuthority.ts +192 -0
  29. package/src/-f-node-discovery/NodeDiscovery.ts +543 -0
  30. package/src/-g-core-values/NodeCapabilities.ts +134 -0
  31. package/src/-g-core-values/oneTimeForward.ts +91 -0
  32. package/src/-h-path-value-serialize/PathValueSerializer.ts +769 -0
  33. package/src/-h-path-value-serialize/stringSerializer.ts +176 -0
  34. package/src/0-path-value-core/LoggingClient.tsx +24 -0
  35. package/src/0-path-value-core/NodePathAuthorities.ts +978 -0
  36. package/src/0-path-value-core/PathController.ts +1 -0
  37. package/src/0-path-value-core/PathValueCommitter.ts +565 -0
  38. package/src/0-path-value-core/PathValueController.ts +231 -0
  39. package/src/0-path-value-core/archiveLocks/ArchiveLocks.ts +154 -0
  40. package/src/0-path-value-core/archiveLocks/ArchiveLocks2.ts +820 -0
  41. package/src/0-path-value-core/archiveLocks/archiveSnapshots.ts +180 -0
  42. package/src/0-path-value-core/debugLogs.ts +90 -0
  43. package/src/0-path-value-core/pathValueArchives.ts +483 -0
  44. package/src/0-path-value-core/pathValueCore.ts +2217 -0
  45. package/src/1-path-client/RemoteWatcher.ts +558 -0
  46. package/src/1-path-client/pathValueClientWatcher.ts +702 -0
  47. package/src/2-proxy/PathValueProxyWatcher.ts +1857 -0
  48. package/src/2-proxy/archiveMoveHarness.ts +376 -0
  49. package/src/2-proxy/garbageCollection.ts +753 -0
  50. package/src/2-proxy/pathDatabaseProxyBase.ts +37 -0
  51. package/src/2-proxy/pathValueProxy.ts +139 -0
  52. package/src/2-proxy/schema2.ts +518 -0
  53. package/src/3-path-functions/PathFunctionHelpers.ts +129 -0
  54. package/src/3-path-functions/PathFunctionRunner.ts +619 -0
  55. package/src/3-path-functions/PathFunctionRunnerMain.ts +67 -0
  56. package/src/3-path-functions/deployBlock.ts +10 -0
  57. package/src/3-path-functions/deployCheck.ts +7 -0
  58. package/src/3-path-functions/deployMain.ts +160 -0
  59. package/src/3-path-functions/pathFunctionLoader.ts +282 -0
  60. package/src/3-path-functions/syncSchema.ts +475 -0
  61. package/src/3-path-functions/tests/functionsTest.ts +135 -0
  62. package/src/3-path-functions/tests/rejectTest.ts +77 -0
  63. package/src/4-dom/css.tsx +29 -0
  64. package/src/4-dom/cssTypes.d.ts +212 -0
  65. package/src/4-dom/qreact.tsx +2322 -0
  66. package/src/4-dom/qreactTest.tsx +417 -0
  67. package/src/4-querysub/Querysub.ts +877 -0
  68. package/src/4-querysub/QuerysubController.ts +620 -0
  69. package/src/4-querysub/copyEvent.ts +0 -0
  70. package/src/4-querysub/permissions.ts +289 -0
  71. package/src/4-querysub/permissionsShared.ts +1 -0
  72. package/src/4-querysub/querysubPrediction.ts +525 -0
  73. package/src/5-diagnostics/FullscreenModal.tsx +67 -0
  74. package/src/5-diagnostics/GenericFormat.tsx +165 -0
  75. package/src/5-diagnostics/Modal.tsx +79 -0
  76. package/src/5-diagnostics/Table.tsx +183 -0
  77. package/src/5-diagnostics/TimeGrouper.tsx +114 -0
  78. package/src/5-diagnostics/diskValueAudit.ts +216 -0
  79. package/src/5-diagnostics/memoryValueAudit.ts +442 -0
  80. package/src/5-diagnostics/nodeMetadata.ts +135 -0
  81. package/src/5-diagnostics/qreactDebug.tsx +309 -0
  82. package/src/5-diagnostics/shared.ts +26 -0
  83. package/src/5-diagnostics/synchronousLagTracking.ts +47 -0
  84. package/src/TestController.ts +35 -0
  85. package/src/allowclient.flag +0 -0
  86. package/src/bits.ts +86 -0
  87. package/src/buffers.ts +69 -0
  88. package/src/config.ts +53 -0
  89. package/src/config2.ts +48 -0
  90. package/src/diagnostics/ActionsHistory.ts +56 -0
  91. package/src/diagnostics/NodeViewer.tsx +503 -0
  92. package/src/diagnostics/SizeLimiter.ts +62 -0
  93. package/src/diagnostics/TimeDebug.tsx +18 -0
  94. package/src/diagnostics/benchmark.ts +139 -0
  95. package/src/diagnostics/errorLogs/ErrorLogController.ts +515 -0
  96. package/src/diagnostics/errorLogs/ErrorLogCore.ts +274 -0
  97. package/src/diagnostics/errorLogs/LogClassifiers.tsx +302 -0
  98. package/src/diagnostics/errorLogs/LogFilterUI.tsx +84 -0
  99. package/src/diagnostics/errorLogs/LogNotify.tsx +101 -0
  100. package/src/diagnostics/errorLogs/LogTimeSelector.tsx +724 -0
  101. package/src/diagnostics/errorLogs/LogViewer.tsx +757 -0
  102. package/src/diagnostics/errorLogs/hookErrors.ts +60 -0
  103. package/src/diagnostics/errorLogs/logFiltering.tsx +149 -0
  104. package/src/diagnostics/heapTag.ts +13 -0
  105. package/src/diagnostics/listenOnDebugger.ts +77 -0
  106. package/src/diagnostics/logs/DiskLoggerPage.tsx +572 -0
  107. package/src/diagnostics/logs/ObjectDisplay.tsx +165 -0
  108. package/src/diagnostics/logs/ansiFormat.ts +108 -0
  109. package/src/diagnostics/logs/diskLogGlobalContext.ts +38 -0
  110. package/src/diagnostics/logs/diskLogger.ts +305 -0
  111. package/src/diagnostics/logs/diskShimConsoleLogs.ts +32 -0
  112. package/src/diagnostics/logs/injectFileLocationToConsole.ts +50 -0
  113. package/src/diagnostics/logs/logGitHashes.ts +30 -0
  114. package/src/diagnostics/managementPages.tsx +289 -0
  115. package/src/diagnostics/periodic.ts +89 -0
  116. package/src/diagnostics/runSaturationTest.ts +416 -0
  117. package/src/diagnostics/satSchema.ts +64 -0
  118. package/src/diagnostics/trackResources.ts +82 -0
  119. package/src/diagnostics/watchdog.ts +55 -0
  120. package/src/errors.ts +132 -0
  121. package/src/forceProduction.ts +3 -0
  122. package/src/fs.ts +72 -0
  123. package/src/heapDumps.ts +666 -0
  124. package/src/https.ts +2 -0
  125. package/src/inject.ts +1 -0
  126. package/src/library-components/ATag.tsx +84 -0
  127. package/src/library-components/Button.tsx +344 -0
  128. package/src/library-components/ButtonSelector.tsx +64 -0
  129. package/src/library-components/DropdownCustom.tsx +151 -0
  130. package/src/library-components/DropdownSelector.tsx +32 -0
  131. package/src/library-components/Input.tsx +334 -0
  132. package/src/library-components/InputLabel.tsx +198 -0
  133. package/src/library-components/InputPicker.tsx +125 -0
  134. package/src/library-components/LazyComponent.tsx +62 -0
  135. package/src/library-components/MeasureHeightCSS.tsx +48 -0
  136. package/src/library-components/MeasuredDiv.tsx +47 -0
  137. package/src/library-components/ShowMore.tsx +51 -0
  138. package/src/library-components/SyncedController.ts +171 -0
  139. package/src/library-components/TimeRangeSelector.tsx +407 -0
  140. package/src/library-components/URLParam.ts +263 -0
  141. package/src/library-components/colors.tsx +14 -0
  142. package/src/library-components/drag.ts +114 -0
  143. package/src/library-components/icons.tsx +692 -0
  144. package/src/library-components/niceStringify.ts +50 -0
  145. package/src/library-components/renderToString.ts +52 -0
  146. package/src/misc/PromiseRace.ts +101 -0
  147. package/src/misc/color.ts +30 -0
  148. package/src/misc/getParentProcessId.cs +53 -0
  149. package/src/misc/getParentProcessId.ts +53 -0
  150. package/src/misc/hash.ts +83 -0
  151. package/src/misc/ipPong.js +13 -0
  152. package/src/misc/networking.ts +2 -0
  153. package/src/misc/random.ts +45 -0
  154. package/src/misc.ts +19 -0
  155. package/src/noserverhotreload.flag +0 -0
  156. package/src/path.ts +226 -0
  157. package/src/persistentLocalStore.ts +37 -0
  158. package/src/promise.ts +15 -0
  159. package/src/server.ts +73 -0
  160. package/src/src.d.ts +1 -0
  161. package/src/test/heapProcess.ts +36 -0
  162. package/src/test/mongoSatTest.tsx +55 -0
  163. package/src/test/satTest.ts +193 -0
  164. package/src/test/test.tsx +552 -0
  165. package/src/zip.ts +92 -0
  166. package/src/zipThreaded.ts +106 -0
  167. package/src/zipThreadedWorker.js +19 -0
  168. package/tsconfig.json +27 -0
  169. package/yarnSpec.txt +56 -0
package/spec.txt ADDED
@@ -0,0 +1,820 @@
1
+ *** Goal is to get to === Full public website stack milestone === ***
2
+ === 3 tasks to fully fledged syncing KVP database, 2023 / 25 / 2 ===
3
+ === 1 tasks to fully fledged syncing KVP database, 2023 / 26 / 2 ===
4
+ === FINISHED fully fledged syncing KVP database, 2023 / 28 / 1 ===
5
+ === 6 tasks to "Full public website stack", 2023 / 31 / 1 ===
6
+ === 5 tasks to "Full public website stack", 2023 / 31 / 1 ===
7
+ === FunctionRunner bones done!, 2023 / 31 / 2 ===
8
+ === Disk garbage collection bones done, 2023 / 11 / 3 ===
9
+ === Access for non-network nodes (via permissions) done, 2023 / 19 / 3 ===
10
+ === Clientside + clientside function prediction done! 2023 / 23 / 3 ===
11
+ === Prototype SD app with 2 creation modes, arbitrary model support, paint, diffuse, openpose, onnx model generation, etc. 2023 / 10 / 1 ===
12
+ === Lots of failed embedding training research. 2023 / 12 / 18 ===
13
+ === Better single source of truth. 2023 / 12 / 31 ===
14
+ === Better serialization format. 2023 / 12 / 31 ===
15
+ === Fixed storage / multi process storage. 2023 / 12 / 31 ===
16
+
17
+ TIMING: Transactions, currently about ~1.5ms per transaction + 10us per write and read path in the transaction
18
+ - About 5ms latency, the fast times are only if you don't wait for transactions to finish (because why would you wait when the whole system is based on predicting writes?)
19
+ - With 1 watcher (with 0 watchers transactions are probably about 0.5ms per)
20
+ - Much of the transaction overhead is serialization time, mostly in JSON.stringify (to send the value to the watcher)
21
+ - MUCH FASTER NOW, if they are batched
22
+
23
+ TIMING: Function calls, about ~1ms-3ms for a trivial function, if it can be batched with similar functions
24
+ - Slower if we need to sync new paths
25
+
26
+ Watcher diff mode
27
+ - Running stress tests (satTest.ts) with a single watcher shows that the slowest part is our function watcher, specifically all of the parts that don't operate in a delta mode (ex, setWatches).
28
+ - If we made a watcher fully support a delta mode (which is fairly easy, as it converts all the changes to deltas anyways), the watcher would likely work MUCH faster.
29
+ - ClientWatcher needs to expose a delta interface
30
+ - proxyWatcher also needs a delta mode
31
+ - Rerun the benchmark after using the delta mode for FunctionRunner, and our call/s should go up significantly
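A minimal sketch of what a delta-capable watcher interface could look like; the names here (PathDelta, DeltaWatcher, onDeltas) are illustrative assumptions, not the package's actual API:

```ts
// Hypothetical shape for a delta-capable watcher; names are illustrative only.
type PathDelta =
    | { kind: "set"; path: string; value: unknown }
    | { kind: "delete"; path: string };

interface DeltaWatcher {
    // Called with only the paths that changed since the last call,
    // instead of re-iterating every watched path (as setWatches does today).
    onDeltas(deltas: PathDelta[]): void;
    // A full resync is still needed on (re)connect or when deltas were dropped.
    onReset(allValues: Map<string, unknown>): void;
}
```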
32
+
33
+ Shard display / manual sharding / automatic sharding
34
+ - Sharding will be fixed on process start
35
+ - We can augment our database path size distribution display to also support explicit sharding control
36
+ - This means we have all machines register, and then tell them what pathAuthorities to run (generally just running a single process, but always a new process, with us never changing pathAuthorities for a process/node)
37
+ - THEN, we can write code that automatically sets the sharding configuration, not changing it too often, but keeping everything fairly well distributed
38
+ - Our merging should automatically handle cleaning up dead nodes, so we really just need to create and kill processes to control sharding.
39
+
40
+ Function implementation benchmarking / profiling / diagnostics
41
+ - Have a debug page which shows all functions that have been run, with diagnostics
42
+ - Time taken (total, average, etc)
43
+ - Lock count
44
+ - Rerun count
45
+ - Really important, as it is easy to write functions with a high rerun count. Ideally every function should run at most 2 times, once to know which paths it requires, and again once those are all synced.
46
+ - Reject %
47
+
48
+ Fix wildcard permissions
49
+ - Wildcard values should do more than just check "", they should check all possible direct permission keys
50
+ - This fixes the `{ admin: { PERMISSIONS(){ return users()[config.callerId].isAdmin; } } }` type of permission check
51
+ - Also `{ serviceSecrets: { PERMISSIONS(){ return false; } } }`
52
+ - It isn't SO bad, as Object.values() only provides shallow values, but... we should still fix it.
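A sketch of the intended fix, assuming a permission tree where a PERMISSIONS() check can be declared per key (as in the admin/serviceSecrets examples above); the tree shape and checkWildcardRead are assumptions for illustration:

```ts
// Illustrative permission tree; PERMISSIONS() per key comes from the note above,
// everything else (names, shape) is assumed for the sketch.
type PermissionNode = {
    PERMISSIONS?: () => boolean;
    [key: string]: PermissionNode | (() => boolean) | undefined;
};

// A wildcard read of `node` must pass the check of EVERY direct child key,
// not just the "" / default entry, otherwise `admin` or `serviceSecrets`
// style checks are bypassed.
function checkWildcardRead(node: PermissionNode): boolean {
    return Object.keys(node)
        .filter(key => key !== "PERMISSIONS")
        .every(key => {
            const child = node[key] as PermissionNode;
            return !child?.PERMISSIONS || child.PERMISSIONS();
        });
}
```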
53
+
54
+ Browser local PathValue caching
55
+ - https://rxdb.info/rx-storage-opfs.html
56
+ - Do it at a core level
57
+ - Start with being able to enable it for specific paths, triggered by a flag to createLocalSchema which causes that entire schema to be synced.
58
+ - Or... createLocalSyncedSchema?
59
+ - Eventually add support for remote paths, but in a way to not erroneously trigger the sync flag
60
+ - We need to batch the PathValues that we store, and load them in efficiently, etc, etc
61
+
62
+ Combined console/event display
63
+ - Everything grouped by event, with minor sorting based on the latest events, but it is mostly just an aggregator
64
+ - Show the most recent events
65
+ - Allow drilling into a specific event, searching it, filtering it, etc
66
+ - Allow setting up debug workflows
67
+ - Take a specific event / search, run some code on it to filter and create another search, which gets values, which we then can run another set of code on, etc, cascading
68
+ - Tree summarization / navigation
69
+ - For tree display, take any node that has > N children (maybe 100?), and wildcard it, collapsing all children into a single node (and merging all child trees, etc, basically rewriting it so instead of many keys, it has exactly 1 key). This should give us a small number of finite paths. Then we can drill down far enough to split each node into large enough chunks (so we aren’t only looking at roots, but also not only looking at children), with the split factor being configurable (so we CAN just look at root, or just look really high level).
70
+
71
+ Heap analyzer
72
+ - Start by showing shallow size with a depth of 1, and then allow clicking to add more depth
73
+ - Maybe allow viewing the options at each level (instead of just taking the first reference)?
74
+
75
+ Better serverside logging
76
+ - Batch it or something, so 20 requests per second (which isn't even that much), doesn't cause the console to be unusable.
77
+ - Maybe... group by type, and show all the categories on the screen at once, allowing the user to type into the console to expand a group, pin a group, filter, etc
78
+ - The most recent groups can be shown first, WITH, extra prioritization for severity
79
+ - Expanding a group will show the specific entries (otherwise we just show a count, and MAYBE the latest value?)
80
+ - Filtering would probably be useful too
81
+ - OR... we could just show a really simple log, and link to a web UI?
82
+ - In the web UI we could show logs for multiple processes too!
83
+ - We could tag groups with processes that contribute to them
84
+ - And also show active processes, and recently terminated processes
85
+
86
+ Cloudflare bootstrap caching
87
+ - Add round robin DNS entries (or just verify we are already using it), for our root domain, and turn on some level of caching
88
+ - I think we return the correct e-tag, so we should be able to cache fairly heavily
89
+ - If we turn the file download to be 2 stage we can cache the second stage forever (including the hashes we obtained from the first stage), which... SHOULD be a lot faster (the first call will be slower, and two calls are required, but the second call should be always cached in cloudflare, and then in the user's browser).
90
+ - Fix clientside node routing
91
+ - Update our certificate generation to have one level of wildcard.
92
+ - I believe (we should verify this), you can't have multiple levels of wildcards, so... this certificate WON'T be able to impersonate our full node domains (which are 3 parts), and even if it can... we do extra verification of the public key, so... it would actually be fine.
93
+ - Publish A records for our machines
94
+ - Convert the node ids to just use the machine id clientside, allowing the client to pick any Querysub node
95
+ - Have the node list accessed via HTTP, and cached for a few minutes, as it will change constantly, but... clients don't need the latest version
96
+ - I GUESS after the first read, if we can't find a Querysub node, we can add some flag to disable caching for it, which would eliminate the lag between fixing all the servers and the site being usable again
97
+
98
+ Querysub node sharding
99
+ - Presently we allow any client to use any Querysub node. We might want to shard them too? Or at least make it allowed?
100
+
101
+ Archives disk caching
102
+ - Cache backblaze files on disk.
103
+ - Only files that are at the file size limit and old enough (coordinate with the thresholds the merger uses).
104
+ - At a certain point the merger will stop touching files, or touch them less often, and so we will be able to cache them on our disk for a while.
105
+ - Limit our size to a fixed disk % (probably 5%?), with a configurable list of disks to use
106
+
107
+ Clientside sourcemaps in error callstacks (maybe just have Querysub install the error stack library by default, in the browser?)
108
+
109
+ typenode improvements
110
+ - Log typenode timings after setImmediate, top 10 + remaining timing, in a table
111
+ - Also, maybe entries with time > 10ms (including remaining), and not logging the table if this would make it empty.
112
+
113
+ Tool to suggest balanced path ranges (based on data size, not access patterns).
114
+ - We can even just have it be a function, which we call on startup (telling it the count of nodes, and which index we are), and which caches the shards. It isn't live automatic sharding, but it is technically offline automatic sharding, which isn't too bad.
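A sketch of the startup-time variant described above, assuming a pre-sorted list of path sizes as input; the function name, inputs, and boundary scheme are assumptions:

```ts
// Offline "automatic" sharding: called once on startup with the node count and
// our own index, cutting the sorted path space into roughly equal byte budgets.
function pickShardRange(
    pathSizes: { path: string; bytes: number }[], // sorted by path
    nodeCount: number,
    nodeIndex: number,
): { startPath: string; endPath: string } {
    const total = pathSizes.reduce((sum, p) => sum + p.bytes, 0);
    const perNode = total / nodeCount;
    const boundaries: string[] = [];
    let acc = 0;
    for (const p of pathSizes) {
        acc += p.bytes;
        // Cut a boundary each time another node's byte budget fills up.
        while (boundaries.length < nodeCount - 1 && acc >= perNode * (boundaries.length + 1)) {
            boundaries.push(p.path);
        }
    }
    const starts = ["", ...boundaries];
    const ends = [...boundaries, "\uffff"];
    return { startPath: starts[nodeIndex] ?? "\uffff", endPath: ends[nodeIndex] ?? "\uffff" };
}
```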
115
+
116
+ FunctionRunner automatic balancing
117
+ - Exposed functions will also define their shard distribution.
118
+ - Ex, setShardDistribution(addToUserBalance, (userId, balance) => userId)
119
+ - It is somewhat important for the shard distribution to be consistent across various functions.
120
+ - If this is just set on our most commonly called function, sharding should work quite well
121
+ - IF a shard becomes too far behind, other nodes will pick up the slack, with the closest shards picking it up first, then those farther away, etc, etc.
122
+ - Hopefully we can use the shard values to prevent too much duplicate work, even without any direct communication between the nodes
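A sketch of how the setShardDistribution registration from the example above might map calls onto runner shards; setShardDistribution itself comes from the note, while the registry, hashing, and pickShard helper are assumptions:

```ts
// Sketch only: the registry, hash function, and shard lookup are assumed.
type ShardKeyFn = (...args: any[]) => string;
const shardKeys = new Map<Function, ShardKeyFn>();

function setShardDistribution(fn: Function, getKey: ShardKeyFn): void {
    shardKeys.set(fn, getKey);
}

// Consistent mapping from a call's shard key to one of N runner shards,
// so the same userId always lands on the same FunctionRunner first.
function pickShard(fn: Function, args: unknown[], shardCount: number): number {
    const getKey = shardKeys.get(fn);
    const key = getKey ? getKey(...args) : JSON.stringify(args);
    let hash = 0;
    for (let i = 0; i < key.length; i++) {
        hash = (hash * 31 + key.charCodeAt(i)) >>> 0;
    }
    return hash % shardCount;
}

// Ex (from the note): setShardDistribution(addToUserBalance, (userId, balance) => userId);
```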
123
+
124
+ Turn on and test backblaze storage
125
+ - Create a utility to go from disk => backblaze (and might as well go from backblaze => disk)
126
+ - Test starting a server on another machine
127
+ - After setting up the .json keys files, it... should just work?
128
+
129
+ Undo support
130
+ - Not needed for games, but needed for every other application
131
+ - Probably by marking certain functions as "undoable", then showing (in the app UI), a list of "versions" (time points) at a certain path (showing all the changes under a path), and letting the user pick a point. Anything that is undoable can just be reverted, creating new writes, everything that isn't undoable... well, any non-undoable changes cause the undo to not be able to go that far
132
+ - ALSO, anything that depends on writes has to be undoable as well, and... also undone (ideally nothing outside the undo scope would depend on it)
133
+ - Alternatively, dependencies could be made "undoIgnored", which would allow them to stick around, ignoring the fact that their write has been clobbered.
134
+ - OR, maybe we just ignore dependencies, as... undoing adds new writes, so anything that is undoable has to be something where reversing is fine?
135
+ - OR, we make multiple undo modes, "safe", and "unsafe"? Or different undo flags, for failing on dependencies, or not failing? Hmm...
136
+
137
+ Anti-rejection code (isn't REQUIRED to make the database useful, so we should wait. Would be pretty slick though... Ideally this can all be extensions that have no or only modular impact on the core functions, or even no or modular impact on the proxy? Although that might not be possible...)
138
+ Summary
139
+ Excess work (N^2) due to rejections
140
+ - Past time reading (removes causality guarantee, almost completely preventing rejections)
141
+ - Unsafe reads (to remove locks, and then selectively add them back)
142
+ Jitter (due to rejections)
143
+ - Past time reading (even further in the past than required, to allow a buffer of values)
144
+ Jitter (due to varying client lag)
145
+ - Past time reading
146
+ Lag makes players nearly impossible to kill
147
+ - Adjust writeTime to stay > min value, depending on the values changed (some values have to be < 100ms, etc)
148
+ Laggy player does things only they can see
149
+ - Adjust age of past dependencies to stay > min value, depending on the values changed (some values have to be < 100ms, etc)
150
+ Clientside operation lag resulting in inability to perform delicate operations (ex, shots miss)
151
+ - Past time reading
152
+ Clientside operations applied out of order
153
+ - More consistently adjust writeTimes (instead of on some functions but not others)
154
+ Operation cause-and-effect not ordered (phasing through objects)
155
+ - Stop using past time reading
156
+ Clientside hacking script to automatically react to values in the past
157
+ - Serverside changing of time, rejecting clientside predictions, AND, something to tween to the correct state, to prevent snapping
158
+
159
+ todonext
160
+ brainstorm: `So what is the FAIREST way to prevent lag from HELPING.`
161
+
162
+ == Test app, with top down "shooter"? (with just circles and lines?)
163
+ - Movement will be "key" based, not frame based. This is harder to implement, but takes a lot of pressure off of the synchronization code.
164
+ - And... the look position will be... just blurred, using maybe 3 ticks per second? Ugh... that will work, for now. Eventually the synchronization code SHOULD be fast enough to handle 30 ticks per second, but presently... our overhead is just too high, and so operations that SHOULD take ~10ns take ~10us, and there's not much we can do about it until we use deltas everywhere, and replace our usage of proxies.
165
+ - This makes the physics system harder to calculate. Although, not that much harder, as we can still use the "intersection" approach, if we want to, by locally emitting positions per frame.
166
+ - I'm not sure the best way to do it... intersections (which really decomposes into line segments) is nice, as it allows infinite precision.
167
+ - Of course, if we have any concept of "gravity", then it is harder. BUT, even then, we can still extrapolate, and even better, we can extrapolate with non-linear segments, which is really the best way to do those kind of simulations anyways.
168
+ - We WOULD require a way to make the non-linear segments update consistently, but... as we have a universal time, that wouldn't be so hard...
169
+ - WELL, no matter what we need some kind of snapshots, to prevent having to re-simulate TOO much state.
170
+ - So... we can have "keys" only last for a certain period of time, and after that, movement stops. Of course, if we had gravity this would make it possible to freeze in mid-air, but, eh... it is what it is.
171
+ - AND, if we had any NPCs or AI, those would require an app server, or some kind of general purpose interval function calling server anyways...
172
+ == Add AI to automatically move around nodes, and automatically shoot
173
+ == Add multiple nodes, with various latencies, which fluctuate with different magnitudes
174
+ - We should see nodes jumping around
175
+ - It will be hard to hit laggy nodes, as you don't know where they are!
176
+
177
+ Delayed prediction rejection
178
+ - Not sure if this is needed, but in theory if there is contention a function might always require multiple runs to be correct. This means our prediction will always be rejected. BUT, it is probably pretty close, so... we should just keep it around for a bit, until FunctionRunner finishes up the function, giving us the most up to date state.
179
+
180
+ Past time client reading
181
+ - Intentionally read client positions older than the latest, in order to produce a smoother state.
182
+ - Will read a combination of a fraction + value in the past (so clients that are 1000ms +/- 500ms
183
+ still look like they are smooth)
184
+ - The readLocks will have to have an empty time range (as we KNOW their range will have invalid values)
185
+ - At least for write values. For the UI, this doesn't matter, the UI is readonly
186
+ - Bakes in these values to the function call when asking the server to run the function
187
+ - When we evaluate the function call, if those paths are read, we have to adjust the read times
188
+ - OF COURSE, only if those paths are read with the request for "past time client reading"
189
+ - Allow CUSTOM limiting (we always limit to a few minutes), of time in the past, adjusting the read to be more recent if it is too old (by the Querysub, on received time)
190
+ - This is important in competitive games to prevent things from undoing a lot of other state, just because one client is lagging badly.
191
+ - TWO settings, one on a global level, and another on a path level, inside of FunctionRunner (using the writeTime as the base)
192
+ - MAYBE we actually limit read times based on the writeTime, which we can use to estimate the lag. Ex, they can only read back as far as 4X their current write lag? The default won't be to do this check, but... for competitive situations we do want to do it...
193
+ - The writeTime will still be the client writeTime, so by default we preserve client side-effect ordering (ex, cast "shield", then "fire", so the shield protects against the fire)
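A sketch of the read-time adjustment described above (global cap plus the optional "4x the write lag" limit); the function name, option names, and defaults are assumptions, only the two-limit idea comes from the notes:

```ts
// Clamp a "past time" read so it is never older than the global cap, nor older
// than lagFactor * the caller's current write lag. Names are illustrative.
function adjustPastReadTime(opts: {
    requestedReadTime: number;   // what the client asked to read at
    receivedTime: number;        // when the Querysub node received the call
    clientWriteTime: number;     // the client's writeTime for this call
    maxAgeMs?: number;           // global cap, "a few minutes"
    lagFactor?: number;          // e.g. 4x the current write lag, opt-in
}): number {
    const { requestedReadTime, receivedTime, clientWriteTime } = opts;
    const maxAgeMs = opts.maxAgeMs ?? 3 * 60 * 1000;
    const writeLag = Math.max(0, receivedTime - clientWriteTime);
    const lagLimit = opts.lagFactor !== undefined
        ? receivedTime - opts.lagFactor * writeLag
        : -Infinity;
    const oldestAllowed = Math.max(receivedTime - maxAgeMs, lagLimit);
    // Adjust the read to be more recent if it is too old.
    return Math.max(requestedReadTime, oldestAllowed);
}
```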
194
+
195
+ ++ Fixes nodes jumping around, as well as making it hard to hit laggy nodes
196
+
197
+ == Add a check so that dead nodes can't shoot anyone (which we might already have as a check?)
198
+ == Laggy clients will be hard to kill, because by the time they know they are dead, they will have killed their attacker in the past, so they won't be dead anymore!
199
+
200
+ Ignored client write times?
201
+ - With support of "past time reads", we can always change the writeTime (somewhat, we will have to add a special case where all reads are now forced to be past time), without causing a rejection.
202
+ - This would need to be done on a function level (instead of on a path value)
203
+ - This would allow resurrecting bad values, as well as changing the order of effects (ex, shield, then cast fire. BUT, if shield is run at server write time, it could happen after the fire, and so the fire could affect you even though you KNOW the order was correct).
204
+ - Which... is fine, we just have to make it clear that this reorders the function implications
205
+ - ALSO, make it clear that this can cause REALLY bad rejections. So if this is done with movement... the client better have really good rejection handling, or it will jitter like crazy
206
+ - ALSO, if the time of our write is important (ex, if we store keys, and then your position is your timeDelta * velocity), then this breaks that (so it isn't just order sensitive values it impacts, but time sensitive values too)
207
+ Expose writeTime to functions
208
+ - For example, so they can give some leeway in deaths:
209
+ `if (isDead && isDead < writeTime - 50ms) return "deadCantShoot"`
210
+
211
+ ++ Fixes making it hard to kill laggy nodes (by making them shoot at the serverTime, not the clientTime, so they can't surprise clients with "you're dead")
212
+
213
+ == Add "gravity" power, which either accelerates all users towards or away from the user
214
+ == ACTUALLY, I think the problem will be drift. If we are constantly writing, and assuming our predictions are correct, and having them read the previous predictions, we can get really far off from the server
215
+
216
+ Client cascading call resimulation
217
+ - To prevent drift, we can rerun client calls after a rejection. This is a bit annoying, and requires some way to hook directly into rejections (which... actually isn't that hard?)
218
+ - Basically, instead of clobbering all ReadLocks on call predictions, we identify any ReadLocks on other calls, and then watch them explicitly for rejections. If they reject, we re-simulate our call, writing in such a way that we force our previous call to be rejected (which then triggers ourself for future calls, etc, etc).
219
+ - We might need to write to versions MUCH farther in the past, as now we need to increment the version, so... yeah...
220
+ - ONLY if we don't have our result, of course, otherwise we would have no reason to run...
221
+ - Probably do this in a batch, which considers all calls after rejections, then deep compares values, then follows the call chain to get all candidates for rerunning (and the order they should rerun)
222
+ - We then need to write the original PathValues per result, which... is fine.
223
+ - ALWAYS update all the way to the latest, OR, don't update.
224
+ - Add a kind of throttling so we don't always update to the latest, to prevent lag causing huge amounts of cpu work. This will cause large "snaps" when we do resimulate, but... it will prevent the browser from locking up for too long?
225
+ - Set a constant factor (maybe... 60? as 60 * 16 is close to 1000?), of our extra call rate. Then it is relatively easy to throttle, basically, every resolved call value (they are all rejections) adds +60, and every rerun (each value different causes reruns, but can cause many, if there are multiple calls that depend on it) takes away 1 per call.
226
+ - Make sure to measure this time too, as it will be interesting to see how much time it takes
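A sketch of the throttling budget described above (+60 credits per resolved rejection, -1 per dependent rerun); the constant comes from the note, the class and method names are assumptions:

```ts
// Throttle budget from the note: each resolved (rejected) call adds +60 credits,
// each rerun of a dependent call costs 1. Names are illustrative.
const CREDITS_PER_REJECTION = 60; // ~60 * 16ms ≈ 1000ms of extra work allowed

class ResimulationBudget {
    private credits = 0;

    onCallRejected(): void {
        this.credits += CREDITS_PER_REJECTION;
    }

    // Returns true if we are allowed to rerun one dependent call now;
    // otherwise the caller should skip it (and "snap" to the latest state later).
    tryTakeRerun(): boolean {
        if (this.credits <= 0) return false;
        this.credits -= 1;
        return true;
    }
}
```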
227
+
228
+ ++ Fixes drift
229
+
230
+ == Will there be a "jump" when a user stops toggling their power? If we are only looking at their keystrokes in the past, our predictions will always be off, but it should be relatively smooth?
231
+
232
+ == Add collisions
233
+ == Beyond a very small number of users this becomes intractable, due to the inefficiency of calculating collisions
234
+
235
+ PHYSICS HELPER
236
+ - Takes many path+shape+times as input to seed the data
237
+ - Expose a function which takes a shape, and a range of times (as we don't know which path we are reading, so we don't know which past time to use), and gives all paths which might be close to it
238
+ - We then take the paths that MIGHT intersect, and do full collision checks
239
+ - We can do it with "past time" reads or not
240
+ - "past time" reads allows object to phase through each other
241
+ - regular reads causes more rejections, which can cause more server load, and clientside jitter
242
+ - We should make it so one half of the world does regular reads, and the other does "past reads", and see the differences
243
+ - Well, the difference should be, the "past time" world has no backtracking, so if you do a jump puzzle on your end, you did it. BUT, the "past time" world will also have more objects phasing through each other, and you will see other clients jumping on invisible platforms, etc, etc.
244
+ - The "regular time" world will be more consistent, but sometimes you will swear you did something (jumping on a platform), but then you will backtrack and fall through it, because it WAS broken before you could jump on it.
245
+ *** Implement Local cache helper ***
246
+ - If we create a watchFunction with proxyWatcher, we can make the output synchronously accessible to other functions, as a way to allow unsafe reads
247
+ - If anything accesses this AND the accessed watcher has never been fully up to date (if it has been, but now isn't... allow it, I guess?), then the accessor is not fully up to date as well. It doesn't track it as a dependency, but instead as a "watch"
248
+ - AND, when the cache is up to date, the watcher will rerun
249
+ - AND, if the server does the same thing, we will USUALLY result in similar values, BUT, when we don't the server only runs the function once, so it will decide the final result.
250
+ - For things that matter, such as shooting a player, we will want actual dependencies, but this can be used to limit the things we access which we have to depend on
251
+ - Can lazily run, and just uses the database, so it will run in isolation fairly easily
252
+ - When accessing the result, sometimes... register the accessed values as being used (so they aren't unsynced). This might be annoying, and require some batching / delay, but... it is very important!
253
+
254
+ ++ Makes system a lot faster, by reducing rejections that don't change the value, and by increasing the efficiency of comparisons
255
+
256
+ Explicit schema objects accesses
257
+ - Allow defining a strict schema, which defines the names of both the keys and values, with fixed values just being implicit to the schema, and not requiring a key or any storage.
258
+ - Would access values via the schema, which changes how the PathValue is interpreted
259
+ - ALSO, we encode the value using the schema, so that when it is both encoded and interpreted with the schema, the operation becomes VERY fast.
260
+ - ONLY for wire transfer, not archival storage
261
+ - We can probably encode the schema when transferring it, storing all values of the same schema in a row, making it highly compressed, and efficient to decode.
262
+ - Could be used for function calls, which would make permission checks faster
263
+ - Could help permission checks too
264
+ - Would have to support multiple levels, so you can check the root permission check, etc (if all the levels use the schema object, this then becomes very efficient)
265
+ - By default key schemas would derive from the Data schema
266
+ - If values weren't serialized using the schema we can quickly fallback to using the path+value
267
+ - We need a string wrapper so we can pass PathValue and then use our schema to access the path on it
268
+ - We also need to make ALL PathValue.path accesses use this helper function, even if they aren't providing a schema.
269
+ - Right now we have 52 references to PathValue.path, so... this is very doable. Of course this just covers compatibility, but we would also want to allow not even decoding the path, such as in cases where we are just storing it in a Map and checking for ===.
270
+ - A way to access paths to get a value that is not the path, but will preserve === (just threadwide is fine)
271
+ - Using a number would be plausible. IF the path comes from a schema we can make the first 32 bits be associated with the schema, which can make finding a new number a lot faster.
272
+ - AND THEN... the end goal, is to have functions which ONLY use schemas, and then transpile them to a schema-full language (such as C#, or even C).
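A sketch of the wire-only encoding idea above, where values that share a schema are sent as positional rows plus one copy of the key list; the Schema shape and function names are assumptions:

```ts
// Wire-transfer only (not archival storage): keys are sent once per batch,
// rows are positional arrays, so decode is a cheap positional lookup.
type Schema = { id: number; keys: string[] };

function encodeWithSchema(schema: Schema, values: Record<string, unknown>[]): unknown[] {
    // [schemaId, keys, row0, row1, ...]
    return [schema.id, schema.keys, ...values.map(v => schema.keys.map(k => v[k]))];
}

function decodeWithSchema(encoded: unknown[]): Record<string, unknown>[] {
    const [, keys, ...rows] = encoded as [number, string[], ...unknown[][]];
    return rows.map(row =>
        Object.fromEntries(keys.map((k, i) => [k, row[i]]))
    );
}
```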
273
+
274
+ Automatic large value storage separation
275
+ - If a value is > ~10MB, when we write it to archives, we should ALWAYS break it into another file
276
+ - Have this decided via a flag on the PathValue, which can be dynamically set via size, or explicitly set.
277
+ - We should lazily read these values
278
+ - Garbage collect them after a while, when the file referencing them disappears
279
+ - We should create a cache of these on disk as well, so even if we do need to read the values, we can often just read them off of the local disk
280
+ - This will be annoying, but... if we already have disk value offloading, it shouldn't be so bad... AND, could really reduce startup time
281
+
282
+ Archives disk cache value separation
283
+ - If we already support large value separation, we can also separate values from archive value disk cache
284
+ - STILL via some limit
285
+ - Because it is on our own disk, we can seek inside the files, reading JUST the part we want, so the limit can be much lower, probably 1KB?
286
+ - This can reduce startup time even further
287
+
288
+ Network visualization
289
+ - NONE of this will use our PathValue system, and will instead use specialized instrumentation functions
290
+ - Will still be realtime though, and show paths, etc
291
+ - The nodes, and paths, in the browser, with information on time alive, etc
292
+ - Traffic information
293
+
294
+ Multi domain support
295
+ NOTE: Special case LOCAL_DOMAIN, so that we: 1) don't trust anyone else's LOCAL_DOMAIN, and 2) assume no one else trusts our LOCAL_DOMAIN
296
+ - If you call a function on a cross domain, it won't execute inline, but instead result in a write
297
+ - rootCertDomain needs to be removed and updated with a dynamic domain
298
+ - NetworkTrustController needs to maintain trust per domain
299
+ - Only allow readLocks on paths in trusted domains (otherwise throw, not even committing the write).
300
+ - Writes to domains that don't trust us need their readLocks sanitized, so they don't depend on any of our domain values, otherwise our writes will be rejected immediately
301
+ - Ensure we are still ignoring writes from nodes that aren't the authority on them (I think we are, but this becomes even more important, as now we might partially trust a node, accepting some values, but not others)?
302
+
303
+ Querysub bootstrapper + git repo file server + local file system server
304
+ - Basically... instead of requiring a static file server, we can have Querysub automatically host the files / repos based on some data written during deploy (probably specified via a function that is called in something the deploy.ts imports.)
305
+ - Querysub won't even know which servers it is hosting, and just host a generic bootstrapper, which when run on the client will just do a regular data access with the domain + path to get the entry point
306
+ - Querysub will basically just have a utility to go from gitrepo => file, WITH local development file system support.
307
+
308
+
309
+ Automatic schema usage/generation
310
+ - Parse the types to generate the schema, then automatically update accesses to use the schema object
311
+
312
+ PathValue.value disk cache
313
+ - If an entire server (PathValue server and Querysub) NEVER uses path values, we can move PathValue.values onto the disk, only reading them off disk when we send values over the wire
314
+ - This will have to be done explicitly, via a setting in AuthorityPathValueStorage
315
+ - The values will be read off disk by our serializer, which will ask AuthorityPathValueStorage to read all the real values off disk
316
+ - The FunctionRunner SHOULDN'T need this, as all of the values it stores should have been used semi-recently...
317
+ - Actually, really only the PathValue server requires this, as otherwise we discard values after ClientWatcher.WATCH_STICK_TIME, which is presently 10 seconds!!!??? But even at a few minutes, that value won't be too low. What are we going to do, render 2GB of images at once? (and if it is video we need to stream it anyways, likely from another storage source, as there is no reason to use PathValues for videos...)
318
+
319
+ Optimizations
320
+ - Client storage of values, allowing the server to only send Time for large value the client doesn't have
321
+ - The client can just skip adding the PathValue, BUT, keep track of the last thing that is waiting to add. Then if the value is superseded anyway, it just won't add the full value, after it receives it.
322
+ - If the values are VERY large the server might be storing them on disk, so this allows the server to avoid even reading them off disk, which is extra efficient.
323
+ - One time subscriptions + polling subscriptions
324
+ - For large data that changes frequently this might be better?
325
+ - Sending very large values seems to cause something to block for a really long time. Not sure what, but... sending 100K writes with writeOnly, and dontStorePredictions, locks up for minutes at a time, then allows a lot of values to get through, then locks up?
326
+ - 10K writes is a lot better, and 1K writes is really fast.
327
+ - Maybe... writes on the websocket are buffered, and then the socket disconnects due to having too much to send at once, and only reconnects later due to some random polling?
328
+ - MAYBE we just need better server enforced throttling?
329
+ - OR, if the problem is that the functions are getting rejected (which I am 99% sure is the problem), we should support "updateWriteTimeIfNeeded", and see if that fixes it?
330
+ - AND, we also need to handle streaming the updates better in FunctionRunner. It is kind of just batching everything, when it needs to wait (maybe with an "io" delay), to let the outputs clear?
331
+ - Delta supporting watcher
332
+ - Take all places that iterate over all paths, and have them work on deltas
333
+ - The callback will still have to process the changed paths directly (although it will have access to any state), and indicate the deltas to the watches
334
+ - This could be used in FunctionRunner to make it more efficient
335
+ - If we track when a path was created, wait longer to archive (2.5 times MAX_CHANGE_AGE), and then detect that it was deleted < MAX_CHANGE_AGE * 0.5 after it was created... we can avoid archiving it. But... then we need to clean up the tracking of when it was created, and that's a whole thing, so... it's just difficult...
336
+ - The server should batch forwardWrites when it receives them. That way if it is lagging due to a slow (synchronous) operation, a lot of values will buffer up on the network (but the batch queue will be empty, as we will have synchronously emptied it), and then when we finish the request we will have the batch time to gather up all the network requests (which should be a lot! as we can handle a lot of bytes in even just 10ms!).
337
+ - Of course, this adds ANOTHER 10ms delay, but... that should be fine, we won't get forwarded values often...
338
+ - We don't need to send back predicted values, when the prediction is accepted
339
+ - Client side we should only trigger watchers when a watched value actually changes (not just when there are any changes on the watched paths, at any point in history)
340
+ - Client syncing thrashing
341
+ - When we no longer need a value, we should stop immediately clearing it, and just mark it as unneeded, and then clear and unsync it a bit later on. This will take more memory, but allow navigation to be quite a bit faster
342
+ - Optimize parent accesses to not do brute force `.startsWith` searches, and instead maintain some kind of a lookup?
343
+ - Maybe... although, is this even much faster? It will certainly slow down writes.
344
+ - Optimize readLocks
345
+ - Each write stores readLocks very redundantly, making each function have `WRITES * READS` readLocks, which takes a lot to store, and requires a lot to synchronize.
346
+ - We can probably store a lockHash, to quickly detect duplicates
347
+ - Wire calls can use lockHash to depopulate duplicates, then re-populate them on the other side (see the lockHash sketch after this list)
348
+ - Such as commitWrites/forwardWrites, etc
349
+ - Our storage can depopulate duplicates when storing as well, maybe even storing readLocks in another file?
350
+ - Clear old pathValueProxy cache
351
+ - Better object comparison in certain values when we compare PathValue.value values? (only really clientside, I think?)
352
+ - As well as more object comparison checks, to not notify if the value is set to an identical value.
353
+ - Streaming / waiting during storage operations
354
+ - For example, we can stringify the file in chunks, instead of all at once, that way we don't block for > 100ms. We should wait ~10ms, giving a lot of requests time to clear.
355
+ - This isn't going to be especially big for large datasets, which may require merging everything at once, which with our current algorithm could take seconds!
356
+ - More efficient wire size and serialization speed
357
+ - If we write to 200K number values, which all depend on the previous states of 200K values, we end up sending 94MB, which takes 1200ms to serialize. We also receive a lot of data back for all the valid states. It might not be easy, but... it would be nice to optimize this, in some way.
358
+ - Probably via schema handling?
359
+ - ALSO, 94MB exceeds our default packet limit, so it doesn't even send! We could fix this with compression, but then it would take even longer to serialize!
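A sketch of the lockHash idea from the readLock optimization above: hash a canonicalized readLock set once, send the full set only on first occurrence, and reference it by hash afterwards; createHash is Node's crypto, the ReadLock shape and function names are assumptions:

```ts
import { createHash } from "crypto";

type ReadLock = { path: string; timeStart: number; timeEnd: number };

// Order-independent fingerprint of a readLock set, used to detect duplicates.
function lockHash(locks: ReadLock[]): string {
    const canonical = locks
        .map(l => `${l.path}|${l.timeStart}|${l.timeEnd}`)
        .sort()
        .join("\n");
    return createHash("sha256").update(canonical).digest("base64");
}

// Wire-side dedup: the first occurrence carries the locks, later ones only the hash.
function dedupeForWire(writes: { locks: ReadLock[] }[]) {
    const seen = new Set<string>();
    return writes.map(w => {
        const hash = lockHash(w.locks);
        if (seen.has(hash)) return { lockHash: hash };
        seen.add(hash);
        return { lockHash: hash, locks: w.locks };
    });
}
```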
360
+
361
+ Forward retry code
362
+ - We should maybe retry forwarding writes if we can't forward them (and we aren't an authority)?
363
+
364
+ Wait a bit before loading values, in case a server wrote values, died, but the remote storage system isn't serving the read back yet?
365
+
366
+ Changing authorities bug
367
+ - If an authority changes, it is possible for it to receive values it doesn't care about, and then respond saying the results are valid. However, they aren't, instead it just ignored the values.
368
+ - ALSO, it is possible a client ends up watching valid states from multiple authorities, due to being unsure if any are down, etc.
369
+ - AND, we could end up subscribed to multiple authorities for a single path (due to watchValueLocks), which will result in thrashing.
370
+
371
+ Prediction limiting / server overload protection
372
+ - If the PathValue server gets too far behind, we should stop predicting values, and start waiting for updates until the server can catch up
373
+ - This can also help when the server goes down, OR, more likely (hopefully), when the client's internet goes down.
374
+ - ALSO, the first thing to try is really to try switching servers, which will give us a form of automatic load balancing
375
+ - Although... if the limiting factor is our internet... then switching servers will actually make it worse. So... idk... this is actually difficult!
376
+ - MAYBE the server has to ask us to switch servers, and all we do when we find slowness is to slow down our writes?
377
+
378
+ Clientside function threading
379
+ - Clientside it would be fantastic, as it would allow render functions to NOT block the render thread. This means we could run multiple threads, keeping the application responsive even if a very slow component is rendering.
380
+ - We can convert dom event callbacks (or any closed values) to { functionContents, closedValues: { [variableName: string]: unknown } }. If the variable values are either serializable, OR, can be linked to exports (ex, misc_1.isEmpty), then we can recreate an identical function just by using eval.
381
+ - If we do this, we would ALWAYS want to recreate the function. This ensures there is consistency if we call the callback cross thread, or on the same thread.
382
+ - We can store this style inside of the synced state, and make it available to be called directly.
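A sketch of the { functionContents, closedValues } transport described above; the shape comes from the note, but how closed-over values are collected (passed in explicitly here) and the use of Function/eval to rebuild the callback are assumptions:

```ts
// Portable callback shape from the note; everything else is illustrative.
type PortableCallback = {
    functionContents: string;                    // e.g. fn.toString()
    closedValues: { [variableName: string]: unknown };
};

function toPortable(fn: Function, closedValues: Record<string, unknown>): PortableCallback {
    return { functionContents: fn.toString(), closedValues };
}

function fromPortable(p: PortableCallback): Function {
    const names = Object.keys(p.closedValues);
    const values = names.map(n => p.closedValues[n]);
    // Rebuild the closure by declaring the closed-over names as parameters of an
    // outer function, then evaluating the original function expression inside it.
    // Only works for serializable values or values linkable to exports, per the note.
    // eslint-disable-next-line no-new-func
    const factory = new Function(...names, `return (${p.functionContents});`);
    return factory(...values);
}
```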
383
+
384
+ Serverside function threading
385
+ - Required to kill very slow running functions
386
+ - They will of course be able to change their default timeout time
387
+ - They will need to be given an error result, to prevent them from rerunning
388
+
389
+ Serverside rendering
390
+ - Create a mounter which can go from vnode => string
391
+ - Clientside, probably render from scratch, and just have our clientside mounter check for existing DOM nodes (probably just when instructed to, which will happen on initial render, maybe... always on initial render?)
392
+ - Events COULD be an issue, or... we could just assume any unknown nodes have no event handlers?
393
+ - I THINK we can probably ignore events before we render, at least for version 1. The app is mostly a SPA anyways, so the time before render should be VERY short, and then everything should involve re-renders and NOT re-navigations. For version 2 we can capture everything globally via a built in script tag, and then re-trigger them once we actually mount (and either block, or delay links, so we don't get in a state of constantly trying to load/render, getting 50% of the way there, but then the user navigates via a link).
394
+
395
+ TESTS
396
+ - Tests which track the code used so we know which tests need to be rerun
397
+ - Render tests, which render JSX and paint it
398
+ - Can use a partial JSX => paint cache, so most of the time if the output doesn't change, OR, if the output doesn't change enough to change how it is painted, we don't have to repaint it.
399
+ - Also, a test harness that can click on vnodes and simulate their changes would be incredibly useful!
400
+ - This would get rid of the chrome javascript runtime step. We might still use puppeteer, but also... it is possible we could just use blink, or some other library, to just go from HTML => image extremely fast (realistically most pages should take < 50ms to render)
401
+ - True test coverage checking
402
+ - NOOP expressions (x + y becomes 0, etc, a function call that returns a number becomes 0, etc, etc), and see if any test changes. If not... then that line does not have test coverage
403
+
404
+ === Development tools feature complete | TASKS = 22 ===
405
+
406
+ Latency / load weighted node selection
407
+ - When there are multiple candidates, prefer the closer nodes
408
+ - ALSO, prefer nodes that have less load, allowing a form of automatic network balancing
409
+ - The ability to switch nodes would be nice as well
410
+ - Switching PathValueController nodes should be easy, we just subscribe everything to a new node, then disconnect from the previous node.
411
+ - We can switch Querysub nodes easily (using multiple), disconnecting from the previous ones as soon as all calls on them have finished running (really that is true for most nodes, we don't want to disconnect when we have pending calls!)
412
+
413
+ Speculative function calls (calling functions that haven't been requested yet, just to sync the state they might need)
414
+ - Will be a bit involved, and might even require some clientside cooperation (ex, to detect which actions are possible via which event handlers are being used), but... could result in a HUGE speed improvement
415
+ - Will have to determine what functions are most likely to be called
416
+ - We will likely need to give some context information for functions, or at least know their index, and maybe even page position? And maybe even the mouse position?
417
+ - If we run functions calls when the user moves their mouse towards a button we can probably increase our estimate quality with little effort
418
+ - We should test to see how long the mouse dwells over a button before clicking. Even 10ms would be useful. And also the average mouse speed. If we find users usually click after dwelling for say... 10ms, but take 20ms to click, we can get a 10ms headstart on the call!
419
+ - Will re-render after the click and then pre-sync the new reads needed
420
+ - If we do this on another thread, and somehow set the new syncs with low priority... this should be basically free (as if the user is moving their mouse around it isn't a background task, and we can at least monopolize a single core!)
421
+ - Will also pre-populate the clientside function cache, so when they really click it we can get the prediction very fast
422
+
423
+ Delta based render cache
424
+ - The best idea is to mark reads as being delta allowed, and THEN, marking output values as being reduceable. Then we just run with partial values given to the read, and merge the new writes with the old writes. IF the writes differ in any places that are not reducable... then we have to run with the entire set of data (and it results in a warning). But if not... it just works!
425
+ - The reduce functions can be
426
+ We already know the delta when we re-run watchers (as we reran them for a reason!),
427
+ - Maybe at the component level? Although... I kind of want to implement it for just arbitrary data
428
+ - A function will have the ability to read from an object/list with a "delta" read
429
+ - It will still receive the object/list, it might just be partial
430
+ - EXCEPT, we might need to return special state for deletions?
431
+ - If the only reads of the object/list are delta, then the next time there is a change, only the new/changed values are provided
432
+ - The output is analyzed, and the differences between the new and previous output are found
433
+ - For every different write, we determine the update technique
434
+ - This will probably involve setting some special value on the write?
435
+ - Examples would be "sum" or "join" or "insertSorted"
436
+ - The most generic case receives the previous state (that has changed, already drilled down), and the new state, and updates the previous state (in memory) so it now has the new state as well.
437
+ - We need to handle writes that propagate deletions
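A sketch of the reduce step described above: each changed output write carries an update technique, and the previous output is patched in place instead of recomputed; the technique names come from the note, the dispatch function is an assumption:

```ts
type UpdateTechnique = "sum" | "join" | "insertSorted";

// Merge one delta into the previously cached output for a write.
function applyDelta(previous: unknown, delta: unknown, technique: UpdateTechnique): unknown {
    switch (technique) {
        case "sum":
            return (previous as number) + (delta as number);
        case "join":
            return (previous as string) + (delta as string);
        case "insertSorted": {
            const next = [...(previous as number[]), delta as number];
            next.sort((a, b) => a - b);
            return next;
        }
        default:
            // Unknown technique: fall back to rerunning with the entire data set.
            return previous;
    }
}
```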
438
+
439
+ Blocking FunctionRunner call mode
440
+ - Will probably be slower in most cases though, as each value access requires a round trip with an authority to read the value
441
+ - Might be useful for code with expensive side-effects?
442
+ - Add a BLOCKING mode to PathValueProxyWatcher, which uses Atomics.wait to block until a read value is synced.
443
+ - This potentially allows for efficient function evaluation, by not requiring any code to rerun.
444
+ - This is only possible if something can mark a value as synced on another thread, and absolutely not if we are single threaded (Atomics.wait wouldn't even work on the main thread of a single thread app anyways)
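A sketch of the blocking read described above: one Int32 slot per awaited path in a SharedArrayBuffer, a worker thread waits on it with Atomics.wait, and whichever thread finishes syncing the value stores a flag and notifies; all names and the slot scheme are assumptions:

```ts
// Worker-thread only: Atomics.wait is not allowed on a browser main thread.
const SYNCED = 1;

function waitForSync(flags: Int32Array, slot: number, timeoutMs = 5000): boolean {
    while (Atomics.load(flags, slot) !== SYNCED) {
        const result = Atomics.wait(flags, slot, 0, timeoutMs);
        if (result === "timed-out") return false;
    }
    return true;
}

// Called by whichever thread marks the value as synced.
function markSynced(flags: Int32Array, slot: number): void {
    Atomics.store(flags, slot, SYNCED);
    Atomics.notify(flags, slot);
}
```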
445
+
446
+ Function results!
447
+ - Specially marked "specializedHardwareCalls" functions will be able to "return" a result
448
+ - Only possible with blocking FunctionRunner mode, as for the functions we want to return results we also only want to run them once (for expensive operations)
449
+ - Of course, this doesn't prevent: 1) slowness cascading through syncing, or 2) result invalidation
450
+ - Hmm... we also want the ability to cancel calls?
451
+ - Maybe calls can do this without themselves?
452
+ - Hmm... if the calls are so slow... maybe the user should just manage a queue themselves, so they can see what is going on? It really isn't that hard, and if they have to manage special hardware anyway... then it isn't like this would prevent them from just quickly scaling a function?
453
+ - At this point we should have support for unsafe function writes. We MIGHT want to make the writes from this unsafe by default?
454
+ - By default probably allow write time adjustment, to prevent invalidation (at least when we start the function call)
455
+ - The return object will be { type: Symbol("hardwareCall"), call: number, path: ".,querysub.com,.test" }
456
+ - ALSO, unioned with a type for an error { type: "error", errorMessage: string; errorMessageFull: string; }?
457
+ - The function return types will be modified so any callers have to handle this symbol type
458
+ - The proxy watcher will temporarily register to receive all calls, so it can call them
459
+ - The proxy watcher will search the output writes for these values
460
+ - Only shallowly, which is fine for now, as if they nest it I believe the Symbol will cause a throw, so the wrong value won't be written and not noticed
461
+ - The proxy watcher will then actually construct the call objects, telling them where to place their results
462
+ - When the FunctionRunner evaluates the calls, it will place the results in the specified paths
463
+
464
+ Support VERY large data sets (> local disk size)
465
+ - If an authority hasn't been used for a while, dump the memory for it, and... become "unready". Then, if someone accesses it, go about loading it again.
466
+ - This only works if we are sharded, but there isn't really a better way.
467
+ - We should test this with some very large values.
468
+
469
+ Automatic sharding of ValuePath nodes
470
+ - For KVP nodes as well as Function nodes
471
+ - More important for function nodes, but... it could help for KVP nodes as well?
472
+ - IF we have high contention, and many servers, the resolving process takes SERVER_COUNT * SERVER_LATENCY, which could be high. Automatic balancing can help fix this, by finding a way to identify overlapping writes via reads (ex, spatially hash read paths, and if those hashes are ===, see if the spatial hash of the write paths are as well).
473
+ - This reduces the SERVER_COUNT involved in contention, which... fixes the issue!
474
+ - We will need to support remotePathAuthority to get this to work
475
+ - Needs to be somewhat generic, so we can use it for both PathValueController, and FunctionRunnerController
476
+ - Nodes will inform their parent path nodes of their utilization, so parent nodes can redistribute child nodes accordingly
477
+ - Oversaturated nodes will split, pushing newer nodes down to more specific traffic
478
+ - Ex, from ["x"] to ["x", "x.[0 <= hash < 0.5]"], ["x.[0.5 <= hash < 1]"]. AND, when a node has enough descendants, it will stop with all child paths, and the parent will just be ["x"]
479
+ - BUT, we should only shard when needed. Otherwise syncing parents can become overly expensive? (Ex, if we have 100 servers, we really wouldn't want to split a small collection across 100 servers, as then it would take a huge amount of work to sync the keys, and if there are < 100 keys, it would be needlessly slow)
480
+ - Nodes whose parents have insufficient members will remerge with their parent (perhaps completely taking over the parent, if the parent completely disappears)
481
+ - Nodes with child nodes with insufficient members will redistribute, or remerge into one path, as needed
482
+ - Test on a local machine, spawning many processes, killing them, etc
483
+ - We need to fix our path limitation thing
484
+ - Detect the size limit of our underlying file system, and use that
485
+ - This allows us to turn on long paths on windows
486
+ - WARN if long paths are not enabled
487
+ - Use this both to constrain balancing AND to have archives write to the most specific path possible, within the current limits
488
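+ A minimal sketch of the hash-range splitting in the example above (the hash function and node shapes are assumptions; a real version would live in the path authority balancing code):
+   import * as crypto from "crypto";
+
+   // Map a key into [0, 1), so ranges like [0 <= hash < 0.5] can be assigned to child nodes
+   function keyHash(key: string): number {
+       const digest = crypto.createHash("sha256").update(key).digest();
+       return digest.readUInt32BE(0) / 0x1_0000_0000;
+   }
+
+   interface ShardRange { hashStart: number; hashEnd: number; node: string; }
+
+   // Ex: ["x"] split into two children once it is oversaturated
+   const children: ShardRange[] = [
+       { hashStart: 0, hashEnd: 0.5, node: "x.[0 <= hash < 0.5]" },
+       { hashStart: 0.5, hashEnd: 1, node: "x.[0.5 <= hash < 1]" },
+   ];
+
+   function pickChild(key: string): ShardRange {
+       const h = keyHash(key);
+       return children.find(c => h >= c.hashStart && h < c.hashEnd)!;
+   }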
+
489
+ Programmatic digital ocean droplet launching
490
+ - We will want some kind of administration page that the app serves by default, requiring NetworkTrust and localhost to access.
491
+ - From this page we should be able to:
492
+ - import digital ocean api keys
493
+ - see all currently configured droplets
494
+ - launch new droplets
495
+ - All launched droplets will be automatically configured with the code, given trust, and configured to auto restart all the necessary processes, etc (so they should be able to run unattended after starting)
496
+ - This means we can get rid of the emails to add trust, as nodes will no longer just "appear", but instead be instructed to appear, at which point we can explicitly ask for their public key and trust it (we should never provide the key, as this is incompatible with a transition to hardware keys).
497
+ - We also want the ability to monitor traffic, and make recommendations about having less/more droplets
498
+ - These recommendations should display in the UI
499
+ - There should be daily or weekly emails about recommendations to some admin email account.
500
+ - EVENTUALLY we might have these recommendations automatically followed, for scaling, although maybe only to certain degrees, as automatic scaling can be dangerous.
501
+ - AND, it would be nice if we could view/administer MULTIPLE domains at once, OR AT LEAST summarize traffic recommendations for multiple domains at once.
502
+
503
+ Automatic digital ocean droplet launching
504
+
505
+ History for the purposes of reverting data
506
+ - EVEN just backups works, but... storing a transaction log is preferred
507
+ - If we store writes, we can re-simulate to a previous point in time. It might be slow, but it is better than losing all your data due to a database drop.
508
+ - If we store clobbered data as well, then we will be able to restore even if our history size exceeds the max and needs to be truncated
509
+ - Kind of required if we want to use it to store any important data...
510
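+ A minimal sketch of re-simulating state from a stored write log (the entry shape, including the clobbered data, is an assumption):
+   interface LoggedWrite { time: number; path: string; value: unknown; clobbered?: unknown; }
+
+   // Replay writes up to a target time; slow, but recovers state after a database drop
+   function restoreTo(log: LoggedWrite[], targetTime: number): Map<string, unknown> {
+       const state = new Map<string, unknown>();
+       for (const write of log) {
+           if (write.time > targetTime) break; // the log is assumed to be time ordered
+           state.set(write.path, write.value);
+       }
+       return state;
+   }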
+
511
+ Fix hardcoded email and TOS accept in certAuthority.ts
512
+
513
+ AST parsing to detect global side-effects in functions
514
+ - Global side-effects are going to break things. We should at least mark root functions that do this, OR, that have any child calls that do this.
515
+ - Who knows what libraries we call are going to be doing, they could have all kinds of unintended side effects involving the disk and who knows what else!
516
+ - The strictest mode would be to disallow global side-effects unless it is enabled per function call
517
+ - ALSO, AST parsing to detect module side-effects (that impact values outside of that module)
518
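+ A rough sketch of what the detection could look like with the TypeScript compiler API (only catches obvious assignments through globalThis/global/process; real detection would need symbol resolution and call-graph analysis):
+   import * as ts from "typescript";
+
+   function hasObviousGlobalWrites(sourceText: string): boolean {
+       const file = ts.createSourceFile("fn.ts", sourceText, ts.ScriptTarget.Latest, true);
+       let found = false;
+       const visit = (node: ts.Node) => {
+           if (
+               ts.isBinaryExpression(node)
+               && node.operatorToken.kind === ts.SyntaxKind.EqualsToken
+               && ts.isPropertyAccessExpression(node.left)
+               && ["globalThis", "global", "process"].includes(node.left.expression.getText(file))
+           ) {
+               found = true;
+           }
+           ts.forEachChild(node, visit);
+       };
+       visit(file);
+       return found;
+   }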
+
519
+ Write proxy replacing
520
+ - If we have a lot of local writes it can get slow, due to the proxy. If we replace the writes with function calls it could allow a lot more local writes.
521
+
522
+ Schemaed proxy replacing accesses
523
+ - Instead of passing an array, find common accesses and replace with a call to a factory function to create a schema, which generates a function, which we then pass the remaining variables to
524
+ - Try to put all of these beside the databaseTyped (or at least at a module level!), so we can view the code without source maps and see all the examples of accesses. There will be 1 for each line that reads from the database (that we can find)
525
+ - If we set this schema in the write itself we can greatly reduce write overhead (especially if we have keys that are numbers).
526
+ - We can send a binary format to forwardWrites and onValues
527
+ - Maybe just for the wire communication. But sending a buffer as the payload, instead of a value.
528
+
529
+ Fix multi authority writes
530
+ - Multi authority writes are annoying, because it causes issues with predictions. As in, you write, predict your value, but then have to wait because one authority is taking a long time to resolve. THEN, another write uses our prediction, but this time only uses the fast authority. This results in the fast authority rejecting our write, until we can send it the full writes, which may take a while if the slow authority isn't resolving!
531
+ - AND, if the slow authority takes a REALLY long time, we might never commit the write, resulting in a valid state, BUT, we can no longer commit any writes! Although, maybe this state is okay, as if we can't write the full write... we probably shouldn't write?
532
+ - And... maybe this whole issue is fine, as the authorities will eventually resolve, and then it will be fine...
533
+
534
+ === Shippable features complete | TASKS = 12 ===
535
+
536
+ Triggers / data pipeline / autorun
537
+ - Basically an abstraction of what FunctionRunner does (where it watches the database for values, and then writes other values to the database)
538
+ - Setup via a value in the database
539
+ - Like a function write, but, this function reruns every time anything it read changes
540
+ - Will keep the same thread (maybe a dedicated thread?) for a long period of time
541
+ - The ability to watch data, calling a non-committed function when it changes
542
+ - The function that is triggered can call synced functions, committing values
543
+ - Will be able to sync a lot of state, and also keep values in untracked state (if it wants to)
544
+ ALTERNATIVE
545
+ - Just use FunctionRunner, and if you need to use dedicated hardware, use another domain
546
+
547
+ 1.3) Buffer support in SocketFunction? Hmm... I mean, if we have GB sized files, that would be required... And sending a 20GB video file is maybe feasible? Hmm... or... maybe not? Maybe it is more useful for 1MB chunks, which we don't want to have to decode each time? So this is really just "increase data throughput"?
548
+ - Essentially, we want a JSON.stringify that converts to a Buffer, and also supports Buffers.
549
+ - We will probably eventually support threads, so... maybe we just use Accessors2? Hmm... Or at least some rework of Accessors2?
550
+ - Would also need to support it in function argument serialization / deserialization
551
+
552
+ Error notification system
553
+ - Some kind of immediate notification + email digest system
554
+
555
+ Runtime FunctionController type checking
556
+ - We should check the type of all arguments against their typescript type by default. That way accidental prototype contamination is less likely. And maybe there are other security vulnerabilities?
557
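+ A hedged sketch of a runtime guard against prototype contamination (the real check should be generated from the TypeScript types; the names here are made up):
+   const FORBIDDEN_KEYS = new Set(["__proto__", "constructor", "prototype"]);
+
+   function assertSafeArgs(args: unknown[]): void {
+       const visit = (value: unknown) => {
+           if (value && typeof value === "object") {
+               for (const key of Object.keys(value)) {
+                   if (FORBIDDEN_KEYS.has(key)) {
+                       throw new Error(`Unsafe key in function arguments: ${key}`);
+                   }
+                   visit((value as Record<string, unknown>)[key]);
+               }
+           }
+       };
+       args.forEach(visit);
+   }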
+
558
+ Backups
559
+ - Taking our entire database, and backing it up periodically
560
+ - We can probably check the time in the last backup, and use that to infer what values will definitely be in it, using this to backup less data (nothing that is already backed up), making this efficient (to write, not necessarily to read)
561
+ - Once in a while we would want to write the full state, otherwise after a few thousand backups restoring could take forever.
562
+ - We can also be somewhat efficient about reading from the database files, using block create time to know if they are only old and already backed up data.
563
+ - At this point we will already have a management interface, so... this should be configurable via that
564
+ - We will have to think about how to rate limit it. A lot of databases would probably work by just streaming every single write, then compressing them after they exceed a certain total size.
565
+ - AND, when we compress, we probably want to default to the "compress old data more", so we have logarithmic seeking capabilities.
566
+ - Might be made redundant by the history navigation feature?
567
+
568
+ === Feature complete | TASKS = 3 ===
569
+
570
+ Path queries for synchronization
571
+ - We could support a way to query paths (likely via a parent selector), to do synchronizations. It makes it slower for the server to continue to send values (each new value requires evaluating the selector), but... it could make selecting large amounts of data (millions of values) much faster?
572
+
573
+ Local network call forwarding / neighbor prediction
574
+ - If there is a write on the local network we can get it faster than waiting for it to go to the server and back (by local network it mostly means on the same machine, but different process / browser tab / thread).
575
+ - We will need to ingest these values as regular predictions, except because we aren't writing them we also need to watch for their valid states
576
+ - We also aren't told whether the write is even committed or not (which tells us if it was EVER valid), so... we basically need to assume the value is invalid if (AFTER the valid watch is done), we don't get a valid notification after a short period of time
577
+ - We also need to validate writes coming from neighbors, to ensure they are recent enough, and maybe we should do some other checks?
578
+ - Having this work for function writes would be nice, but... we REALLY have to trust the neighbor for this to work?
579
+ - Well... we could always run the permissions checks? In which case... I guess it would be safe?
580
+ - Oh, well.. maybe permissions checks aren't that important. If they are on the same network they are sitting in the same room, so they are somewhat trusted!!!
581
+ (And worst case they just cause the data to temporarily change, which isn't the end of the world...)
582
+ - AND, we can have neighbor prediction off by default, and only turned on for certain applications (maybe on by default in the browser).
583
+
584
+ Function call syncing (as opposed to PathValue syncing)
585
+ - If we have a physics based system where we predict the physics loop every tick, we can predict a HUGE amount of values (millions), possibly even using the GPU. HOWEVER, the server won't understand our prediction efficiently, and so it will need to send us all the values anyways!
586
+ - If instead of syncing the paths we use, we sync the functions that impact these paths, we can have a massive state be kept up to date without saturating the network
587
+ - We will have to be told the hash results of these functions, to ensure we are calculating them correctly.
588
+ - So, a server instance will have to basically watch all the paths itself, to figure out which functions apply.
589
+ - And... presumably the results need to be committed anyways, otherwise this is just a different database! Although... if the function runner and KVP owner are on the same machine, the transfer of paths should be somewhat efficient? And only snapshots need to be saved, so... this could work?
590
+ - Basically, you would run the KVP node, and the function runner, on the same machine.
591
+ - Some kind of pipeline would trigger the physics function to tick every frame
592
+ - This pipeline would also be predicted on the clientside, so it can consistently know about ticks
593
+ - Eventually the client will know what functions were run (it should be pretty accurate at knowing this!), And the hash results. The hash results can come later, as the client will know that it will have to simulate old physics results a few times as course changes trickle in.
594
+
595
+ The ability to mark writes as "no persistent storage"
596
+ - Useful for things such as streaming video, which can then have a pipeline that combines it and stores it in B2 (the pipeline would mark what is stored in B2 and the urls for that content, and the rest of the data would be available on the network directly)
597
+ - This makes live streaming possible, but also accessing terabytes of historical data.
598
+ - This is different than just video uploading, which should go directly to B2, because it trickles, requiring something to aggregate it, while also requiring instant access.
599
+
600
+ Read function triggers
601
+ - Allowing setting up a value that calls a function every time it is read, to produce the data (a non-client-predictable function)
602
+ - Allows for exposing an off-network source (like a database) to the network
603
+ - By default will just mark everything as valid, BUT, we could add valid ranges
604
+ - For databases like mongodb we could roughly do it via changestreams, to warn if the value has changed
605
+ - For blockchains we can perfectly detect if the value has changed
606
+ - For the writing to external sources side... we would need "unsafe" functions, that don't add any readLocks (so they can never become rejected). Because we can't reject values in external sources!
607
+
608
+ FunctionController path syncing
609
+ - For a small number of reads, the latency of the read is acceptable for the trade off of reduced complexity of FunctionController.
610
+ - HOWEVER, for functions with a HUGE amount of reads (GBs of data), we will want to sync it instead...
611
+ - We still need to run historical functions, so... we basically need to:
612
+ 1) Ask for ALL future writes on a path
613
+ 2) Ask for all writes on that path (up to what the authority stores, which is up to and including the last golden value, which should be enough history)
614
+ 3) Sync the valid state of all writes we receive, forever
615
+ - The hard part is keeping the synced paths simple. If we just sync every access... then we could be syncing tens of millions of paths... which means not only does an authority have to store that much data, it also has to store watchers for all of it... which... will probably be slow?
616
+ - Although... maybe not... it is just a Map<string, string>?
617
+
618
+ Support arrays, via adding another transparent property
619
+ - Basically the object properties, BUT, it will specify it is an array
620
+ - The proxy will have to be told of this, and... function properly?
621
+ - I think if its object is [] it will work? Maybe? We should double check how proxies handle arrays... (see the sketch below)
622
+ - Does push just map to a set plus a .length update?
623
+ - And what about splices!
624
+ - This will be annoying, and probably slow...
625
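+ A quick check of how a Proxy actually sees array mutation (standard Proxy behavior, not querysub-specific): push maps to a set of the new index followed by a set of .length, while splice produces a mix of index sets/deletes plus a final .length set.
+   const log: string[] = [];
+   const arr = new Proxy<number[]>([], {
+       set(target, prop, value) {
+           log.push(`set ${String(prop)} = ${value}`);
+           return Reflect.set(target, prop, value);
+       },
+       deleteProperty(target, prop) {
+           log.push(`delete ${String(prop)}`);
+           return Reflect.deleteProperty(target, prop);
+       },
+   });
+   arr.push(10);     // logs "set 0 = 10", then "set length = 1"
+   arr.splice(0, 1); // logs "delete 0", then "set length = 0"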
+
626
+ Actual Object.keys() ACID handling
627
+ - Ugh... very annoying, and difficult
628
+ - A path authority can sync child keys for a range of time (similar to how it syncs a path for a range of time), and then compute a hash of them (sort and sha256). It won't be efficient, but... it should work.
629
+ - Will be OPT-IN, because the slowness of hashing the keys is almost never worth the ACID nature of it
630
+
631
+ Object.keyRanges()
632
+ - If we use search patterns that line up with our range sharding, we can efficiently access ranges of keys
633
+ - We will probably need to add more details in our range sharding
634
+ - We might want to specify sharding characteristics via some callback near proxyWatcher.databaseTyped
635
+ - Ex, schema.dataSharding(database => [{ path: database.logs, map: key => +key }])
636
+ - Ex, schema.dataSharding(database => [{ path: database.maps, map: key => key.split("_")[0] }])
637
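+ A hypothetical shape for that dataSharding callback (purely illustrative; databaseTyped's actual types are not defined here):
+   interface ShardingRule {
+       path: unknown;                          // a node from the typed database proxy
+       map: (key: string) => number | string;  // maps each key to its shard bucket
+   }
+   declare function dataSharding<DB>(getRules: (database: DB) => ShardingRule[]): void;
+
+   // Ex (matching the notes above):
+   //   dataSharding<DB>(database => [{ path: database.logs, map: key => +key }]);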
+
638
+ Old data cleanup ability
639
+ - Some way to actually delete old data
640
+ - Maybe via both setting all values to undefined, and having nested undefined no longer stored
641
+
642
+ Ability to selectively undo history
643
+ - Probably recursively by path
644
+ - We would then have to store history under those paths
645
+ - Probably with a limited history (ex, 30 days?)
646
+ - The ability to select changes by values / by time, taking everything in the same changeset (via looking at readLocks), and adding a new change that reverts the change (if the revert value is still the latest)
647
+ - Ex, undo all changes by user for the past day, on specified paths
648
+
649
+ Disk support
650
+ Local disk support instead of backblaze
651
+ - AKA, implementing backblaze using the local disk of many nodes
652
+ Recent changes disk backup
653
+ - Store changes that are too young to be archived on disk
654
+ - CAN'T use the archive format for the values, as archived values are assumed to be valid, so... this needs a new format, which is just used to store changes in case of a crash.
655
+ - If we have a single node, we can stream our changes to the disk, reducing our data loss on crash from ~10 minutes, to < 10 seconds, with very little disk IO
656
+ - We can have a flush time option, which can be set even lower, which will increase IO, but can reduce losses from crash to any point we want.
657
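+ A minimal sketch of streaming recent changes to an append-only log with a configurable flush interval (the file name and change shape are assumptions):
+   import * as fs from "fs";
+
+   interface PendingChange { path: string; value: unknown; time: number; }
+
+   const pending: PendingChange[] = [];
+   const FLUSH_INTERVAL_MS = 10_000; // lower = less data lost on crash, but more disk IO
+
+   function recordChange(change: PendingChange) {
+       pending.push(change);
+   }
+
+   setInterval(() => {
+       if (pending.length === 0) return;
+       const lines = pending.splice(0).map(c => JSON.stringify(c)).join("\n") + "\n";
+       fs.appendFileSync("./recent-changes.log", lines); // append-only, separate from the archive format
+   }, FLUSH_INTERVAL_MS);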
+
658
+ C++ (.cpp) import support
659
+ - Via WASM compilation, which can be done via https://github.com/sliftist/cpp-portable-loader and `clang-wasm`
660
+
661
+ C# import support / other language child processes
662
+ - Would only work on the serverside
663
+ - Would have to run a child process which runs the C# code?
664
+ - If we are going to run another process, we can just run any easy scriptable language. Python would be easy to get working as well. On a smaller scale, Java would work too.
665
+ - Ideally we automatically generate typings, but... even if we didn't, that would be fine.
666
+ - C++ would be useful too! As a lot of the time this is useful for making OS calls, which are easiest in C++.
667
+
668
+ Rust import support
669
+ - Compiling Rust to wasm is even easier than C++, so... we could probably do it fairly easily
670
+ - I'm not sure how to get typings, although they aren't strictly needed
671
+
672
+ === Advanced optimizations | TASKS = 2 ===
673
+
674
+ === \/ Speculative tasks \/ ===
675
+
676
+ - function call results
677
+ - Some way for function calls to return T|undefined, and be given the location they are assigned to, so they can automatically write the result there
678
+ - Is basically the same as adding the path, so... this is probably a good way to do it.
679
+ - Can still work with function predict!
680
+ - Passing the path to the function call is a bit annoying, but... if we return a special value, we can observe where that gets written to in the proxy, so... it isn't so bad...
681
+ - We can even allow writing to multiple output locations
682
+ - Really only makes sense if the function is slow. Otherwise, it should just be an inline function!
683
+ - We should add in function level sharding, so we can exclude function calls from function runners that don't opt into those. That way we can split up slow functions from regular functions.
684
+
685
+ - "limits", to limit resource strain any node can put on PathValueServer
686
+ - limit the total number of paths watched
687
+ - limit the number of writes per second
688
+ - limit the total bytes per second of writes
689
+ - maybe limit onValue callbacks, depending on size, etc
690
+ - Maybe dynamic limits, depending on load
691
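+ A hypothetical config shape for these limits (names and defaults are made up for illustration):
+   interface NodeLimits {
+       maxWatchedPaths: number;
+       maxWritesPerSecond: number;
+       maxWriteBytesPerSecond: number;
+       maxOnValueBytesPerSecond?: number;  // maybe limit onValue callbacks by size too
+   }
+   const defaultLimits: NodeLimits = {
+       maxWatchedPaths: 100_000,
+       maxWritesPerSecond: 1_000,
+       maxWriteBytesPerSecond: 10 * 1024 * 1024,
+   };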
+
692
+
693
+ - Fix our JSON serialization code converting undefined to null inside of arrays
694
+ - This impacts function calls, which can be really annoying / confusing...
695
+
696
+ - Actual query subscriptions
697
+ - Tell the querysub server what function we want to run, and everything it watches will be returned
698
+ - Anything the server misses can be additionally subscribed to
699
+ - This allows the cascading latency penalty to be equal to the querysub server latency. WHICH, if it is on the same server as the PathValue server, can be less than 1ms (we will need to turn off some automatic batching to lower it this far)
700
+ - AND, if the querysub server did a full PathValue sync, asking for all values on all paths... the latency would be 0.
701
+ - We would need to be able to fit the entire database into memory, but... if we allowed `watchRecursive(path)`, and either shared Querysub, OR, had queries explicitly map to the path they wanted (which hopefully would either be small, or shared with another server), then... it would just be efficient...
702
+ - AND, the client could even subscribe to `watchRecursive` as well?
703
+
704
+ Better FunctionRunner startup
705
+ - We should sync all of the repos we will need BEFORE taking any shard space? As the clone could easily take minutes, which would cause functions to timeout.
706
+ - Although... maybe just having it better distributed would solve that problem, as other nodes should automatically pick up the slack?
707
+
708
+ CORS support, so querysub can be accessed by other domains
709
+ - We would read the CORS values from the database, and they would be set in, or similar to "yarn shard-deploy" / "yarn deploy"
710
+ - Only querysub needs CORS, as the browser should know to only access it
711
+
712
+ Serverside "local" writes
713
+ - Update code to special case "local" writes, so that remote writes are sanitized to never use ReadLocks on "local" writes.
714
+ - BUT, allow "local" writes to have ReadLocks on remote writes (IF we are trusted on that domain). Our system should automatically subscribe to valid states, and maintain the valid state of "local" writes. This prevents our "local" values from having invalid values that stick around (as in, in a cache), even though we might end up writing them to a "remote", which would make them invalid.
715
+
716
+ Have deploy log the % of files and % of lines that are NOT allowclient
717
+ - This determines which files we might serve clientside
718
+ - We only really care about files NOT in node_modules, but, in our folder?
719
+ - Although, maybe we should consider any private npm packages as well?
720
+
721
+ Lazy Function / Function Prioritization
722
+ - If we have functions that can run at any time (but must run in order), then... we can run them later, when the server is more free.
723
+ - AND, we can even go as far as to detect when the predicted outputs of them are needed, running the functions at those times?
724
+ - Maybe we would actually want this to be functions triggered on read, but only triggered once?
725
+
726
+ Ability to disable syncing of paths
727
+ - Possibly depending on context, such as page
728
+ - Also, possibly just to reduce the update rate for certain paths
729
+ - This fixes the issue of lag (either network, client, or server), when many pages use a commonly changed value
730
+ - Ideally we WOULD like it to be updated, but... if we don't need it updated, and 99% of our server capacity is spent updating a few values we don't care about...
731
+ - We want this to be able to be added post-hoc, potentially live (at a data level, not a code level),
732
+ so the site can be quickly optimized on the fly
733
+ - But of course, the code will also need the ability to specify it doesn't want updates
734
+ - Will basically function as a watchLatestOnce, where it gets the latest value, and then no more.
735
+
736
+ Better undefined support
737
+ - We COULD make it so for the last run of a function, after all values are synced, we run again returning undefined for every value that is fully synchronized with no child values synchronized.
738
+ - If the first run has every value synchronized we would have to run again.
739
+ Pros
740
+ - This MIGHT make functions run more naturally
741
+ Cons
742
+ - Functions are more complex to run (and must run more times)
743
+ Alternatives
744
+ a) Just use atomicObjectRead if you want to read undefined
745
+ b) Use `+x`, `x + ""`, OR `${x}`, to coerce it to a primitive
746
+
747
+ Support instances in our data?
748
+ - Benefits
749
+ - Instead of passing around string ids everywhere, and having programmers have to know which functions use it, you can pass around a class and just get autocomplete
750
+ - Costs
751
+ - Slow
752
+ - Harder to debug
753
+ - Alternatives
754
+ - Compile time instances => static function + id call
755
+ - Basically, we can "store" and "pass" around an instance, and even "new" it. But
756
+ new => create random id, and store data in static location under that id
757
+ function calls => call static function with instance (which is a string) as a parameter
758
+ instance accesses (including this.) => lookup data from root with static lookup
759
+ - This is harder to do, but... faster, and easy to debug.
760
+ - Function modules with local .databaseTyped call
761
+ - You can go to definition and see all the exported functions, which are probably how you mutate the data (especially if the .databaseTyped call isn't exported!)
762
+ - BASICALLY, everything starts as a static call. But... those calls may wish to create class instances, and store those (ex, binding data and functions)
763
+ - SO, on seeing a set that is a class (ex, has functions), instead of just stripping the functions, we...
764
+ - Figure out if it is a deployed function (has exposedRootClass decorator).
765
+ - If not, we don't store it
766
+ - Store a new InstanceId in our schema with the data provided
767
+ [DomainName][ClassName].Instances[InstanceId].Data = Data
768
+ - Replace the write with this new instanceId
769
+ - On accessing a value that... looks like an instanceId? We...
770
+ - On reading values we use the Instances.Data path instead
771
+ - On calling functions we load the class from the Source info, and call it
772
+ - Specifically, providing a this context equal to the data value.
773
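+ A small sketch of the "static function + id" alternative above (names are hypothetical; the real version would be generated at compile time and store data under [DomainName][ClassName].Instances[InstanceId].Data):
+   interface UserData { name: string; credits: number; }
+   const instances = new Map<string, UserData>(); // stand-in for the Instances[InstanceId].Data path
+
+   // new => create a random id, store the data statically under that id
+   function newUser(data: UserData): string {
+       const id = Math.random().toString(36).slice(2);
+       instances.set(id, data);
+       return id;
+   }
+
+   // instance method => static function taking the instance (a string id) as a parameter
+   function addCredits(userId: string, amount: number): void {
+       const data = instances.get(userId);
+       if (!data) throw new Error(`Unknown instance ${userId}`);
+       data.credits += amount;
+   }
+
+   const id = newUser({ name: "test", credits: 0 });
+   addCredits(id, 5);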
+
774
+ Clientside "local" FunctionRunner
775
+ - In theory, some operations will heavily depend on some server function call completing successfully. BUT, also store local state?
776
+ - IF we never need this, then we never need to implement it. But... if we find a case where this is useful, in which just moving the data to the remote is not viable (and there is no alternative)... then... we could do it?
777
+ 1) Allow "local" functions to run using FunctionRunner, so they can automatically rerun when rejected
778
+ - We will require the ability to mount FunctionRunners under the "local" domain explicitly
779
+ 2) Allow (when specified) "local" functions to depend on "remote" writes
780
+ - This requires allowing syncing valid state, etc
781
+ - Without running FunctionRunner, rejections are usually not preferred. But with running it, rejections can result in a rerun, and so are not as bad.
782
+
783
+ Intercept DB, to allow writes without actual ownership of a domain, cascading to store all values that depend on these changes as well.
784
+ - Maybe only useful for hacking together applications, or development, but... seems like it would be pretty cool?
785
+
786
+ Function call results (for cross domain calls)
787
+ - Probably not a good idea. Maybe... just never do this...
788
+ - Non-cross domain calls are all inline anyways, so this is only for cross domain calls
789
+ Something like: QuerySub.afterFunction(() => OtherController.queueAndRun(), result => TestController.finished(result)), which tells FunctionRunner to do some extra stuff after a cross domain call finishes.
790
+ - We COULD ALSO use async/await. We KNOW when the synchronous part of a function finishes
791
+ - Ex: `let result = await QuerySub.afterFunction(() => OtherController.queueAndRun())`
792
+ - As we are broken in on the machine, we don't need to ALWAYS resume (although if the node is killed, and the function needs to be rerun, we would need to re-evaluate the previous call to catch up).
793
+ - We would need to know when the await returns, wrapping it with some code?
794
+ - This is... impossible? Although... if the thread is CLEAN, we could just assume the next function will run, resolve, then wait for a promise, then assume the function is finished? It gets REALLY difficult
795
+ - This will break up the ACID nature of the function, which is fairly unfortunate
796
+
797
+ Size optimization
798
+ - Move register calls into a .client.ts file, so browsers only need to import the interface, and not the implementation too!
799
+ - Better HTTPS endpoint which prebundles some calls, so we can bootstrap faster
800
+
801
+ Streamlined externals, via a custom site that takes payment and handles setting up all the other services
802
+
803
+ Lazy connections
804
+ - NetworkState creates a lot of connections, which have very infrequent traffic. Instead of having N network connections, we should allow some connections to be marked as less important, and closed when not used for a bit. Although... if we had, say, 10K open TCP connections, is this a problem? Hmm...
805
+
806
+ Synchronize time ourself
807
+ - We can't trust machines to synchronize their time. Synchronize it ourself in NetworkState (OR, maybe just by using NetworkState), and use that instead of Date.now() everywhere!
808
+ - We'll want to use a waitForTimeToSync() function in places to gate our major code locations to ensure we always have a synchronized time, so we can expose a getTime() function that is synchronous.
809
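+ A hedged sketch of the waitForTimeToSync() / getTime() pair (how NetworkState measures the offset is left out; the function names match the notes above, but the implementation is an assumption):
+   let timeOffsetMs: number | undefined;
+   let markSynced: (() => void) | undefined;
+   const synced = new Promise<void>(resolve => { markSynced = resolve; });
+
+   // Called once NetworkState has agreed on a network time
+   export function onNetworkTime(networkNowMs: number) {
+       timeOffsetMs = networkNowMs - Date.now();
+       markSynced?.();
+   }
+
+   export function waitForTimeToSync(): Promise<void> { return synced; }
+
+   // Synchronous getTime(), safe once waitForTimeToSync() has resolved
+   export function getTime(): number {
+       if (timeOffsetMs === undefined) throw new Error("Time not synchronized yet");
+       return Date.now() + timeOffsetMs;
+   }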
+
810
+ Replace ALMOST all external services
811
+ - We can run our own DNS server
812
+ - We can replace firestore with our regular storage controller
813
+
814
+ Very short expiry times on thread certificates, finding some way to automatically update them while running
815
+ - At first only node <=> node
816
+ - BUT THEN, our proxy should be able to handle updating the peer cert, as it can probably call renegotiate?
817
+ - Our system for the client to update its cert after the fact can likely be reused to trigger an update of credentials?
818
+ - I assume outstanding connections won't be killed if their certs expire, so... we would actually want to
819
+ add an additional check to close a connection if its cert isn't updated? Or... we could just call renegotiate, which
820
+ I think serverside TLS connections can do?