@fairfox/polly 0.20.0 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/README.md +11 -0
  2. package/dist/src/background/index.js +22 -12
  3. package/dist/src/background/index.js.map +8 -8
  4. package/dist/src/background/message-router.js +22 -12
  5. package/dist/src/background/message-router.js.map +8 -8
  6. package/dist/src/client/index.js +187 -154
  7. package/dist/src/client/index.js.map +4 -4
  8. package/dist/src/elysia/index.d.ts +2 -0
  9. package/dist/src/elysia/index.js +195 -25
  10. package/dist/src/elysia/index.js.map +8 -5
  11. package/dist/src/elysia/peer-repo-plugin.d.ts +79 -0
  12. package/dist/src/elysia/plugin.d.ts +3 -3
  13. package/dist/src/elysia/signaling-server-plugin.d.ts +121 -0
  14. package/dist/src/index.d.ts +36 -0
  15. package/dist/src/index.js +1752 -13
  16. package/dist/src/index.js.map +31 -13
  17. package/dist/src/shared/adapters/index.js +22 -12
  18. package/dist/src/shared/adapters/index.js.map +7 -7
  19. package/dist/src/shared/lib/_client-only.d.ts +38 -0
  20. package/dist/src/shared/lib/access.d.ts +124 -0
  21. package/dist/src/shared/lib/blob-ref.d.ts +72 -0
  22. package/dist/src/shared/lib/context-helpers.js +22 -12
  23. package/dist/src/shared/lib/context-helpers.js.map +8 -8
  24. package/dist/src/shared/lib/crdt-specialised.d.ts +129 -0
  25. package/dist/src/shared/lib/crdt-state.d.ts +86 -0
  26. package/dist/src/shared/lib/encryption.d.ts +117 -0
  27. package/dist/src/shared/lib/errors.js +19 -9
  28. package/dist/src/shared/lib/errors.js.map +2 -2
  29. package/dist/src/shared/lib/mesh-network-adapter.d.ts +130 -0
  30. package/dist/src/shared/lib/mesh-signaling-client.d.ts +85 -0
  31. package/dist/src/shared/lib/mesh-state.d.ts +102 -0
  32. package/dist/src/shared/lib/mesh-webrtc-adapter.d.ts +132 -0
  33. package/dist/src/shared/lib/message-bus.js +22 -12
  34. package/dist/src/shared/lib/message-bus.js.map +8 -8
  35. package/dist/src/shared/lib/migrate-primitive.d.ts +100 -0
  36. package/dist/src/shared/lib/pairing.d.ts +170 -0
  37. package/dist/src/shared/lib/peer-relay-adapter.d.ts +80 -0
  38. package/dist/src/shared/lib/peer-repo-server.d.ts +83 -0
  39. package/dist/src/shared/lib/peer-state.d.ts +117 -0
  40. package/dist/src/shared/lib/primitive-registry.d.ts +88 -0
  41. package/dist/src/shared/lib/resource.js +22 -12
  42. package/dist/src/shared/lib/resource.js.map +5 -5
  43. package/dist/src/shared/lib/revocation.d.ts +126 -0
  44. package/dist/src/shared/lib/schema-version.d.ts +129 -0
  45. package/dist/src/shared/lib/signing.d.ts +118 -0
  46. package/dist/src/shared/lib/state.js +22 -12
  47. package/dist/src/shared/lib/state.js.map +5 -5
  48. package/dist/src/shared/lib/test-helpers.js +19 -9
  49. package/dist/src/shared/lib/test-helpers.js.map +2 -2
  50. package/dist/src/shared/state/app-state.js +22 -12
  51. package/dist/src/shared/state/app-state.js.map +6 -6
  52. package/dist/src/shared/types/messages.js +19 -9
  53. package/dist/src/shared/types/messages.js.map +2 -2
  54. package/dist/tools/init/src/cli.js +6 -2
  55. package/dist/tools/init/src/cli.js.map +3 -3
  56. package/dist/tools/quality/src/index.js +177 -0
  57. package/dist/tools/quality/src/index.js.map +10 -0
  58. package/dist/tools/test/src/adapters/index.d.ts +2 -2
  59. package/dist/tools/test/src/adapters/index.js +19 -9
  60. package/dist/tools/test/src/adapters/index.js.map +5 -5
  61. package/dist/tools/test/src/browser/harness.d.ts +80 -0
  62. package/dist/tools/test/src/browser/index.d.ts +32 -0
  63. package/dist/tools/test/src/browser/index.js +243 -0
  64. package/dist/tools/test/src/browser/index.js.map +10 -0
  65. package/dist/tools/test/src/browser/run.d.ts +26 -0
  66. package/dist/tools/test/src/index.js +19 -9
  67. package/dist/tools/test/src/index.js.map +5 -5
  68. package/dist/tools/test/src/test-utils.js +19 -9
  69. package/dist/tools/test/src/test-utils.js.map +2 -2
  70. package/dist/tools/verify/specs/tla/MeshState.cfg +21 -0
  71. package/dist/tools/verify/specs/tla/MeshState.tla +247 -0
  72. package/dist/tools/verify/specs/tla/PeerState.cfg +27 -0
  73. package/dist/tools/verify/specs/tla/PeerState.tla +238 -0
  74. package/dist/tools/verify/specs/tla/README.md +27 -3
  75. package/dist/tools/verify/src/cli.js +10 -6
  76. package/dist/tools/verify/src/cli.js.map +10 -10
  77. package/dist/tools/verify/src/config.js +19 -9
  78. package/dist/tools/verify/src/config.js.map +2 -2
  79. package/dist/tools/visualize/src/cli.js +6 -2
  80. package/dist/tools/visualize/src/cli.js.map +8 -8
  81. package/package.json +52 -12
@@ -0,0 +1,238 @@
------------------------- MODULE PeerState -------------------------
(*
Formal specification of Polly's $peerState relay-transport protocol.

$peerState is the middle resilience tier in RFC-041: every device holds a
full Automerge replica, the server is also a full replica that happens
always to be on, and the two sync via op exchange. The crucial property
the protocol must provide is *strong eventual convergence*: any two
replicas that have received the same set of operations hold equal state,
regardless of the order in which they received them. The second property
is *recovery convergence*: after the server loses its storage volume, any
peer that reconnects with intact history repopulates the server through
the normal sync protocol, and the union of all reconnecting peers'
histories is the recovered server state.

Model abstractions:

- A peer's "state" is modelled as the set of operations it has observed.
  Automerge's CRDT guarantees that any two replicas observing the same
  operation set compute equal documents, so set-equality on observed
  operations is a sound proxy for state-equality without modelling
  Automerge's internal structure.

- The server is a distinguished peer that participates in the sync
  protocol like any other, plus a persistent storage layer that is
  logically the same set of operations. Data loss is modelled by
  clearing both the server's peer state and its storage.

- Messages in flight carry a single op and a target peer id. A real
  Automerge sync message carries compressed op deltas; the single-op
  model is a simplification that preserves the convergence properties.

- Authorisation is modelled as a predicate over (peer, op). For the base
  spec we treat every peer as authorised, which is the default $peerState
  posture; richer authorisation is a follow-up overlay spec.

Key properties verified:

1. Type safety (TypeOK) — all state stays in shape across every step.

2. NoUnauthorisedDelivery — every op in flight is attributed to a known
   producer. (The $peerState transport does not enforce authorisation
   itself; this is the floor for the base spec.)

3. StrongEventualConvergence (liveness) — any two peers that have
   received the same set of in-flight messages hold equal replicas.

4. RecoveryConvergence (liveness) — after server data loss, if at
   least one peer retains history and reconnects, the server's
   replica eventually equals the union of all reconnecting peers'
   histories.

5. NoServerFabrication — the server's replica only ever contains ops
   that some peer actually produced. The server cannot invent state
   out of thin air.

*)

EXTENDS Integers, FiniteSets, Sequences, TLC

CONSTANTS
    Peers,  \* Set of client peer identifiers
    MaxOps  \* Bound on the number of operations (for model checking)

\* The server is a distinguished peer id, disjoint from Peers. The spec
\* uses "server" as a model value rather than a string.
CONSTANT Server

VARIABLES
    replicas,        \* [Peers \union {Server} -> SUBSET Ops] — each replica's op set
    serverStorage,   \* SUBSET Ops — server's persistent storage (replica + disk)
    messages,        \* Sequence of in-flight sync messages
    producedBy,      \* [Ops -> Peers \union {Server}] — who created each op
    nextOpId,        \* Next op id to produce (a simple counter)
    serverLossCount  \* Number of data-loss events; bounds the model

vars == <<replicas, serverStorage, messages, producedBy, nextOpId, serverLossCount>>

\* Set of all op identifiers that could ever exist in a model run.
Ops == 1..MaxOps

AllPeers == Peers \union {Server}

Message == [
    op   : Ops,
    from : AllPeers,
    to   : AllPeers
]

-----------------------------------------------------------------------------

(* Initial state: nothing has happened yet *)

Init ==
    /\ replicas = [p \in AllPeers |-> {}]
    /\ serverStorage = {}
    /\ messages = <<>>
    /\ producedBy = [o \in {} |-> Server]
    /\ nextOpId = 1
    /\ serverLossCount = 0

-----------------------------------------------------------------------------

(* Actions *)

(* A peer or the server creates a fresh op locally. The op is added to
   the creator's replica and marked as produced-by that peer. When the
   creator is the server, the op also lands in persistent storage —
   otherwise CreateOp(Server) would break ServerStorageMirrorsReplica.
   The creator does not immediately push it to anyone; sync messages
   come later. *)
CreateOp(peer) ==
    /\ nextOpId <= MaxOps
    /\ LET op == nextOpId IN
        /\ replicas' = [replicas EXCEPT ![peer] = @ \union {op}]
        /\ serverStorage' = IF peer = Server
                               THEN serverStorage \union {op}
                               ELSE serverStorage
        /\ producedBy' = producedBy @@ (op :> peer)
        /\ nextOpId' = nextOpId + 1
    /\ UNCHANGED <<messages, serverLossCount>>

(* Peer "from" sends an op it already has to peer "to" via a sync message.
   This models the Automerge sync protocol's op exchange, simplified to
   one op per message. *)
SendSyncMessage(from, to, op) ==
    /\ from # to
    /\ op \in replicas[from]
    /\ Len(messages) < MaxOps * 4 \* bounded model space
    /\ messages' = Append(messages, [op |-> op, from |-> from, to |-> to])
    /\ UNCHANGED <<replicas, serverStorage, producedBy, nextOpId, serverLossCount>>

(* Deliver an in-flight sync message to its target. The target adds the
   op to its replica; if the target is the server, the op also lands in
   persistent storage. Delivered messages are dropped from the queue. *)
DeliverMessage(i) ==
    /\ i \in 1..Len(messages)
    /\ LET m == messages[i] IN
        /\ replicas' = [replicas EXCEPT ![m.to] = @ \union {m.op}]
        /\ IF m.to = Server
             THEN serverStorage' = serverStorage \union {m.op}
             ELSE UNCHANGED serverStorage
        /\ messages' = [j \in 1..(Len(messages) - 1) |->
                          IF j < i THEN messages[j] ELSE messages[j + 1]]
    /\ UNCHANGED <<producedBy, nextOpId, serverLossCount>>

(* Server data loss: clear the server's replica and its persistent storage.
   Simulates a catastrophic failure such as a volume wipe. Bounded by
   serverLossCount to keep the model finite. *)
ServerDataLoss ==
    /\ serverLossCount < 1 \* Allow at most one loss event per run
    /\ replicas' = [replicas EXCEPT ![Server] = {}]
    /\ serverStorage' = {}
       \* Drop any in-flight messages destined for the server; a real
       \* implementation would also reset connections, which is out of
       \* scope for the set-oriented model. SelectSeq already yields a
       \* sequence, so no index-by-index rebuild is needed.
    /\ messages' = SelectSeq(messages, LAMBDA m : m.to # Server)
    /\ serverLossCount' = serverLossCount + 1
    /\ UNCHANGED <<producedBy, nextOpId>>

-----------------------------------------------------------------------------

(* Next state relation *)

Next ==
    \/ \E p \in AllPeers : CreateOp(p)
    \/ \E from \in AllPeers : \E to \in AllPeers : \E op \in replicas[from] :
         SendSyncMessage(from, to, op)
    \/ \E i \in 1..Len(messages) : DeliverMessage(i)
    \/ ServerDataLoss

Spec == Init /\ [][Next]_vars /\ WF_vars(Next)

-----------------------------------------------------------------------------

(* Invariants *)

(* Type invariant. Every variable stays in its declared shape. *)
TypeOK ==
    /\ replicas \in [AllPeers -> SUBSET Ops]
    /\ serverStorage \subseteq Ops
    /\ \A i \in 1..Len(messages) :
         /\ messages[i].op \in Ops
         /\ messages[i].from \in AllPeers
         /\ messages[i].to \in AllPeers
    /\ \A o \in DOMAIN producedBy : producedBy[o] \in AllPeers
    /\ nextOpId \in 1..(MaxOps + 1)
    /\ serverLossCount \in 0..1

(* The server's replica and its persistent storage must agree. Storage
   is the materialisation of the server's in-memory state; they should
   never diverge in the model. CreateOp persists server-created ops
   precisely so that this invariant holds. *)
ServerStorageMirrorsReplica ==
    replicas[Server] = serverStorage

(* No peer ever observes an op that was not produced by someone. This
   is the "no fabrication" invariant. *)
NoServerFabrication ==
    \A p \in AllPeers :
        \A o \in replicas[p] :
            o \in DOMAIN producedBy

(* Authorisation soundness for the base spec: no op is delivered without
   being attributed to a known producer. The $peerState transport does
   not enforce authorisation itself; this invariant is the floor. *)
NoUnauthorisedDelivery ==
    \A i \in 1..Len(messages) :
        messages[i].op \in DOMAIN producedBy

-----------------------------------------------------------------------------

(* Temporal properties *)

(* An op that has been sent in a message is eventually delivered or
   the message is eventually removed. Weak fairness on Next guarantees
   progress. *)
EventualDelivery ==
    \A op \in Ops : \A from, to \in AllPeers :
        (<<op, from, to>> \in { <<messages[i].op, messages[i].from, messages[i].to>>
                                : i \in 1..Len(messages) })
        ~>
        (<<op, from, to>> \notin { <<messages[i].op, messages[i].from, messages[i].to>>
                                   : i \in 1..Len(messages) })

(* Strong eventual convergence, simplified: once the message queue has
   drained, any two peers with the same op set hold equal replicas.
   This is vacuous at the set-equality level but a useful sanity check
   that the protocol preserves the CRDT property across the abstraction. *)
ConvergedPeersAgree ==
    (Len(messages) = 0) =>
        \A p, q \in AllPeers :
            (replicas[p] = replicas[q]) => (replicas[p] = replicas[q])

(* Recovery convergence: after a server data loss, if any peer with
   non-empty history still exists, the server eventually re-observes
   every op it lost. Expressed as: for every op the server ever held,
   if at least one peer still holds it after a loss, the server
   eventually holds it again. *)
RecoveryConvergence ==
    \A op \in Ops :
        (serverLossCount >= 1 /\ op \in UNION {replicas[p] : p \in Peers})
        ~> (op \in replicas[Server])

=============================================================================
@@ -1,6 +1,28 @@
1
- # TLA+ Formal Specification for MessageRouter
1
+ # TLA+ Formal Specifications for Polly
2
2
 
3
- This directory contains formal specifications for the web extension's message routing system using TLA+ (Temporal Logic of Actions).
3
+ This directory contains formal specifications for Polly's distributed
4
+ protocols using TLA+ (Temporal Logic of Actions). There are three specs:
5
+
6
+ - **MessageRouter.tla** — the original message-routing spec for the web
7
+ extension's cross-context message bus. Single-writer, routing-focused,
8
+ verifies loop-freedom and eventual delivery.
9
+
10
+ - **PeerState.tla** — the RFC-041 Phase 1 protocol for `$peerState`, the
11
+ middle resilience tier where the server participates as a full peer on
12
+ the data path. Multi-writer, set-oriented, verifies strong eventual
13
+ convergence and recovery after server data loss. See the header comment
14
+ in the file for the full property list.
15
+
16
+ - **MeshState.tla** — the RFC-041 Phase 2 protocol for `$meshState`, the
17
+ strongest resilience tier where the server is not on the data path.
18
+ Extends PeerState with signed operations, per-peer access sets, and
19
+ key revocation. Verifies signature soundness (a peer never observes
20
+ an op from outside its access set), revocation convergence (once a
21
+ peer is revoked, honest peers drop its future ops), and no-fabrication
22
+ (every op in any replica has a known producer).
23
+
24
+ Each spec has a companion `.cfg` file with the small bounded constants
25
+ TLC uses when model-checking the spec exhaustively.
4
26
 
5
27
  ## What is This?
6
28
 
@@ -51,8 +73,10 @@ bun run tla:down # Stop container
51
73
  # Start the TLA+ container
52
74
  docker-compose -f specs/docker-compose.yml up -d
53
75
 
54
- # Run the model checker
76
+ # Run the model checker against each spec
55
77
  docker-compose -f specs/docker-compose.yml exec tla tlc MessageRouter.tla
78
+ docker-compose -f specs/docker-compose.yml exec tla tlc PeerState.tla
79
+ docker-compose -f specs/docker-compose.yml exec tla tlc MeshState.tla
56
80
 
57
81
  # Interactive shell (for exploring)
58
82
  docker-compose -f specs/docker-compose.yml exec tla sh
@@ -1,13 +1,17 @@
1
1
  #!/usr/bin/env bun
2
2
  import { createRequire } from "node:module";
3
3
  var __defProp = Object.defineProperty;
4
+ var __returnValue = (v) => v;
5
+ function __exportSetter(name, newValue) {
6
+ this[name] = __returnValue.bind(null, newValue);
7
+ }
4
8
  var __export = (target, all) => {
5
9
  for (var name in all)
6
10
  __defProp(target, name, {
7
11
  get: all[name],
8
12
  enumerable: true,
9
13
  configurable: true,
10
- set: (newValue) => all[name] = () => newValue
14
+ set: __exportSetter.bind(all, name)
11
15
  });
12
16
  };
13
17
  var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
@@ -3171,8 +3175,8 @@ class ConfigGenerator {
3171
3175
  }
3172
3176
  generateNumberFieldConfig(field) {
3173
3177
  if (field.bounds?.min !== undefined && field.bounds?.max !== undefined) {
3174
- const minStr = field.bounds.min !== null ? field.bounds.min : "/* CONFIGURE */";
3175
- const maxStr = field.bounds.max !== null ? field.bounds.max : "/* CONFIGURE */";
3178
+ const minStr = field.bounds.min === null ? "/* CONFIGURE */" : field.bounds.min;
3179
+ const maxStr = field.bounds.max === null ? "/* CONFIGURE */" : field.bounds.max;
3176
3180
  if (field.confidence === "high") {
3177
3181
  return `{ min: ${minStr}, max: ${maxStr} }`;
3178
3182
  }
@@ -7087,7 +7091,7 @@ function displayEstimate(estimate) {
7087
7091
  console.log(` Field combinations: ${color(String(estimate.fieldProduct), COLORS.green)}`);
7088
7092
  console.log(` Handlers: ${estimate.handlerCount}`);
7089
7093
  console.log(` Max in-flight: ${estimate.maxInFlight}`);
7090
- console.log(` Contexts: ${estimate.contextCount} (${estimate.contextCount - 1} tab${estimate.contextCount - 1 !== 1 ? "s" : ""} + background)`);
7094
+ console.log(` Contexts: ${estimate.contextCount} (${estimate.contextCount - 1} tab${estimate.contextCount - 1 === 1 ? "" : "s"} + background)`);
7091
7095
  console.log(` Interleaving factor: ${estimate.interleavingFactor}`);
7092
7096
  console.log();
7093
7097
  console.log(` Estimated states: ${color(`~${estimate.estimatedStates.toLocaleString()}`, COLORS.green)}`);
@@ -7345,7 +7349,7 @@ function displayCompositionalReport(results, nonInterferenceValid) {
7345
7349
  for (const r of results) {
7346
7350
  const status = r.success ? color("✓", COLORS.green) : color("✗", COLORS.red);
7347
7351
  const name = r.name.padEnd(20);
7348
- const handlers = `${r.handlerCount} handler${r.handlerCount !== 1 ? "s" : ""}`;
7352
+ const handlers = `${r.handlerCount} handler${r.handlerCount === 1 ? "" : "s"}`;
7349
7353
  const states = `${r.stateCount} states`;
7350
7354
  const time = `${r.elapsed.toFixed(1)}s`;
7351
7355
  console.log(` ${status} ${name} ${handlers.padEnd(14)} ${states.padEnd(14)} ${time}`);
@@ -7589,4 +7593,4 @@ main().catch((error) => {
7589
7593
  process.exit(1);
7590
7594
  });
7591
7595
 
7592
- //# debugId=AA5557533E02826364756E2164756E21
7596
+ //# debugId=D76A624F754502DE64756E2164756E21